{
"source": "jfuruness/lib_roa_collector",
"score": 3
}
|
#### File: lib_roa_collector/lib_roa_collector/roa_collector.py
```python
import csv
from datetime import datetime, timedelta
from multiprocessing import cpu_count
import os
import re
from lib_utils.base_classes import Base
from lib_utils.file_funcs import download_file
from lib_utils.helper_funcs import get_hrefs, run_cmds
class ROACollector(Base):
"""Downloads ROAs from ripe"""
url = "https://ftp.ripe.net/rpki/"
def run(self):
"""Gets URLs, downloads files, and concatenates them"""
# Get URLs and paths
urls = self._get_urls()
paths = self._get_paths(urls)
# Download all files with multiprocessing
self.download_mp(download_file, [urls, paths])
# Extract data to the tsv_path
self._extract_data()
def _get_urls(self):
"""Gets URLs to all the ROA CSVs"""
# Get URLs of Tals
urls = [self.url + x for x in get_hrefs(self.url) if ".tal" in x]
# https://ftp.ripe.net/rpki/afrinic.tal/2021/08/16/roas.csv
return [self.dl_time.strftime(f"{x}/%Y/%m/%d/roas.csv") for x in urls]
def _get_paths(self, urls):
"""Gets all paths from the URLs for downloading"""
paths = []
for url in urls:
# Get name of tal
tal = re.findall("rpki/(.+).tal/", url)[0]
# Get download path
paths.append(self.dir_ / f"{tal}.csv")
return paths
def _extract_data(self):
"""Extracts data and adds it to the TSV path"""
rows = []
# Done this way because many links are broken, so paths are empty
for fname in os.listdir(self.dir_):
if ".csv" not in fname:
continue
tal = fname.replace(".csv", "")
path = self.dir_ / fname
with path.open(mode="r") as f:
for row in csv.DictReader(f):
# Don't start and end 1 early for URI - this cuts off data
new_row = {"uri": row["URI"],
# Get rid of AS in front of ASN
"asn": row["ASN"][2:],
                               # Replace bad key names
"prefix": row["IP Prefix"],
"max_length": row["Max Length"],
"not_before": row["Not Before"],
"not_after": row["Not After"],
"tal": tal}
rows.append(new_row)
# Write to file
        with self.tsv_path.open(mode="w") as f:
            # Use a fixed field list so an empty rows list doesn't break the writer
            fieldnames = ["uri", "asn", "prefix", "max_length",
                          "not_before", "not_after", "tal"]
            writer = csv.DictWriter(f, fieldnames, delimiter="\t")
writer.writeheader()
writer.writerows(rows)
```
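A minimal usage sketch for the collector above, assuming ROACollector is re-exported at the package root and using the Base keyword arguments (dl_time, _dir) defined in lib_utils/base_classes.py later in this section; the date and directory are illustrative only.
```python
from datetime import datetime

# Assumption: the package __init__ re-exports ROACollector
from lib_roa_collector import ROACollector

# Collect the ROAs published by RIPE for an (illustrative) day and
# write the combined TSV under a temporary directory
collector = ROACollector(dl_time=datetime(2021, 8, 16), _dir="/tmp/roas/")
collector.run()
print(collector.tsv_path)  # <_dir>/<timestamp>/ROACollector/ROACollector.tsv
```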
|
{
"source": "jfuruness/lib_rovpp",
"score": 2
}
|
#### File: v2/v2_aggressive/rovpp_v2a_lite_simple_as.py
```python
from ..v2_base import ROVPPV2LiteSimpleAS
class ROVPPV2aLiteSimpleAS(ROVPPV2LiteSimpleAS):
__slots__ = []
name = "ROV++V2a Lite Simple"
    def _policy_propagate(self, *args, **kwargs):
"""Do nothing. Send blackholes according to export policy"""
return False
```
#### File: v2/v2_base_shorten/rovpp_v2_shorten_lite_simple_as.py
```python
from lib_bgp_simulator import Relationships
from ..v2_base import ROVPPV2LiteSimpleAS
class ROVPPV2ShortenLiteSimpleAS(ROVPPV2LiteSimpleAS):
__slots__ = tuple()
name = "ROV++V2 Shorten Lite Simple"
def _copy_and_process(self,
ann,
recv_relationship,
holes=None,
**extra_kwargs):
"""Deep copies ann and modifies attrs"""
# if ann.invalid_by_roa and not ann.preventive:
# extra_kwargs["blackhole"] = True
# extra_kwargs["traceback_end"] = True
# These anns will be blackholes
if ann.invalid_by_roa and not ann.preventive:
extra_kwargs["as_path"] = tuple([ann.as_path[-1]])
return super(ROVPPV2ShortenLiteSimpleAS, self)._copy_and_process(
ann, recv_relationship, holes=holes, **extra_kwargs)
```
#### File: old/as_classes/non_lite.py
```python
class NonLite:
def _new_ann_better(self,
current_ann,
current_processed,
default_current_recv_rel,
new_ann,
new_processed,
default_new_recv_rel):
"""Assigns the priority to an announcement according to <NAME>ford
NOTE: processed is processed for second ann"""
new_rel_better = self._new_rel_better(current_ann,
current_processed,
default_current_recv_rel,
new_ann,
new_processed,
default_new_recv_rel)
if new_rel_better is not None:
return new_rel_better
else:
new_holes_smaller = self._new_holes_smaller(current_ann, new_ann)
if new_holes_smaller is not None:
return new_holes_smaller
else:
return self._new_as_path_ties_better(current_ann,
current_processed,
new_ann,
new_processed)
def _new_holes_smaller(self, current_ann, new_ann):
"""Best by hole size"""
# Holes aren't counted for this prefix
if current_ann.temp_holes is None:
return None
if len(current_ann.temp_holes) > len(new_ann.temp_holes):
return True
elif len(current_ann.temp_holes) < len(new_ann.temp_holes):
return False
else:
return None
```
#### File: as_classes/v1/rovpp_v1_simple_as.py
```python
from copy import deepcopy
from collections import defaultdict
from typing import Dict, Optional, Tuple
from ipaddress import ip_network
from lib_bgp_simulator import BGPSimpleAS, Relationships
from .rovpp_ann import ROVPPAnn
class ROVPPV1LiteSimpleAS(BGPSimpleAS):
name = "ROV++V1 Simple"
__slots__ = tuple()
def _policy_propagate(self, _, ann, *args):
"""Only propagate announcements that aren't blackholes"""
# Policy handled this ann for propagation (and did nothing)
return ann.blackhole
    def _new_ann_better(self,
                        current_ann: Optional[ROVPPAnn],
                        current_processed: bool,
                        default_current_recv_rel: Relationships,
                        new_ann: ROVPPAnn,
                        new_processed: bool,
                        default_new_recv_rel: Relationships,
                        # NOTE: this is set to holes dict
                        holes=None) -> Optional[bool]:
"""Must include this here since we blackhole prefixes now
This does the same thing as the original func
except it deprefers blackholes and invalid anns
"""
# Valid > invalid
new_validity_better = self._new_validity_better(current_ann,
new_ann)
if new_validity_better is not None:
return new_validity_better
else:
# Not blackhole > blackhole
new_blackhole_state_better = self._new_blackhole_state_better(
current_ann, new_ann)
if new_blackhole_state_better is not None:
                return new_blackhole_state_better
else:
                new_holes_better = self._new_holes_better(
                    current_ann,
                    current_processed,
                    new_ann,
                    new_processed,
                    holes)
if new_holes_better is not None:
return new_holes_better
else:
return super(ROVPPV1LiteSimpleAS, self)._new_ann_better(
current_ann,
current_processed,
default_current_recv_rel,
new_ann,
new_processed,
default_new_recv_rel)
def _new_validity_better(self, current_ann, new_ann):
"""Returns True if new better, False if old better, None if eq"""
if not new_ann.invalid_by_roa and current_ann.invalid_by_roa:
return True
elif new_ann.invalid_by_roa and not current_ann.invalid_by_roa:
return False
else:
return None
def _new_blackhole_state_better(self, current_ann, new_ann):
"""Returns True if new better, False if old better, None if eq"""
if not new_ann.blackhole and current_ann.blackhole:
return True
elif new_ann.blackhole and not current_ann.blackhole:
return False
else:
return None
def _new_holes_better(self,
current_ann,
current_ann_processed,
new_ann,
new_ann_processed,
holes):
"""Returns new ann has less holes, or None if =="""
# Could do this using int(processed) but so unreadable
# Holes for new announcement
if new_ann_processed:
new_holes = len(new_ann.holes)
else:
new_holes = len(holes[new_ann])
# Holes for current announcement
if current_ann_processed:
current_holes = len(current_ann.holes)
else:
current_holes = len(holes[current_ann])
        if new_holes < current_holes:
            return True
        elif new_holes > current_holes:
            return False
        # Be explicit for future devs
# One is not better than the other, return None
else:
return None
    def receive_ann(self, ann: ROVPPAnn, *args, **kwargs):
        """Ensures that announcements are ROV++ and valid"""
        if not hasattr(ann, "blackhole"):
            raise NotImplementedError(
                "Policy can't handle announcements without blackhole attrs")
return super(ROVPPV1LiteSimpleAS, self).receive_ann(
ann, *args, **kwargs)
    def process_incoming_anns(self,
                              from_rel: Relationships,
                              *args,
                              propagation_round: Optional[int] = None,
                              engine_input: Optional["EngineInput"] = None,
                              reset_q: bool = True,
                              **kwargs):
        """Processes all incoming announcements"""
        holes: Dict[ROVPPAnn, Tuple[ROVPPAnn, ...]] = \
            self._get_ann_to_holes_dict(engine_input)
        # With this solution, by changing the Gao-Rexford, I can actually just
        # process __normally__ and drop nothing, since all invalids are
        # replaced with blackholes in copy and process
        # Must rewrite this to include holes in many different places
        super(ROVPPV1LiteSimpleAS, self).process_incoming_anns(
            from_rel,
            *args,
            propagation_round=propagation_round,
            engine_input=engine_input,
            reset_q=False,
            holes=holes,
            **kwargs)
        self._add_blackholes(from_rel, holes)
        # It's possible that we had a previously valid prefix
        # Then later received a subprefix that was invalid
        # So we must recount the holes of each ann in local RIB
        self._recount_holes(propagation_round)
        self.reset_q(reset_q)
def _recount_holes(self, propagation_round):
# It's possible that we had a previously valid prefix
        # Then later received a subprefix that was invalid
# Or there was previously an invalid subprefix
# But later that invalid subprefix was removed
# So we must recount the holes of each ann in local RIB
assert propagation_round == 0, "Must recount holes if you plan on this"
def _get_ann_to_holes_dict(self, engine_input):
"""Gets announcements to a typle of Ann holes
Holes are subprefix hijacks
"""
holes = dict()
for _, ann in self._recv_q.prefix_anns():
ann_holes = []
for subprefix in engine_input.prefix_subprefix_dict[ann.prefix]:
for sub_ann in self._recv_q.get_ann_list(subprefix):
if sub_ann.invalid_by_roa:
ann_holes.append(sub_ann)
holes[ann] = tuple(ann_holes)
return holes
def _add_blackholes(self, from_rel, holes_dict):
"""Manipulates local RIB by adding blackholes and dropping invalid"""
# For each ann in local RIB:
for _, ann in self._local_rib.prefix_anns():
# For each hole in ann: (holes are invalid subprefixes)
            for unprocessed_hole_ann in holes_dict[ann]:
# If there is not an existing valid ann for that hole subprefix
existing_local_rib_subprefix_ann = self._local_rib.get_ann(
unprocessed_hole_ann.prefix)
if (existing_local_rib_subprefix_ann is None
or existing_local_rib_subprefix_ann.invalid_by_roa):
# Remove current ann and replace with blackhole
self._local_rib.remove_ann(unprocessed_hole_ann.prefix)
# Create the blackhole
blackhole = self._copy_and_process(unprocessed_hole_ann,
from_rel)
# Add the blackhole
self._local_rib.add_ann(blackhole)
            # Do nothing - ann should already be a blackhole
            assert ((ann.blackhole and ann.invalid_by_roa)
                    or not ann.invalid_by_roa)
def _copy_and_process(self, ann, recv_relationship, holes=None, **extra_kwargs):
"""Deep copies ann and modifies attrs"""
if ann.invalid_by_roa and not ann.preventive:
extra_kwargs["blackhole"] = True
extra_kwargs["holes"] = holes[ann]
return super(ROVPPV1LiteSimpleAS, self)._copy_and_process(
ann, recv_relationship, **extra_kwargs)
```
|
{
"source": "jfuruness/lib_secure_monitoring_service",
"score": 2
}
|
#### File: lib_secure_monitoring_service/lib_secure_monitoring_service/__main__.py
```python
from lib_bgp_simulator import Simulator, BGPAS, Graph
from lib_rovpp import ROVPPV1SimpleAS
from lib_secure_monitoring_service.engine_inputs import V4SubprefixHijack
from lib_secure_monitoring_service.rov_sms import ROVSMS, ROVSMSK1, ROVSMSK2, ROVSMSK3
from lib_secure_monitoring_service.v4_graph import V4Graph
def main():
Simulator().run(graphs=[V4Graph(percent_adoptions=[0,5,10,20,40,60,80,100],
adopt_as_classes=[ROVPPV1SimpleAS, ROVSMS, ROVSMSK1, ROVSMSK2, ROVSMSK3],
EngineInputCls=V4SubprefixHijack,
num_trials=20,
BaseASCls=BGPAS)]
)
if __name__ == "__main__":
main()
```
#### File: lib_secure_monitoring_service/lib_secure_monitoring_service/v4_scenario.py
```python
from lib_bgp_simulator.simulator.scenario import Scenario
from lib_bgp_simulator.enums import Relationships
from lib_secure_monitoring_service.rov_sms import ROVSMS
from lib_secure_monitoring_service.sim_logger import sim_logger as logger
class V4Scenario(Scenario):
def apply_blackholes_from_avoid_list(self, subgraphs):
logger.debug(f"Inside apply_blackholes_from_avoid_list")
# Create a flag to check if avoid_list has been created
avoid_list_created_flag = False
for subg_name, subgraph_asns in subgraphs.items():
for asn in subgraph_asns:
as_obj = self.engine.as_dict[asn]
# TODO: try isInstance
if hasattr(as_obj, "trusted_server"):
# Create the avoid list if it hasn't been
# created yet
if not avoid_list_created_flag:
as_obj.trusted_server.create_recs()
as_obj._force_add_blackholes_from_avoid_list(self.engine_input)
def run(self, subgraphs, propagation_round: int):
# Run engine
self.engine.run(propagation_round=propagation_round,
engine_input=self.engine_input)
# Add blackholes from avoid list
self.apply_blackholes_from_avoid_list(subgraphs)
# Calculate outcomes
traceback_outcomes = self._collect_data(subgraphs)
# Don't count these for diagrams and such
for uncountable_asn in self.engine_input.uncountable_asns:
traceback_outcomes.pop(uncountable_asn, None)
# delete engine from attrs so that garbage collector can come
# NOTE that if there are multiple propagation rounds, the engine
# Will still be there
del self.engine
# Delete the adopting_asns for the same reason.
# This does not cause problems for multiple rounds because the AS
# classes are already set.
if hasattr(self.engine_input, 'adopting_asns'):
del self.engine_input.adopting_asns
return traceback_outcomes
```
|
{
"source": "jfuruness/lib_utils",
"score": 3
}
|
#### File: lib_utils/lib_utils/base_classes.py
```python
from datetime import datetime, timedelta
from multiprocessing import cpu_count
import os
from pathlib import Path
from .helper_funcs import mp_call
class Base:
"""Base collector for classes that perform ETL"""
def __init__(self, **kwargs):
# Save kwargs in case you need to initialize other Base Classes
self.kwargs = kwargs
# Gets default download time if not otherwise set
self.dl_time = kwargs.get("dl_time", self._default_dl_time())
# Sets directory to download files to and write parsed files
self.base_dir = Path(kwargs.get("_dir", "/tmp/")) / datetime.now().strftime("%Y.%m.%d.%H.%M.%S.%f")
_dir = self.base_dir / self.__class__.__name__
self._dir = Path(_dir)
self._dir.mkdir(parents=True,
exist_ok=kwargs.get("_dir_exist_ok", False))
# Path to output file
self.tsv_path = self._dir / f"{self.__class__.__name__}.tsv"
# CPUs for downloading files (I/O bound)
self.dl_cpus = kwargs.get("dl_cpus", cpu_count() * 4)
# CPUs for processing.
# Some funcs go haywire if you use every core. cores-1 seems fine
        self.parse_cpus = kwargs.get("parse_cpus", max(cpu_count() - 1, 1))
# Store in the db or not
self.db = kwargs.get("db", False)
# Debug info
self.debug = kwargs.get("debug", False)
assert hasattr(self, "run"), "Needs a run function"
def parse_mp(self, func, args: list, desc=None, parse_cpus=None):
"""Calls a parsing function using multiprocessing
parse cpus is by default equal to cpu_count() - 1
"""
self._mp(func, args, desc, "parsing", parse_cpus, self.parse_cpus)
def download_mp(self, func, args: list, desc=None, dl_cpus=None):
"""Calls a download function using multiprocessing
Download cpus is by default equal to cpu_count() * 4
This should be an I/O bound process
"""
self._mp(func, args, desc, "downloading", dl_cpus, self.dl_cpus)
def _mp(self, func, args: list, desc, default_action, cpus, default_cpus):
"""Call a function with multiprocessing"""
if cpus is None:
cpus = default_cpus
if desc is None:
desc = f"{self.__class__.__name__} is {default_action}"
mp_call(func, args, desc, cpus=cpus)
def _default_dl_time(self):
"""Returns default DL time.
        For most things, we download from several days ago (currently 7)
And for collectors, time must be divisible by 4/8
"""
# 7 days because sometimes caida takes a while to upload
dl_time = datetime.utcnow() - timedelta(days=7)
return dl_time.replace(hour=0, minute=0, second=0, microsecond=0)
```
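To make the ETL pattern that Base encodes concrete, here is a minimal hypothetical subclass; the only contract visible above is that a subclass defines run() and may use the _dir/tsv_path attributes plus the download_mp/parse_mp helpers. The URL and file names are placeholders.
```python
from lib_utils.base_classes import Base
from lib_utils.file_funcs import download_file


class ExampleCollector(Base):
    """Hypothetical collector: download one CSV and rewrite it as a TSV"""

    def run(self):
        url = "https://example.com/data.csv"  # placeholder URL
        path = self._dir / "data.csv"
        # download_mp fans the (url, path) pairs out across self.dl_cpus workers
        self.download_mp(download_file, [[url], [path]])
        with path.open() as f_in, self.tsv_path.open(mode="w") as f_out:
            for line in f_in:
                f_out.write(line.replace(",", "\t"))
```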
#### File: lib_utils/lib_utils/print_funcs.py
```python
from datetime import datetime
import functools
import logging
from pathlib import Path
import multiprocessing_logging
logging_set = False
def config_logging(level=logging.INFO, section="main", mp=False) -> Path:
"""Configures logging to log to a file and screen
mp stands for multiprocessing, didn't want to override that package
"""
# NOTE: it turns out that subprocess calls, pytest, etc
# Seems to automatically add handlers, even if they are not set
# The only real way to check if we have handlers set
# Is to check if we have specific handlers that are set, or a global
global logging_set
if not logging_set:
path = _get_log_path(section)
# without this it doesn't work
logging.root.handlers = []
logging.basicConfig(level=level,
format='%(asctime)s-%(levelname)s: %(message)s',
handlers=[logging.StreamHandler(),
logging.FileHandler(path)])
logging.captureWarnings(True)
# If you need multiprocessing install this
# Otherwise it slows it down, and additionally doesn't flush
# after every call, which ruins logging unit tests
# . See: https://github.com/jruere/
# multiprocessing-logging/issues/51#issue-925800880
if mp:
multiprocessing_logging.install_mp_handler()
logging.debug("initialized logger")
logging_set = True
return path
def _get_log_path(section: str) -> Path:
"""Returns path to log file"""
fname = f"{section}_{datetime.now().strftime('%Y_%m_%d_%M_%S.%f')}.log"
log_dir = f"/var/log/{section}/"
path = Path(log_dir)
path.mkdir(parents=True, exist_ok=True)
return path / fname
# Used in lib_browser
# This decorator prints exception upon err
def print_err(err, msg="{}"):
    """Decorator factory that logs exceptions of the given type.

    Wraps the function, catches the given exception class, and logs it
    with the provided message format instead of re-raising.
    """
def my_decorator(func):
@functools.wraps(func)
def function_that_runs_func(*args, **kwargs):
try:
# Run the function
return func(*args, **kwargs)
except err as e:
logging.error(msg.format(e))
return function_that_runs_func
return my_decorator
```
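A small usage sketch of the print_err decorator above; the wrapped function and message are illustrative. Note that the decorated function logs the exception and returns None rather than re-raising.
```python
import logging

from lib_utils.print_funcs import print_err

logging.basicConfig(level=logging.INFO)


@print_err(ZeroDivisionError, msg="division failed: {}")
def divide(a, b):
    """Illustrative function that may raise ZeroDivisionError"""
    return a / b


divide(1, 0)  # logs "division failed: division by zero" and returns None
```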
|
{
"source": "jfuruness/lib_youtube_cd_burner",
"score": 3
}
|
#### File: lib_youtube_cd_burner/lib_youtube_cd_burner/__main__.py
```python
from argparse import ArgumentParser
import logging
from lib_utils.print_funcs import config_logging
from .app import App
from .youtube_playlist import YoutubePlaylist
def main():
parser = ArgumentParser(description='CD Burner')
url = "https://www.youtube.com/watch?v=jLtbFWJm9_M"
    parser.add_argument('--run', action="store_true")
parser.add_argument('--urls', action="store", default=url)
parser.add_argument('--dl_path', action="store", default="/tmp/yt")
parser.add_argument('--save_path', action="store", default="/tmp/yt2")
parser.add_argument('--song_format', action="store", default="mp3")
args = parser.parse_args()
config_logging(logging.DEBUG)
if args.run:
YoutubePlaylist(args.urls.split(","),
args.dl_path).generate_audio_medium(save_path=args.save_path,
song_format=args.song_format)
else:
App().create_app()
```
#### File: lib_youtube_cd_burner/lib_youtube_cd_burner/old_app.py
```python
from enum import Enum
from tkinter import *
from tkinter import filedialog

from .youtube_playlist import YoutubePlaylist
pad = {"padx": 5, "pady": 5}
class Action(Enum):
BURN = 1
SAVE = 2
class Language(Enum):
ENGLISH = 1
RUSSIAN = 2
class App:
def __init__(self):
self.texts = []
self.root = Tk()
self.root_text = StringVar()
self.root.title(self.root_text.get())
self.frame = LabelFrame(self.root, text=self.root_text.get(), **pad)
self.frame.pack(**pad)
def create_app(self):
self.burn_cds_text = StringVar()
self.save_files_text = StringVar()
self._add_languages()
self._add_urls()
self._add_actions()
self._add_save_dir()
self._add_submit()
self._change_language()
self.root.mainloop()
###############
### Actions ###
###############
    def _take_action(self):
        if self.action_var.get() == Action.SAVE.value:
            song_format = "mp3"
            # Save to the folder chosen in the browse dialog
            save_path = self.file_label_entry.get()
        else:
            song_format = "wav"
            # A save_path of None means the playlist gets burned to CD
            save_path = None
        pl = YoutubePlaylist(self.urls_entry.get().strip().split(","),
                             "/tmp/yt")
        pl.generate_audio_medium(save_path=save_path,
                                 song_format=song_format)
def _browse_files(self):
kwargs = {"initialdir": "/tmp/yt2",
"title": self.select_folder.text.get()}
filename = filedialog.askdirectory(**kwargs)
self.file_label_entry.delete(0, END)
self.file_label_entry.insert(0, filename)
def _remove_browse_files(self):
self.file_label_entry.grid_forget()
self.file_button.grid_forget()
self.submit_text.set("Burn CD(s)")
def _add_browse_files(self, submit_text_update=True):
self.file_label_entry.grid(row=2, column=1)
self.file_button.grid(row=2, column=0)
if submit_text_update:
self.submit_text.set("Save File(s)")
    def _change_language(self):
        if self.language_var.get() == Language.ENGLISH.value:
            self._add_english_text()
        elif self.language_var.get() == Language.RUSSIAN.value:
            self._add_russian_text()
def _add_english_text(self):
        self.root_text.set("Youtube Music")
self.select_folder_text.set("Select save folder")
self.burn_cds_text.set("Burn CD(s)")
self.save_files_text.set("Save File(s)")
self.enter_url_text.set("Enter (playlist) URL:")
self.root.title(self.root_text.get())
self.frame["text"] = self.root_text.get()
def _add_russian_text(self):
pass
##############
### Format ###
##############
def _add_languages(self):
self.language_var = IntVar()
self.language_var.set(Language.ENGLISH.value)
Radiobutton(self.frame,
text="English",
variable=self.language_var,
value=Language.ENGLISH.value,
command=self._change_language).grid(row=0, column=0)
Radiobutton(self.frame,
text="русский",
variable=self.language_var,
value=Language.RUSSIAN.value,
command=self._change_language).grid(row=0, column=1)
def _add_actions(self):
self.action_var = IntVar()
self.action_var.set(Action.SAVE.value)
Radiobutton(self.frame,
textvariable=self.save_files_text,
variable=self.action_var,
value=Action.SAVE.value,
command=self._add_browse_files).grid(row=1, column=0)
Radiobutton(self.frame,
textvariable=self.burn_cds_text,
variable=self.action_var,
value=Action.BURN.value,
command=self._remove_browse_files).grid(row=1, column=1)
def _add_urls(self):
self.enter_url_text = StringVar()
# URLs
self.urls_label = Label(self.frame,
textvariable=self.enter_url_text,
**pad)
self.urls_entry = Entry(self.frame, width=50)
#self.urls_entry.insert(0, ("ex: https://www.youtube.com/"
# "watch?v=jLtbFWJm9_M"))
self.urls_label.grid(row=1, column=0)
self.urls_entry.grid(row=1, column=1)
def _add_save_dir(self):
self.select_folder_text = StringVar()
self.file_label_entry = Entry(self.frame, width=50)
self.file_button = Button(self.frame,
textvariable=self.select_folder_text,
bd=1,
command=self._browse_files)
self._add_browse_files(submit_text_update=False)
def _add_submit(self):
if self.action_var.get() == Action.SAVE.value:
text = self.save_files_text.get()
else:
text = self.burn_cds_text.get()
# https://stackoverflow.com/a/1918054/8903959
self.submit_text = StringVar()
self.submit_text.set(text)
footer = Button(self.frame,
textvariable=self.submit_text,
bd=1,
#relief=SUNKEN,
#anchor=E,
command=self._take_action)
footer.grid(row=4, column=0, columnspan=3, sticky=W+E)
"""
button = Button(root, text="click me", padx=50, pady=50, command=myClick, fg="blue", bg="red")
button.pack()
root.mainloop()
# state disabled
# grid forget
"""
```
#### File: lib_youtube_cd_burner/lib_youtube_cd_burner/playlist.py
```python
import logging
import os
from lib_utils.file_funcs import makedirs
from random import shuffle
from shutil import move
from .cd import CD, CDFullError
from .song_holder import SongHolder
class Playlist(SongHolder):
"""Playlist class that allows for download and manipulation of songs
The playlist class can download songs, generate songs from downloads,
format songs, generate cds, burn cds, and self destruct to remove files.
"""
def __init__(self, urls: list, dl_path: str):
"""initializes playlist and directories"""
self.dl_path = dl_path
self.urls = urls
super(Playlist, self).__init__()
def generate_audio_medium(self,
cd_capacity=60*79+59,
remove_silence=False,
randomize=False,
save_path=None, # If path is none CD gets burned
song_format="wav",
normalize_audio=True,
add_silence=True):
"""Takes a playlist and generates cds from it.
        remove_silence removes silence at the end of songs. randomize
        shuffles the song order instead of keeping the playlist order.
        cd_capacity is the number of seconds that fit on a cd.
"""
self.download_songs()
# Removes silence, randomizes, changes song format
self.format_1(remove_silence, randomize, song_format)
if save_path is None:
self.save_as_cds(cd_capacity, normalize_audio, add_silence)
else:
self.save_as_files(normalize_audio, add_silence, save_path)
self.clean_up()
def save_as_cds(self, cd_capacity, normalize_audio, add_silence):
"""Burns all cds, if normalize_audio, the cds are normalized volume"""
self.generate_cds(cd_capacity)
self.format_2(normalize_audio, add_silence)
for cd in self.cds:
cd.burn()
def generate_cds(self, cd_capacity):
"""Generate CDs"""
# If a song is too long for cd it's moved to the next CD
for song in self.songs:
try:
self.cds[-1].add_track(song)
            except (CDFullError, IndexError):
                # If the cd is full or there are no cds, create one and
                # add the song to it
                self.cds.append(CD(cd_capacity))
                self.cds[-1].add_track(song)
def save_as_files(self, normalize_audio, add_silence, save_path):
"""Saves audio as files"""
self.format_2(normalize_audio, add_silence)
makedirs(save_path)
logging.info("moving songs now")
for song in self.songs:
song.add_metadata(save_path)
dest = os.path.join(save_path,
f"{song.name.strip()}.{song.extension}")
move(song.path, dest)
def clean_up(self):
"""Deletes paths"""
for song in self.songs:
del song
self.songs = []
```
#### File: lib_youtube_cd_burner/lib_youtube_cd_burner/youtube_playlist.py
```python
import logging
import math
import os
from lib_utils.file_funcs import delete_paths
from .playlist import Playlist
from .song import Song
from .youtube_dl_fix import Youtube_dl_fix
class MyLogger(object):
"""Logger class use by youtube_dl"""
def debug(self, msg):
print(msg)
def warning(self, msg):
print(msg)
def error(self, msg):
print(msg)
class YoutubePlaylist(Playlist):
"""inits a youtube playlist instance"""
def download_songs(self):
"""Downloads songs and adds to self.songs"""
# Options for the downloader
ydl_opts = {
'ignoreerrors': True,
'age_limit': 25,
'retries': 3,
'format': 'bestaudio[asr=44100]/best',
'outtmpl': '_%(playlist_index)s- %(title)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'wav',
'preferredquality': '192',
}],
'logger': MyLogger(),
'progress_hooks': [self.my_hook]
}
try:
og = ydl_opts['outtmpl']
for i, url in enumerate(self.urls):
zeroes = int(math.log10(len(self.urls))) + 1
url_indicator = "{:0" + str(zeroes) + "d}"
url_indicator = url_indicator.format(i)
# Download songs
ydl_opts['outtmpl'] = self.dl_path + "/" + url_indicator + og
logging.debug(f"path fmt is {ydl_opts['outtmpl']}")
with Youtube_dl_fix(ydl_opts) as ydl:
ydl.download([url])
except Exception as e:
print(f"dl songs {e}")
logging.warning(f"Video download failed. Probably webm file {e}")
def my_hook(self, d):
"""What happens when a song is downloaded"""
name = d['filename'].rsplit('.', 1)[0]
if d['status'] == 'finished':
logging.info(f'Done downloading {name}')
song = Song(d['filename'], name)
# Makes sure that the song downloaded and has volume
if os.path.exists(song.path):
if song.volume > -float('Inf'):
self.songs.append(song)
else:
logging.warning(f"{name} has no volume. Not including")
delete_paths(song.path)
# If it didn't download don't include it
else:
logging.warning(f"{name} didn't download properly")
```
|
{
"source": "jfuruness/Wasatch.PY",
"score": 3
}
|
#### File: Wasatch.PY/wasatch/Overrides.py
```python
import logging
import json
log = logging.getLogger(__name__)
##
# A mechanism to override the normal FID commands sent to a USB device
# with arbitrary binary data stored in an external JSON configuration file.
#
# @par Theory of Operation
#
# Say we want to control an experimental spectrometer, which operates
# more-or-less like our standard units, except that a couple settings
# need to be handled uniquely.
#
# An elegant solution would be to give the new device a custom PID, then
# extend a new UniqueSpectrometer from FeatureIdentificationDevice and
# simply overload the custom behavior. And probably there will be cases
# where we want to do that. Before going there, I'd really want to setup
# an ABC hierarchy over both FID and SP devices.
#
# However, that approach means that any tweaking of the custom behavior
# would have to be in Python source code. That means a new ENLIGHTEN
# installer would need to be built for each test. It also means the custom
# code would presumably appear in Wasatch.PY, an open-source project.
#
# So, another option is to simply provide an "overrides" framework which
# allows the runtime user to point to an external JSON file containing the
# custom opcodes and byte streams to be passed into the spectrometer. This
# is less elegant and more error-prone, but it provides more power and
# flexibility to the operator, all without requiring continuous minute
# changes to ENLIGHTEN or Wasatch.PY.
#
# For this particular instance, we wanted to let the user take explicit
# control of setting the integration time of a custom sensor. Two opcodes
# are being provided in the firmware to read and write arbitrary octet
# strings over I2C.
#
class Overrides(object):
def __init__(self, pathname):
self.pathname = pathname
self.description = None
self.startup = None
self.comms_init = None
self.min_delay_us = 0
self.settings = {}
if self.pathname:
try:
self.load()
except:
log.error("Could not load and parse %s", self.pathname, exc_info=1)
def empty(self):
return len(self.settings) == 0
def has_override(self, setting):
return setting in self.settings
def valid_value(self, setting, value):
if not self.has_override(setting):
return False
if str(value) in self.settings[setting]:
return True
if self.normalized_value(setting, value) is not None:
return True
return False
def get_override(self, setting, value):
if not self.valid_value(setting, value):
return None
normalized = self.normalized_value(setting, value)
return self.settings[setting][str(normalized)]
def load(self):
log.debug("loading %s", self.pathname)
with open(self.pathname) as infile:
config = json.loads(infile.read())
for k in ["description", "startup", "comms_init"]:
if k in config:
setattr(self, k, config[k])
del(config[k])
k = "min_delay_us"
if k in config:
self.min_delay_us = int(config[k])
del(config[k])
# any remaining keys are assumed to be settings which ENLIGHTEN
# would normally pass to WasatchDeviceWrapper
for setting in sorted(config.keys()):
self.settings[setting] = {}
values = sorted(config[setting].keys())
for value in values:
self.settings[setting][value] = config[setting][value]
log.debug("loaded overrides for settings: %s", sorted(self.settings.keys()))
log.debug("full overrides: %s", self.__dict__)
# ##########################################################################
# Private methods
# ##########################################################################
## for case where "15" is an override, but "15.0" is not
# @private
def normalized_value(self, setting, value):
if not self.has_override(setting):
return None
if str(value) in self.settings[setting]:
return value
if isinstance(value, float):
if value - int(value) == 0.0:
if str(int(value)) in self.settings[setting]:
return int(value)
return None
```
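To make the theory of operation above concrete, here is a hedged sketch of what an overrides file and its use might look like, assuming the module is importable as wasatch.Overrides; the setting name, values, and opcode bytes are purely illustrative and not taken from any real configuration.
```python
import json
import tempfile

from wasatch.Overrides import Overrides

# Hypothetical overrides file: keys other than the reserved ones
# (description, startup, comms_init, min_delay_us) are treated as
# setting names mapping stringified values to arbitrary payloads.
config = {
    "description": "example integration-time override",
    "min_delay_us": 100,
    "integration_time_ms": {
        "15": [{"opcode": 178, "payload": [15, 0]}],
        "30": [{"opcode": 178, "payload": [30, 0]}],
    },
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(config, f)

overrides = Overrides(f.name)
print(overrides.has_override("integration_time_ms"))        # True
print(overrides.get_override("integration_time_ms", 15.0))  # 15.0 normalizes to "15"
```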
|
{
"source": "jfuss/aws-serverlessrepo-python",
"score": 3
}
|
#### File: aws-serverlessrepo-python/serverlessrepo/application_metadata.py
```python
from .exceptions import InvalidApplicationMetadataError
class ApplicationMetadata(object):
"""Class representing SAR metadata."""
# SAM template SAR metadata properties
NAME = 'Name'
DESCRIPTION = 'Description'
AUTHOR = 'Author'
SPDX_LICENSE_ID = 'SpdxLicenseId'
LICENSE_URL = 'LicenseUrl'
README_URL = 'ReadmeUrl'
LABELS = 'Labels'
HOME_PAGE_URL = 'HomePageUrl'
SEMANTIC_VERSION = 'SemanticVersion'
SOURCE_CODE_URL = 'SourceCodeUrl'
def __init__(self, app_metadata):
"""
Initialize the object given SAR metadata properties.
:param app_metadata: Dictionary containing SAR metadata properties
:type app_metadata: dict
"""
self.template_dict = app_metadata # save the original template definitions
self.name = app_metadata.get(self.NAME)
self.description = app_metadata.get(self.DESCRIPTION)
self.author = app_metadata.get(self.AUTHOR)
self.spdx_license_id = app_metadata.get(self.SPDX_LICENSE_ID)
self.license_url = app_metadata.get(self.LICENSE_URL)
self.readme_url = app_metadata.get(self.README_URL)
self.labels = app_metadata.get(self.LABELS)
self.home_page_url = app_metadata.get(self.HOME_PAGE_URL)
self.semantic_version = app_metadata.get(self.SEMANTIC_VERSION)
self.source_code_url = app_metadata.get(self.SOURCE_CODE_URL)
def __eq__(self, other):
"""Return whether two ApplicationMetadata objects are equal."""
return isinstance(other, type(self)) and self.__dict__ == other.__dict__
def validate(self, required_props):
"""
Check if the required application metadata properties have been populated.
:param required_props: List of required properties
:type required_props: list
:return: True, if the metadata is valid
:raises: InvalidApplicationMetadataError
"""
missing_props = [p for p in required_props if not getattr(self, p)]
if missing_props:
missing_props_str = ', '.join(sorted(missing_props))
raise InvalidApplicationMetadataError(properties=missing_props_str)
return True
```
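A short usage sketch of the class above; the metadata values are made up, and the import path mirrors the file layout shown in the heading.
```python
from serverlessrepo.application_metadata import ApplicationMetadata
from serverlessrepo.exceptions import InvalidApplicationMetadataError

metadata = ApplicationMetadata({
    "Name": "hello-world",
    "Author": "example-author",
    # Description intentionally omitted so validation fails below
})

try:
    metadata.validate(["name", "author", "description"])
except InvalidApplicationMetadataError as e:
    print(e)  # reports that 'description' is missing
```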
|
{
"source": "jfussell/chargecalculator",
"score": 2
}
|
#### File: jfussell/chargecalculator/calculator.py
```python
def nimh(capacity, current, loss):
return ((capacity / current) * loss) / 10
```
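The formula is compact, so a worked example may help; the unit interpretation (capacity in mAh, charge current in mA, loss as a percentage so the final division by 10 yields hours) is an assumption, not something stated in the source.
```python
from calculator import nimh

# Assumed units: a 2000 mAh cell charged at 200 mA with a 14% loss factor:
# ((2000 / 200) * 14) / 10 = 14.0 hours
print(nimh(capacity=2000, current=200, loss=14))  # 14.0
```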
|
{
"source": "jfveronelli/sqink",
"score": 2
}
|
#### File: sqink/python/register.py
```python
import sys
from winreg import *
# Use version_info so versions like "3.10" aren't truncated to "3.1"
version = "%d.%d" % sys.version_info[:2]
installpath= sys.prefix
regpath= "SOFTWARE\\Python\\Pythoncore\\%s\\" % version
installkey= "InstallPath"
pythonkey= "PythonPath"
pythonpath= "%s\\Lib;%s\\DLLs" % (installpath, installpath)
def RegisterPy():
try:
reg= OpenKey(HKEY_LOCAL_MACHINE, regpath)
except EnvironmentError:
try:
reg= CreateKey(HKEY_LOCAL_MACHINE, regpath)
SetValue(reg, installkey, REG_SZ, installpath)
SetValue(reg, pythonkey, REG_SZ, pythonpath)
CloseKey(reg)
except:
print("*** Unable to register!")
return
print("--- Python %s is now registered!" % version)
return
if (QueryValue(reg, installkey) == installpath and QueryValue(reg, pythonkey) == pythonpath):
CloseKey(reg)
print("=== Python %s is already registered!" % version)
return
CloseKey(reg)
print("*** Unable to register!")
print("*** You probably have another Python installation!")
def UnRegisterPy():
try:
reg= OpenKey(HKEY_LOCAL_MACHINE, regpath)
except EnvironmentError:
print("*** Python not registered?!")
return
try:
DeleteKey(reg, installkey)
DeleteKey(reg, pythonkey)
DeleteKey(HKEY_LOCAL_MACHINE, regpath)
except:
print("*** Unable to un-register!")
else:
print("--- Python %s is no longer registered!" % version)
if __name__ == "__main__":
RegisterPy()
```
#### File: crossknight/sqink/markdown.py
```python
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import markdown
import mistune
from time import time
def _formatDatetime(dt):
now = time()
tzOffset = (datetime.fromtimestamp(now) - datetime.utcfromtimestamp(now)).total_seconds()
tzInfo = timezone(timedelta(seconds=tzOffset))
return str(dt.replace(tzinfo=timezone.utc).astimezone(tzInfo).replace(tzinfo=None))
def renderHtml(note):
docTitle = note.title if note.title else ""
title = ""
if note.title or note.starred:
title = "<h2 class=\"note-info\">"
if note.starred:
title += "<img src=\"images/star-16x16.png\"/> "
if note.title:
title += note.title
title += "</h2>"
dates = ""
if note.createdOn or note.lastModified:
dates += "<div class=\"note-info note-dates\">"
if note.createdOn:
dates += " <img src=\"images/created-16x16.png\"/> <span>" + _formatDatetime(note.createdOn) +\
"</span>  "
if note.lastModified:
dates += " <img src=\"images/modified-16x16.png\"/> <span>" + _formatDatetime(note.lastModified) + "</span>"
dates += "</div>"
tags = ""
if note.tags:
tags = "<div class=\"note-info note-tags\"> <img src=\"images/tag-16x16.png\"/> "
for tag in note.tags:
tags += "<span>" + tag + "</span> "
tags += "</div>"
separator = "<div class=\"note-separator\"></div>" if title or dates or tags else ""
photo = "<p class=\"note-photo\"><img src=\"notes/" + note.uuid + ".jpg\"/></p>" if note.photo is not None else ""
content = _renderHtmlWithMistune(note.text) if note.text else ""
html = "<!DOCTYPE html><html><head>" +\
"<meta charset=\"UTF-8\"/>" +\
"<link type=\"text/css\" rel=\"stylesheet\" href=\"styles/note.css\"/>" +\
"<title>" + docTitle + "</title>" +\
"</head><body class=\"note\">" +\
title +\
dates +\
tags +\
separator +\
photo +\
content +\
"</body></html>"
note.html = html
return note
def _renderHtmlWithMarkdown(text):
return markdown.markdown(text, output_format="xhtml1")
def _renderHtmlWithMistune(text):
return mistune.markdown(text, escape=True, use_xhtml=True)
```
#### File: sqink/provider/dropbox.py
```python
from crossknight.sqink import createLogger
from crossknight.sqink.domain import isUuid
from crossknight.sqink.domain import NoteStatus
from crossknight.sqink.markdown import renderHtml
from crossknight.sqink.plist import marshalNote
from crossknight.sqink.plist import unmarshalNote
from crossknight.sqink.provider import AConnectionError
from crossknight.sqink.provider import InvalidProxyError
from crossknight.sqink.provider import RemoteNoteProvider
from crossknight.sqink.provider import TokenExpiredError
from dropbox import create_session
from dropbox import Dropbox
from dropbox.exceptions import ApiError
from dropbox.exceptions import InternalServerError
from dropbox.files import FileMetadata
from dropbox.files import WriteMode
from dropbox.oauth import DropboxOAuth2FlowNoRedirect
from requests.exceptions import ConnectionError
from requests.exceptions import HTTPError
from requests.exceptions import ProxyError
_LOG = createLogger("dbox")
def _proxies(proxyHost, proxyPort, proxyUser, proxyPassword):
proxies = {}
if proxyHost:
proxy = proxyHost + ":" + str(proxyPort or 80)
if proxyUser:
proxy = proxyUser + ":" + (proxyPassword or "") + "@" + proxy
proxies["http"] = proxy
proxies["https"] = proxy
return proxies
def online(f):
"""Raises InvalidProxyError when a HTTP proxy is required, or AConnectionError when connection fails."""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except ProxyError:
raise InvalidProxyError
except ConnectionError:
raise AConnectionError
return wrapper
def expires(f):
"""Raises TokenExpiredError when authorization token is expired or revoked."""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except InternalServerError:
raise TokenExpiredError
return wrapper
class SyncFolder:
DayOne = "/apps/Day One/journal.dayone"
Narrate = "/apps/Narrate"
class DropboxAuthorizator:
__APP_KEY = "<KEY>"
__APP_SECRET = "<KEY>"
def __init__(self, proxyHost=None, proxyPort=None, proxyUser=None, proxyPassword=None):
self.__proxies = _proxies(proxyHost, proxyPort, proxyUser, proxyPassword)
self.__oauth = DropboxOAuth2FlowNoRedirect(self.__APP_KEY, self.__APP_SECRET)
self.__oauth.requests_session.proxies = self.__proxies
def authorizationUrl(self):
return self.__oauth.start()
@online
def authorize(self, code):
try:
return self.__oauth.finish(code.strip()).access_token
except HTTPError:
raise TokenExpiredError
def checkFolder(self, accessToken, folder):
try:
client = Dropbox(accessToken, session=create_session(proxies=self.__proxies))
self.__createFolder(client, folder + "/entries/deleted")
self.__createFolder(client, folder + "/photos/deleted")
except ApiError:
return False
return True
@staticmethod
def __createFolder(client, path):
_LOG.info("Creating folder: %s", path)
try:
client.files_create_folder(path)
except ApiError as e:
if e.error.is_path() and e.error.get_path().is_conflict() and e.error.get_path().get_conflict().is_folder():
_LOG.info("Folder %s already exists", path)
else:
raise e
class FileEntry:
def __init__(self, folder, name, lastModified):
self.folder = folder
self.name = name
self.lastModified = lastModified
@staticmethod
def fromMetadata(metadata):
if isinstance(metadata, FileMetadata):
tokens = metadata.path_display.rsplit("/", 1)
folder, name = (tokens[0] or "/", tokens[1]) if len(tokens) == 2 else ("/", tokens[0])
return FileEntry(folder, name, metadata.client_modified)
return None
class DropboxNoteProvider(RemoteNoteProvider):
__DAY_ONE_EXTENSION = ".doentry"
def __init__(self, accessToken, folder, proxyHost=None, proxyPort=None, proxyUser=None, proxyPassword=None):
proxies = _proxies(proxyHost, proxyPort, proxyUser, proxyPassword)
self.__token = accessToken
self.__basePath = folder
self.__notesPath = folder + "/entries"
self.__removedNotesPath = self.__notesPath + "/deleted"
self.__photosPath = folder + "/photos"
self.__client = Dropbox(self.__token, session=create_session(proxies=proxies))
self.__notesCache = {}
self.__dayOneFlavor = folder == SyncFolder.DayOne
@online
@expires
def sync(self):
_LOG.info("Listing all notes and photos")
folder = self.__client.files_list_folder(self.__basePath, recursive=True)
files = list(filter(lambda e: e is not None, map(FileEntry.fromMetadata, folder.entries)))
while folder.has_more:
folder = self.__client.files_list_folder_continue(folder.cursor)
files.extend(filter(lambda e: e is not None, map(FileEntry.fromMetadata, folder.entries)))
notes = {}
for file in filter(lambda f: f.folder == self.__notesPath and isUuid(self.__normalizeNoteName(f.name)), files):
uuid = self.__normalizeNoteName(file.name)
notes[uuid] = NoteStatus(uuid, file.lastModified)
for file in filter(lambda f: f.folder == self.__photosPath and f.name.endswith(".jpg"), files):
uuid = file.name[:-4]
if uuid in notes:
notes[uuid].hasPhoto = True
for file in filter(lambda f: f.folder == self.__removedNotesPath and isUuid(self.__normalizeNoteName(f.name)),
files):
uuid = self.__normalizeNoteName(file.name)
if uuid in notes:
if file.lastModified >= notes[uuid].lastModified:
_LOG.warning("Sync integrity check deleting note: %s", uuid)
try:
self.__client.files_delete(self.__notePath(uuid))
except ApiError:
_LOG.warning("Note %s not found", uuid)
if notes[uuid].hasPhoto:
_LOG.warning("Sync integrity check deleting photo: %s", uuid)
try:
self.__client.files_delete(self.__photoPath(uuid))
except ApiError:
_LOG.warning("Photo %s not found", uuid)
del notes[uuid]
else:
_LOG.warning("Sync integrity check deleting REMOVED note: %s", uuid)
try:
self.__client.files_delete(self.__removedNotePath(uuid))
except ApiError:
_LOG.warning("REMOVED note %s not found", uuid)
continue
notes[uuid] = NoteStatus(uuid, file.lastModified, True)
self.__notesCache = notes
return notes
@online
@expires
def get(self, uuid):
_LOG.info("Getting note: %s", uuid)
metadata, response = self.__client.files_download(self.__notePath(uuid))
with response:
note = unmarshalNote(response.content, metadata.client_modified)
if uuid not in self.__notesCache or self.__notesCache[uuid].hasPhoto:
_LOG.info("Getting photo: %s", uuid)
try:
with self.__client.files_download(self.__photoPath(uuid))[1] as response:
note.photo = response.content
except ApiError as e:
if e.error.is_path() and e.error.get_path().is_not_found():
_LOG.warning("Photo %s does not exist", uuid)
else:
raise e
return renderHtml(note)
@online
@expires
def add(self, note):
uuid = note.uuid
_LOG.info("Adding note: %s", uuid)
self.__uploadFile(self.__notePath(uuid), note.lastModified, marshalNote(note), overwrite=False)
if note.photo:
_LOG.info("Adding photo: %s", uuid)
self.__uploadFile(self.__photoPath(uuid), note.lastModified, note.photo)
elif uuid in self.__notesCache and self.__notesCache[uuid].hasPhoto:
_LOG.info("Deleting photo: %s", uuid)
try:
self.__client.files_delete(self.__photoPath(uuid))
except ApiError:
_LOG.warning("Photo %s not found", uuid)
# Clean removed note if exists
if uuid in self.__notesCache and self.__notesCache[uuid].removed:
_LOG.info("Deleting REMOVED note: %s", uuid)
try:
self.__client.files_delete(self.__removedNotePath(uuid))
except ApiError:
_LOG.warning("REMOVED note %s not found", uuid)
@online
@expires
def update(self, note):
uuid = note.uuid
# Check if note exists
if self.__notesCache and (uuid not in self.__notesCache or self.__notesCache[uuid].removed):
raise RuntimeError("Note[uuid=%s] does not exist" % uuid)
        # Dropbox does not update the client_modified date when the content hasn't changed. So the note is removed first.
_LOG.info("Trying to delete old note before updating: %s", uuid)
try:
self.__client.files_delete(self.__notePath(uuid))
except ApiError:
pass
_LOG.info("Updating note: %s", uuid)
self.__uploadFile(self.__notePath(uuid), note.lastModified, marshalNote(note))
if note.photo:
_LOG.info("Updating photo: %s", uuid)
self.__uploadFile(self.__photoPath(uuid), note.lastModified, note.photo)
elif uuid not in self.__notesCache or self.__notesCache[uuid].hasPhoto:
_LOG.info("Deleting photo: %s", uuid)
try:
self.__client.files_delete(self.__photoPath(uuid))
except ApiError:
_LOG.warning("Photo %s not found", uuid)
@online
@expires
def remove(self, note):
uuid = note.uuid
# Remove note if exists
if uuid in self.__notesCache and not self.__notesCache[uuid].removed:
_LOG.info("Deleting note: %s", uuid)
try:
self.__client.files_delete(self.__notePath(uuid))
except ApiError:
_LOG.warning("Note %s not found", uuid)
# Remove photo if exists
if uuid in self.__notesCache and self.__notesCache[uuid].hasPhoto:
_LOG.info("Deleting photo: %s", uuid)
try:
self.__client.files_delete(self.__photoPath(uuid))
except ApiError:
_LOG.warning("Photo %s not found", uuid)
_LOG.info("Adding REMOVED note: %s", uuid)
self.__uploadFile(self.__removedNotePath(uuid), note.lastModified, b"")
def __uploadFile(self, path, lastModified, content, overwrite=True):
mode = WriteMode.overwrite if overwrite else WriteMode.add
self.__client.files_upload(content, path, mode=mode, client_modified=lastModified)
def __normalizeNoteName(self, name):
if self.__dayOneFlavor and name.endswith(self.__DAY_ONE_EXTENSION):
name = name[:-(len(self.__DAY_ONE_EXTENSION))]
return name
def __buildNotePath(self, parentPath, uuid):
path = parentPath + "/" + uuid
if self.__dayOneFlavor:
path += self.__DAY_ONE_EXTENSION
return path
def __notePath(self, uuid):
return self.__buildNotePath(self.__notesPath, uuid)
def __removedNotePath(self, uuid):
return self.__buildNotePath(self.__removedNotesPath, uuid)
def __photoPath(self, uuid):
return self.__photosPath + "/" + uuid + ".jpg"
```
#### File: sqink/provider/gdrive.py
```python
from base64 import b64encode
from crossknight.sqink import createLogger
from crossknight.sqink.domain import isUuid
from crossknight.sqink.domain import NoteStatus
from crossknight.sqink.markdown import renderHtml
from crossknight.sqink.plist import marshalNote
from crossknight.sqink.plist import unmarshalNote
from crossknight.sqink.provider import AConnectionError
from crossknight.sqink.provider import InvalidProxyError
from crossknight.sqink.provider import RemoteNoteProvider
from crossknight.sqink.provider import TokenExpiredError
from datetime import datetime
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseUpload
from httplib2 import Http
from httplib2 import HttpLib2Error
from httplib2 import HTTPSConnectionWithTimeout
from httplib2 import ProxyInfo
from httplib2 import SCHEME_TO_CONNECTION
from io import BytesIO
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import FlowExchangeError
from oauth2client.client import OAuth2Credentials
from oauth2client.client import OAuth2WebServerFlow
_LOG = createLogger("gdrive")
def online(f):
"""Raises InvalidProxyError when a HTTP proxy is required, or AConnectionError when connection fails."""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except (ConnectionError, TimeoutError, HttpLib2Error):
raise AConnectionError
except OSError as e:
if "407" in str(e):
raise InvalidProxyError
raise e
return wrapper
def expires(f):
"""Raises TokenExpiredError when authorization token is expired or revoked."""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except (AccessTokenRefreshError, FlowExchangeError):
raise TokenExpiredError
return wrapper
def createHttp(proxyHost=None, proxyPort=None, proxyUser=None, proxyPassword=None):
proxyInfo = None
if proxyHost:
proxyInfo = ProxyInfo(3, proxyHost, proxyPort if proxyPort else 80, proxy_user=proxyUser,
proxy_pass=proxyPassword)
return Http(proxy_info=proxyInfo)
class HTTPSProxyConnectionWithTimeout(HTTPSConnectionWithTimeout):
def __init__(self, host, port=None, key_file=None, cert_file=None, timeout=None, proxy_info=None, *args, **kwargs):
if isinstance(proxy_info, ProxyInfo) and proxy_info.proxy_type == 3 and proxy_info.proxy_host\
and proxy_info.proxy_port:
headers = {}
if proxy_info.proxy_user and proxy_info.proxy_pass:
credentials = "%s:%s" % (proxy_info.proxy_user, proxy_info.proxy_pass)
credentials = b64encode(credentials.encode("utf-8")).strip().decode("utf-8")
headers["Proxy-Authorization"] = "Basic " + credentials
HTTPSConnectionWithTimeout.__init__(self, proxy_info.proxy_host, port=proxy_info.proxy_port,
key_file=key_file, cert_file=cert_file, timeout=timeout,
proxy_info=proxy_info, *args, **kwargs)
self.set_tunnel(host, port if port else 443, headers)
else:
HTTPSConnectionWithTimeout.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file,
timeout=timeout, proxy_info=proxy_info, *args, **kwargs)
# override in httplib2 to support proxies and proxy basic authentication
SCHEME_TO_CONNECTION["https"] = HTTPSProxyConnectionWithTimeout
class GoogleDriveAuthorizator:
__APP_ID = "897700201444-0oju80gfjirsuogns9brqctnnf9tquq6.apps.googleusercontent.com"
__APP_SECRET = "<KEY>
__SCOPE = "https://www.googleapis.com/auth/drive.appdata"
__REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
def __init__(self, proxyHost=None, proxyPort=None, proxyUser=None, proxyPassword=None):
self.__http = createHttp(proxyHost, proxyPort, proxyUser, proxyPassword)
self.__oauth = OAuth2WebServerFlow(self.__APP_ID, self.__APP_SECRET, self.__SCOPE,
redirect_uri=self.__REDIRECT_URI)
def authorizationUrl(self):
return self.__oauth.step1_get_authorize_url()
@online
@expires
def authorize(self, code):
credentials = self.__oauth.step2_exchange(code, http=self.__http)
token = credentials.refresh_token
_LOG.info("Acquired token: %s", token)
return token
@classmethod
def tokenToCredentials(cls, refreshToken):
return OAuth2Credentials(client_id=cls.__APP_ID, client_secret=cls.__APP_SECRET, token_uri=GOOGLE_TOKEN_URI,
refresh_token=refreshToken, access_token=None, token_expiry=None, user_agent=None)
class GoogleDriveNoteProvider(RemoteNoteProvider):
FOLDER = "appDataFolder"
NOTE_MIMETYPE = "application/xml"
NOTE_EXTENSION = ".entry"
PHOTO_MIMETYPE = "image/jpeg"
PHOTO_EXTENSION = ".jpg"
REMOVED_NOTE_MIMETYPE = "application/octet-stream"
REMOVED_NOTE_EXTENSION = ".deleted"
def __init__(self, refreshToken, proxyHost=None, proxyPort=None, proxyUser=None, proxyPassword=None):
self.__noteStatusCache = {}
self.__credentials = GoogleDriveAuthorizator.tokenToCredentials(refreshToken)
self.__http = self.__credentials.authorize(createHttp(proxyHost, proxyPort, proxyUser, proxyPassword))
self.__service = None
@online
@expires
def sync(self):
noteStatus = {}
pageToken = None
while True:
_LOG.info("Listing all notes and photos")
fields = "files(id,name,modifiedTime),nextPageToken"
result = self._service().list(spaces=self.FOLDER, fields=fields, pageSize=1000, pageToken=pageToken)\
.execute()
pageToken = result.get("nextPageToken")
for item in filter(lambda i: i["name"].endswith(self.NOTE_EXTENSION), result["files"]):
name = item["name"][:-6]
if isUuid(name):
ns = NoteStatus(name, self.__lastModified(item))
ns.noteId = item["id"]
noteStatus[name] = ns
for item in filter(lambda i: i["name"].endswith(self.PHOTO_EXTENSION), result["files"]):
name = item["name"][:-4]
if name in noteStatus:
ns = noteStatus[name]
ns.hasPhoto = True
ns.photoId = item["id"]
for item in filter(lambda i: i["name"].endswith(self.REMOVED_NOTE_EXTENSION), result["files"]):
name = item["name"][:-8]
if isUuid(name):
ns = NoteStatus(name, self.__lastModified(item), True)
ns.noteId = item["id"]
if name in noteStatus:
nso = noteStatus[name]
if ns.lastModified >= nso.lastModified:
_LOG.warning("Sync integrity check deleting note: %s", name)
self.__remove(nso.noteId)
if nso.hasPhoto:
_LOG.warning("Sync integrity check deleting photo: %s", name)
self.__remove(nso.photoId)
else:
_LOG.warning("Sync integrity check deleting REMOVED note: %s", name)
self.__remove(ns.noteId)
continue
noteStatus[name] = ns
if not pageToken:
break
self.__noteStatusCache = noteStatus
return noteStatus
@online
@expires
def get(self, uuid):
ns = self.__noteStatusCache.get(uuid)
if not ns or ns.removed:
raise RuntimeError("Note[uuid=%s] does not exist" % uuid)
_LOG.info("Getting note: %s", uuid)
content = self._service().get_media(fileId=ns.noteId).execute()
note = unmarshalNote(content, ns.lastModified)
if ns.hasPhoto:
_LOG.info("Getting photo: %s", uuid)
note.photo = self._service().get_media(fileId=ns.photoId).execute()
return renderHtml(note)
@online
@expires
def add(self, note):
uuid = note.uuid
if uuid in self.__noteStatusCache:
ns = self.__noteStatusCache[uuid]
if not ns.removed:
raise RuntimeError("Note[uuid=%s] already exists" % uuid)
else:
_LOG.info("Deleting REMOVED note: %s", uuid)
self.__remove(ns.noteId)
metadata = {
"name": uuid + self.NOTE_EXTENSION,
"modifiedTime": self.__modifiedTime(note),
"mimeType": self.NOTE_MIMETYPE,
"parents": [self.FOLDER]
}
content = MediaIoBaseUpload(BytesIO(marshalNote(note)), mimetype=self.NOTE_MIMETYPE)
_LOG.info("Adding note: %s", uuid)
self._service().create(body=metadata, media_body=content, fields="id").execute()
if note.photo:
self.__uploadPhoto(note)
@online
@expires
def update(self, note):
uuid = note.uuid
ns = self.__noteStatusCache.get(uuid)
if not ns or ns.removed:
raise RuntimeError("Note[uuid=%s] does not exist" % uuid)
if ns.hasPhoto:
_LOG.info("Deleting photo: %s", uuid)
self.__remove(ns.photoId)
_LOG.info("Updating note: %s", uuid)
metadata = {"modifiedTime": self.__modifiedTime(note)}
content = MediaIoBaseUpload(BytesIO(marshalNote(note)), mimetype=self.NOTE_MIMETYPE)
self._service().update(fileId=ns.noteId, body=metadata, media_body=content, fields="id").execute()
if note.photo:
self.__uploadPhoto(note)
@online
@expires
def remove(self, note):
uuid = note.uuid
if uuid in self.__noteStatusCache:
ns = self.__noteStatusCache[uuid]
_LOG.info("Deleting note: %s", uuid)
self.__remove(ns.noteId)
if ns.hasPhoto:
_LOG.info("Deleting photo: %s", uuid)
self.__remove(ns.photoId)
metadata = {
"name": uuid + self.REMOVED_NOTE_EXTENSION,
"modifiedTime": self.__modifiedTime(note),
"mimeType": self.REMOVED_NOTE_MIMETYPE,
"parents": [self.FOLDER]
}
_LOG.info("Adding REMOVED note: %s", uuid)
self._service().create(body=metadata, fields="id").execute()
def _service(self):
if self.__service is None:
self.__service = build("drive", "v3", http=self.__http).files()
return self.__service
@staticmethod
def __lastModified(metadata):
return datetime.strptime(metadata["modifiedTime"], "%Y-%m-%dT%H:%M:%S.%fZ").replace(microsecond=0)
@staticmethod
def __modifiedTime(note):
return note.lastModified.strftime("%Y-%m-%dT%H:%M:%S.000Z")
def __uploadPhoto(self, note):
_LOG.info("Adding photo: %s", note.uuid)
metadata = {
"name": note.uuid + self.PHOTO_EXTENSION,
"mimeType": self.PHOTO_MIMETYPE,
"modifiedTime": self.__modifiedTime(note),
"parents": [self.FOLDER]
}
content = MediaIoBaseUpload(BytesIO(note.photo), mimetype=self.PHOTO_MIMETYPE)
self._service().create(body=metadata, media_body=content, fields="id").execute()
def __remove(self, fileId):
self._service().delete(fileId=fileId).execute()
```
#### File: sqink/test/filesystemtest.py
```python
from crossknight.sqink.provider.filesystem import FilesystemNoteProvider
from os.path import dirname
from os.path import normpath
from unittest import TestCase
class FilesystemNoteProviderTest(TestCase):
def testListShouldSucceed(self):
path = normpath(dirname(__file__) + "/resources")
provider = FilesystemNoteProvider(path)
notes = provider.list()
self.assertEqual(1, len(notes))
self.assertEqual("Probando 1,2,3...", notes[0].title)
```
|
{
"source": "jfvilaro/rpg_feature_tracking_analysis",
"score": 2
}
|
#### File: rpg_feature_tracking_analysis/feature_track_visualizer/visualizer.py
```python
from os.path import isfile
import os
import cv2
from os.path import join
import numpy as np
import tqdm
import random
from big_pun.tracker_utils import filter_first_tracks, getTrackData
def component():
return random.randint(0, 255)
class FeatureTracksVisualizer:
def __init__(self, gt_file,track_file, dataset, params):
self.params = params
self.dataset = dataset
self.tracks, self.feature_ids, self.colors, self.colors_id = self.loadFeatureTracks(gt_file,track_file)
self.min_time_between_screen_refresh_ms = 5
self.max_time_between_screen_refresh_ms = 100
self.is_paused = False
self.is_looped = False
self.marker = params["marker"]
self.computeMinMaxSpeed()
self.updateDisplayRate()
self.times = np.linspace(self.min_stamp, self.max_stamp, int(self.params['framerate'] * (self.max_stamp - self.min_stamp)))
self.time_index = 0
self.cv2_window_name = 'tracks'
cv2.namedWindow(self.cv2_window_name, cv2.WINDOW_NORMAL)
def cropGT(self, gt, predictions):
gt = {i: g for i,g in gt.items() if i in predictions}
predictions = {i: p for i,p in predictions.items() if i in gt}
for i, gt_track in gt.items():
prediction_track = predictions[i]
t_max = prediction_track[-1,0]
gt_track = gt_track[gt_track[:,0]<=t_max]
gt[i] = gt_track
return gt
def discardOnThreshold(self, predictions, gt, thresh):
assert set(gt.keys()) == set(predictions.keys())
for i, gt_track in gt.items():
pred_track = predictions[i]
x_p_interp = np.interp(gt_track[:,0], pred_track[:,0], pred_track[:,1])
y_p_interp = np.interp(gt_track[:,0], pred_track[:,0], pred_track[:,2])
error = np.sqrt((x_p_interp-gt_track[:,1])**2 + (y_p_interp-gt_track[:,2])**2)
idxs = np.where(error > thresh)[0]
if len(idxs) == 0:
continue
t_max = gt_track[idxs[0],0]
gt[i] = gt_track[:idxs[0]]
predictions[i] = pred_track[pred_track[:,0]<t_max]
return predictions, gt
def loadFeatureTracks(self, gt_file, track_file, method="estimation", color=[0, 255, 0], gt_color=[255, 0, 255]):
tracks = {}
colors = {}
colors_id = {}
color = [r for r in reversed(color)]
if self.params['visualisation_mode'] == "estimation":
# load track
colors["estimation"] = color
data = np.genfromtxt(track_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks[method] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same "
"time. All tracks except the first have been discarded.")
# load gt
tracks_csv = join(gt_file)
if isfile(tracks_csv):
gt = getTrackData(tracks_csv)
colors["gt"] = gt_color
# if true, crop all tracks from gt to have the same length as the predictions.
if self.params["crop_to_predictions"]:
gt = self.cropGT(gt, tracks[method])
if self.params["error_threshold"] > 0:
tracks[method], gt = self.discardOnThreshold(tracks[method], gt, self.params["error_threshold"])
tracks["gt"] = gt
elif self.params['visualisation_mode'] == "gt":
# load gt
data = np.genfromtxt(gt_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks["gt"] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same "
"time. All tracks except the first have been discarded.")
colors["gt"] = gt_color
elif self.params['visualisation_mode'] == "track":
# load track
data = np.genfromtxt(track_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks["track"] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same "
"time. All tracks except the first have been discarded.")
colors["track"] = gt_color
feature_ids = {label: list(tracks_dict.keys()) for label, tracks_dict in tracks.items()}
max_stamp = -1
min_stamp = 10**1000
for label, tracks_dict in tracks.items():
for i, track in tracks_dict.items():
min_stamp = min([min_stamp, min(track[:,0])])
max_stamp = max([max_stamp, max(track[:,0])])
self.min_stamp = min_stamp
self.max_stamp = max_stamp
return tracks, feature_ids, colors, colors_id
def pause(self):
self.is_paused = True
def unpause(self):
self.is_paused = False
def togglePause(self):
self.is_paused = not self.is_paused
def toggleLoop(self):
self.is_looped = not self.is_looped
def forward(self, num_timesteps = 1):
if self.is_looped:
self.time_index = (self.time_index + 1) % len(self.times)
else:
self.time_index = min(self.time_index + num_timesteps, len(self.times) - 1)
def backward(self, num_timesteps = 1):
self.time_index = max(self.time_index - num_timesteps, 0)
def goToBegin(self):
self.time_index = 0
def goToEnd(self):
self.time_index = len(self.times) - 1
def increaseTrackHistoryLength(self):
self.params['track_history_length'] = self.params['track_history_length'] * 1.25
def decreaseTrackHistoryLength(self):
self.params['track_history_length'] = self.params['track_history_length'] / 1.25
def computeMinMaxSpeed(self):
self.max_speed = 1000.0 / (self.min_time_between_screen_refresh_ms * self.params['framerate'])
self.min_speed = 1000.0 / (self.max_time_between_screen_refresh_ms * self.params['framerate'])
def updateDisplayRate(self):
self.params['speed'] = np.clip(self.params['speed'], self.min_speed, self.max_speed)
self.time_between_screen_refresh_ms = int(1000.0 / (self.params['speed'] * self.params['framerate']))
def writeToVideoFile(self, f):
height, width, _ = self.dataset.images[0].shape
height, width = int(self.params["scale"]*height), int(self.params["scale"]*width)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.video_writer = cv2.VideoWriter(f, fourcc, self.params['framerate']*self.params["speed"], (width, height))
for t in tqdm.tqdm(self.times):
image_to_display = self.update(t)
self.video_writer.write(image_to_display)
self.cleanup()
def visualizationLoop(self):
while True:
t = self.times[self.time_index]
image_to_display = self.update(t)
cv2.imshow(self.cv2_window_name, image_to_display)
if not self.is_paused:
self.forward(1)
c = cv2.waitKey(self.time_between_screen_refresh_ms)
key = chr(c & 255)
if c == 27: # 'Esc': Quit
break
elif key == 'r': # 'r': Reset
self.goToBegin()
self.unpause()
elif key == 'p' or c == 32: # 'p' or 'Space': Toggle play/pause
self.togglePause()
elif key == "a": # 'a': Step backward
self.backward(1)
self.pause()
elif key == "d": # 'd': Step forward
self.forward(1)
self.pause()
elif key == "s": # 's': Go to beginning
self.goToBegin()
self.pause()
elif key == "w": # 'w': Go to end
self.goToEnd()
self.pause()
elif key == "e": # 'e': Increase track history length
self.increaseTrackHistoryLength()
elif key == "q": # 'q': Decrease track history length
self.decreaseTrackHistoryLength()
elif key == 'l': # 'l': Toggle looping
self.toggleLoop()
self.cleanup()
def cleanup(self):
cv2.destroyAllWindows()
if hasattr(self, 'video_writer'):
self.video_writer.release()
def update(self, t, track_history_length = None):
if track_history_length == None:
track_history_length = self.params['track_history_length']
return self.plotBetween(t - track_history_length, t)
def getImageClosestTo(self, t):
image_index = np.searchsorted(self.dataset.times, t, side="left") - 1
return self.dataset.images[image_index]
def drawMarker(self, img, x, y, color):
c = int(3 * self.params["scale"])
t = int(1 * self.params["scale"])
if self.marker == "cross":
cv2.line(img, (x-c, y), (x+c, y), color, thickness=t)
cv2.line(img, (x, y-c), (x, y+c), color, thickness=t)
elif self.marker == "circle":
cv2.circle(img, center=(x, y), radius=c, color=color, thickness=t)
def drawLegend(self, image, legend, size):
s = self.params["scale"]
off_x = int(size[1])
t = int(10 * s)
n = int(70 *s)
for label, color in legend.items():
if self.params['visualisation_mode'] == "gt" or label == "gt":
label = "ground truth"
if self.params['visualisation_mode'] == "track" or label == "track":
label = "track"
cv2.putText(image, label, (off_x-n, t), cv2.FONT_HERSHEY_COMPLEX, int(s/4), color)
t += int(10 *s)
return image
def plotBetween(self, t0, t1):
image = self.getImageClosestTo(t1).copy()
# resize
h,w,_ = image.shape
s = self.params["scale"]
image = cv2.resize(image, dsize=(int(w*s), int(h*s)))
image = self.drawLegend(image, self.colors, image.shape[:2])
for label, tracks_dict in self.tracks.items():
for feature_id, track in tracks_dict.items():
t = track[:,0]
track_segment = track[(t<=t1) & (t>=t0)]
if len(track_segment) > 0:
for point in track_segment[:-1]:
_, x, y = (s*point).astype(int)
#trail_marker = "cross" if label == "gt" else "dot"
trail_marker = "dot"
if self.params["visualisation_mode"] == "gt" or self.params["visualisation_mode"] == "track":
self.drawTrail(image, x, y, self.colors_id[feature_id], marker=trail_marker)
else:
self.drawTrail(image, x, y, self.colors[label], marker=trail_marker)
_, x, y = (s*track_segment[-1]).astype(int)
if self.params["visualisation_mode"] == "gt" or self.params["visualisation_mode"] == "track":
self.drawMarker(image, x, y, self.colors_id[feature_id])
else:
self.drawMarker(image, x, y, self.colors[label])
return image
def drawTrail(self, img, x, y, entry, marker="dot"):
c = 0*self.params["scale"]
if marker=="dot":
x_min = int(max([x - c, 0]))
x_max = int(min([x + c+self.params["scale"], img.shape[1]]))
y_min = int(max([y - c, 0]))
y_max = int(min([y + c+self.params["scale"], img.shape[0]]))
img[y_min:y_max,x_min:x_max,:] = np.array(entry)
elif marker=="cross":
c = int(2 * self.params['scale'])
t = int(.5 * self.params["scale"])
x_min = max([x - c, 0])
x_max = int(min([x + c+self.params["scale"], img.shape[1]]))
y_min = max([y - c, 0])
y_max = int(min([y + c+self.params["scale"], img.shape[0]]))
xmi = int(x - t / 2)
ymi = int(y - t / 2)
xma = int(x + t / 2)
yma = int(y + t / 2)
if x < 0 or x > img.shape[1] - 1 or y < 0 or y > img.shape[0] - 1:
return
img[y_min:y_max, xmi:xma, :] = np.array(entry)
img[ymi:yma, x_min:x_max, :] = np.array(entry)
```
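A minimal driver sketch for the visualizer above. The `params` keys are taken from the attribute accesses in the class, the dataset stand-in only needs `images` and `times`, and the two track-file paths are hypothetical placeholders for whitespace-separated `id t x y` files as read by `np.genfromtxt` in `loadFeatureTracks`:
```python
import numpy as np
from types import SimpleNamespace

# Import path inferred from the file location above.
from feature_track_visualizer.visualizer import FeatureTracksVisualizer

# Stand-in dataset: a list of frames plus their (sorted) timestamps.
dataset = SimpleNamespace(
    images=[np.zeros((180, 240, 3), dtype=np.uint8) for _ in range(10)],
    times=np.linspace(0.0, 1.0, 10),
)

params = {
    "visualisation_mode": "estimation",  # or "gt" / "track"
    "marker": "circle",                  # or "cross"
    "framerate": 30.0,
    "speed": 1.0,
    "scale": 2.0,
    "track_history_length": 0.2,
    "crop_to_predictions": True,
    "error_threshold": 10,
}

viz = FeatureTracksVisualizer("gt_tracks.txt", "estimated_tracks.txt", dataset, params)
viz.visualizationLoop()               # interactive playback (Esc quits)
# viz.writeToVideoFile("tracks.avi")  # or render straight to a video file
```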
|
{
"source": "jfw225/ptlib",
"score": 3
}
|
#### File: ptlib/core/job.py
```python
import numpy as np
from typing import Hashable, Tuple, Any
class JobSpec:
""" Job specification. *** COME BACK *** """
def __init__(self,
shape: Tuple[int] = None,
dtype: np.dtype = None,
*,
name: Hashable = None,
example: np.ndarray = None):
"""
Parameters:
shape -- Tuple[int]
The shape of the data.
dtype -- np.dtype
The type of the data.
name -- Hashable
The key that will be used to index the `SubJob` in the `Job`.
example -- ndarray
An example data to infer structure and space requirements of
the data (if provided, shape and dtype will be ignored)
"""
self.next = None
# if no shape nor example is given, assumed to be an empty spec
self._is_empty = shape is None and example is None
if self._is_empty:
self.shape = (0,)
self.dtype = None
return
self.name = name
if example is not None:
example = np.array(example)
shape, dtype = example.shape, example.dtype
self.shape = np.array(shape, dtype=np.ulonglong)
self.dtype = dtype
self.nbytes = int(np.nbytes[self.dtype] * np.product(self.shape))
@classmethod
def from_output_job(cls, output_job: "Job" or np.ndarray):
"""
Alternative constructor for determining job specifications from an
instance of `ptlib.core.job.Job`.
"""
job_spec_list = [cls(example=job) for job in output_job]
job_specs = job_spec_list.pop(0)
for next_job_spec in job_spec_list:
job_specs += next_job_spec
return job_specs
def get_nbytes(self, capacity: int = 1):
"""
Calculates the number of bytes required to allocate a `ptlib.Job`
specified by `self` in a `ptlib.Queue` of size `capacity`.
"""
return capacity * sum([js.nbytes for js in self])
def __iter__(self):
js = self
while js is not None:
yield js
js = js.next
def __add__(self, other_js: "JobSpec") -> "JobSpec":
"""
LinkedList implementation allows for JobSpec objects to be combined
using the addition operator `+`.
"""
if not isinstance(other_js, JobSpec):
return self
if self._is_empty:
return other_js
js = self
while js.next is not None:
js = js.next
js.next = other_js
return self
def __repr__(self):
return "[" + str(self) + "]"
def __str__(self):
s = " | ".join(
[f"Name: {js.name}, Shape: {js.shape}, DType: {js.dtype}" for js in self])
return "[" + s + "]"
class Job(dict):
"""
The Job object. Can be used to infer structure of jobs and as a job
itself.
"""
def __init__(self, job_spec: JobSpec = None):
super().__init__()
# if no job spec is passed, then this instance is used for inference
if job_spec is None:
# if dictionary is set-sliced with object v, v is stored in this
self._subjob = None
return
# create local buffer for entire job
self._buffer = np.ndarray(job_spec.get_nbytes(), dtype=np.int8)
# create subjobs
offset = 0
for js in job_spec:
self[js.name] = np.ndarray(shape=js.shape,
dtype=js.dtype,
buffer=self._buffer,
offset=offset)
offset += js.nbytes
def infer(self):
"""
Infers the structure of each `SubJob` and returns a `JobSpec` object.
Calling this function also collapses each embedded `Job` object into
a `SubJob`.
"""
job_spec = JobSpec()
for key, subjob in self.items():
if isinstance(subjob, Job):
subjob = self[key] = subjob.collapse()
job_spec = job_spec + JobSpec(name=key, example=subjob)
return job_spec, self
def collapse(self):
"""
Recursively collapses embedded `Job` objects into a single numpy
array.
"""
if self._subjob is not None:
return self._subjob
subjobs = list()
for subjob in self.values():
if isinstance(subjob, Job):
subjob = subjob.collapse()
subjobs.append(subjob)
return np.array(subjobs)
def __getitem__(self, k):
# print(k)
if k not in self:
self[k] = Job()
return super().__getitem__(k)
def __setitem__(self, k, v) -> None:
# print(k, v)
# if `k` is a slice, then
if not isinstance(k, slice):
return super().__setitem__(k, v)
self._subjob = v
def __getattribute__(self, __name: str):
"""
Overloaded to ensure that inferencing of job structure is not stopped
due to a method call that does not pertain to the structure. For
example, if `subjob1.fill(0)` is called (where `fill` is a method of a
`numpy.ndarray`), we want to continue to infer the structure of the
subjob without throwing an error.
"""
try:
return super().__getattribute__(__name)
except AttributeError:
return Job()
def __call__(self, *args: Any, **kwds: Any) -> Any:
"""
Overloaded to protect runtime interruption during structure inference.
"""
try:
return super().__call__(*args, **kwds)
except TypeError and AttributeError:
return None
```
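A short usage sketch of the two classes above, assuming the module is importable as `ptlib.core.job` (inferred from the file path): `JobSpec` objects chain with `+`, and a `Job` built from the combined spec exposes each named sub-job as a typed view into one flat byte buffer:
```python
import numpy as np

from ptlib.core.job import Job, JobSpec  # path inferred from the file location

# A job carrying an image frame and a timestamp.
spec = JobSpec(name="frame", shape=(480, 640, 3), dtype=np.uint8) \
    + JobSpec(name="ts", shape=(1,), dtype=np.float64)

print(spec)                          # one entry per chained sub-spec
print(spec.get_nbytes(capacity=4))   # bytes a 4-slot queue would need

job = Job(spec)                      # allocates a single flat int8 buffer
job["frame"].fill(0)                 # named views all share that buffer
job["ts"][0] = 123.456
```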
#### File: ptlib/core/queue.py
```python
import numpy as np
from multiprocessing.shared_memory import SharedMemory
from multiprocessing import Lock
from ptlib.core.job import JobSpec, Job
class BaseQueue:
""" Queue. *** COME BACK *** """
class Wait:
""" Flag for lock acquisition wait. """
class Empty:
""" Flag for empty queue. """
class Full:
""" Flag for full queue. """
class Closed:
""" Flag for closed queue. """
def __init__(self, *args, **kwargs):
self.job = None
def get(self):
return []
def put(self):
return True
def close(self):
pass
def _link_mem(self, create_local=False):
pass
class FIFOQueue(BaseQueue):
""" Object to facilitate memory management across parallel processes.
Attributes: (** update with jobs and replace arr with buffer or buf)
_job_buffer -- ptlib.core.job.Job or (capacity x shape)-darray
Array of arrays linked to the shared memory buffer for asynchronous
data transfer.
_local_job_buffer -- ptlib.core.job.Job or shape-darray
Local copy of a single job used to load a temporary copy of an
_job_buffer[i]. This is only created if `_link_mem` is called with
kwarg `create_local=True`. Very important that this is created if
queue is acting as an input or else data in `_job_buffer` might get
overwritten by an upstream task before it is used.
_arr_sel -- (3 x 1)-darray
Buffer with following elements:
=+= `arr_sel[0]=i` indicates that `put()` will store data in
`arr_dat[i]`
=+= `arr_sel[1]=j` indicates that `get()` will retrieve data
from `arr_dat[j]`.
=+= `arr_sel[2]=k` where `k==0` indicates queue is open and
`k==1` indicates queue is closed.
_arr_chk -- (capacity x 1)-darray
Buffer where each `arr_chk[i]` is 1 if `arr_dat[i]` holds unread data or 0 if
`arr_dat[i]` is empty.
"""
def __init__(self,
capacity: int,
job_spec: JobSpec):
"""
Parameters:
capacity -- int
The number of data-sized slots to send data.
job_spec -- ptlib.core.job.JobSpec
Specification for the structure of the input job into the queue.
"""
self.capacity = capacity
self.job_spec = job_spec
# define local job buffer
self._job_buffer = None
# create shared memory objects
self._shm_dat = SharedMemory(
create=True, size=job_spec.get_nbytes(capacity))
self._shm_sel = SharedMemory(create=True, size=3)
self._shm_chk = SharedMemory(create=True, size=capacity)
# initialize selection and check arrays
np.ndarray(3, dtype=np.int8, buffer=self._shm_sel.buf).fill(0)
np.ndarray(capacity, dtype=np.int8, buffer=self._shm_chk.buf).fill(0)
# define arrays
self._arr_dat = np.ndarray(0)
self._arr_sel = np.ndarray(0)
self._arr_chk = np.ndarray(0)
# create selection lock
self._lock_sel = Lock()
# flag to check if arrays have been linked in current context
self._is_linked = False
def get(self):
"""
Attempts to retrieve data from shared memory. If the selected index is
empty, this returns `BaseQueue.Empty`. If the queue is closed, then
`BaseQueue.Closed` is returned. Otherwise, this copies the data into
`self._job_buffer` and returns True.
"""
# acquire selection lock
self._lock_sel.acquire()
# get index
sel_index = self._arr_sel[1]
# if get index == set index and check[set] is low, wait for check[set] to be high
if self._arr_chk[sel_index] == 0: # queue is either empty or closed
self._lock_sel.release()
# print(f"sel get: {sel_index} returning False", self._arr_chk[sel_index])
return BaseQueue.Empty if self._arr_sel[2] == 0 else BaseQueue.Closed
# increment get index
self._arr_sel[1] = (sel_index + 1) % self.capacity
# print(f"sel get: {sel_index}", self._arr_chk[sel_index])
# get payload (must copy because buffer might change in other process)
self._job_buffer[:] = self._arr_dat[sel_index]
# release selection lock
self._lock_sel.release()
# set check[get] to low
self._arr_chk[sel_index] = 0
return True
def put(self):
"""
Attempts to load `self._job_buffer` into shared memory. If the selected
index is full, then this returns `BaseQueue.Full`. If the queue is
closed, then `BaseQueue.Closed` is returned. Otherwise, this loads data
into `self._arr_dat` and returns True.
"""
# acquire selection lock
self._lock_sel.acquire()
# set index
sel_index = self._arr_sel[0]
# if set index == get index and check[get] is high, wait for check[get] to be low
if self._arr_chk[sel_index] == 1: # queue is either full or closed
self._lock_sel.release()
# print(f"sel put: {sel_index} returning False", self._arr_chk[sel_index])
return BaseQueue.Full if self._arr_sel[2] == 0 else BaseQueue.Closed
# increment set index
self._arr_sel[0] = (sel_index + 1) % self.capacity
# print(f"sel put: {sel_index}", self._arr_chk[sel_index])
# print(self._arr_dat, self._job_buffer)
self._arr_dat[sel_index][:] = self._job_buffer
# release selection lock
self._lock_sel.release()
# set check[set] to high
self._arr_chk[sel_index] = 1
return True
def close(self):
"""
Closes queue by setting `self._arr_sel[2]` to HIGH.
"""
# link selection array if it isn't already
if not self._is_linked:
self._arr_sel = np.ndarray(
3, dtype=np.int8, buffer=self._shm_sel.buf)
# set `self._arr_sel[2]` to HIGH
self._arr_sel[2] = 1
def _link_mem(self):
# create local array connected to shared memory buffer
self._arr_dat = np.ndarray((self.capacity, self.job_spec.get_nbytes()),
dtype=np.int8,
buffer=self._shm_dat.buf)
# create local job buffer
local_job = Job(self.job_spec)
self._job_buffer = local_job._buffer
# link selection and check arrays to buffers in memory
self._arr_sel = np.ndarray(
3, dtype=np.int8, buffer=self._shm_sel.buf)
self._arr_chk = np.ndarray(
self.capacity, dtype=np.int8, buffer=self._shm_chk.buf)
# set linked flag to HIGH
self._is_linked = True
return local_job
# def _generate_dynamic_put(self):
# print("generating dynamic put")
# f = open("_dptest.py", "w")
# f.write("def _put(_job_buffer, buffer, sel_index):\n")
# for i in self._iter:
# f.write(f"\t_job_buffer[{i}][sel_index]=buffer[{i}]\n")
# f.close()
# dp = __import__("_dptest", fromlist=["_put"])
# self._put = dp._put
def Queue(job_spec: JobSpec = None,
*,
capacity: int = 1):
"""
Returns BaseQueue or FIFOQueue depending on the inputs.
"""
queue = BaseQueue if job_spec is None else FIFOQueue
return queue(capacity, job_spec)
```
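A single-process round trip through the queue above, again assuming the `ptlib.core.*` import paths implied by the file names. Presumably the ptlib runtime calls `_link_mem` inside each worker process; here it is called directly just to exercise the staging-buffer path:
```python
import numpy as np

from ptlib.core.job import JobSpec           # paths inferred from the file locations
from ptlib.core.queue import BaseQueue, Queue

spec = JobSpec(name="frame", shape=(4, 4), dtype=np.float32)
q = Queue(spec, capacity=2)          # factory returns a FIFOQueue

job = q._link_mem()                  # local Job backed by the staging buffer
job["frame"][:] = np.arange(16, dtype=np.float32).reshape(4, 4)

assert q.put() is True               # staging buffer -> shared-memory slot
assert q.get() is True               # shared-memory slot -> staging buffer
print(job["frame"])                  # the round-tripped data
q.close()
```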
#### File: ptlib/utils/diagram.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from random import random
from time import time_ns
from pickle import dump
import ptlib._backend as PTCONFIG
from ptlib.core.metadata import MetadataManager
class Diagram:
""" Parallel Timing Diagram *** come back """
# spacing between timing curves in the PTD
PTD_SPACING = PTCONFIG._DIAGRAM.PTD_SPACING
# width of strings printed to console
CONSOLE_WIDTH = PTCONFIG._DIAGRAM.CONSOLE_WIDTH
# time scaling divider
TIME_DIV = PTCONFIG._DIAGRAM.TIME_DIV
def __init__(self,
total_jobs: int = None,
start_time: int = None,
finish_time: int = None,
process_names: dict[str] = None,
metadata: dict[int, int] = None,
*,
meta_manager: MetadataManager = None):
"""
Parameters:
total_jobs -- int
The number of jobs processed by the pipeline.
start_time -- int
The start time of the main process in nanoseconds.
finish_time: int
The finish time of the main process in nanoseconds.
process_names: dict[task_id, worker_id]
Dict where each entry is the string name for worker with
`id=worker_id` assigned to the task with `id=task_id`.
metadata -- dict[task_id, worker_id][i] = (job_start, job_finish)
Look at `ptlib.core.metadata.MetadataManager` for a more
detailed explanation.
"""
assert isinstance(
meta_manager, MetadataManager), f"meta_manager is the wrong type: {type(meta_manager)}"
self._total_jobs = meta_manager._total_jobs if meta_manager else total_jobs
self._start = meta_manager._start if meta_manager else start_time
self._finish = meta_manager._finish if meta_manager else finish_time
# combine names and metadata
names = meta_manager._names if meta_manager else process_names
meta = meta_manager._meta if meta_manager else metadata
self._meta = {index: (names[index], meta[index])
for index in names.keys()}
# create diagram plots
self.ax_ptd = plt.subplot2grid((2, 3), (0, 0), colspan=5)
self.ax_jobs = plt.subplot2grid((2, 3), (1, 0), colspan=1)
self.ax_times = plt.subplot2grid((2, 3), (1, 1), colspan=1)
self.ax_rates = plt.subplot2grid((2, 3), (1, 2), colspan=1)
def show(self):
"""
Displays current graphs by using `plt.show()`.
"""
return plt.show()
def graph_all(self, save_path: str = ""):
"""
Generates each of the three graphs. If `save_path` is empty, then
the graphs are shown. Otherwise, the graphs are written to
`save_path` as a .pkl file.
"""
self.graph_ptd()
self.graph_stats()
self.table_stats()
if save_path == "":
self.show()
else:
path = os.path.join(save_path + ".pkl")
print(f"Saving Parallel Timing Diagram to: {path}")
dump(plt.gcf(), open(path, "wb"))
def graph_ptd(self):
"""
Generates parallel timing diagram.
"""
print("Generating PTD Graph...")
# set finish time if it is None
self._finish = self._finish or time_ns()
# the y value of the top most timing curve
y = self.PTD_SPACING * len(self._meta)
for name, metadata in self._meta.values():
# create PTD curves and decrement y position
if True:
self._create_ptd_lines(name, metadata, y)
y -= self.PTD_SPACING
# set graph labels
self.ax_ptd.set_ylabel("High/Low")
self.ax_ptd.set_xlabel("Unix Time (ns)")
self.ax_ptd.legend()
def graph_stats(self):
"""
Displays the PTD statistics as a bar graph.
"""
print("Generating PTD Stats Bar Graph...")
stats = self.get_stats(graph=True)
names, worker_ids, num_jobs, \
times_on, times_off, rates_on, rates_off = zip(*stats)
x = np.arange(len(names))
width = 0.5
# jobs
jobs_bar = self.ax_jobs.bar(x, num_jobs, width)
self.ax_jobs.bar_label(jobs_bar, padding=2)
self.ax_jobs.set_ylabel("n")
self.ax_jobs.set_xticks(x)
self.ax_jobs.set_xticklabels(names, rotation=315)
self.ax_jobs.set_title("Number of Jobs")
# times
times_on = np.around(np.array(times_on) / self.TIME_DIV, decimals=2)
times_off = np.around(np.array(times_off) / self.TIME_DIV, decimals=2)
times_on_bar = self.ax_times.bar(
x - (width - .15) / 2, times_on, (width - .15), label="On")
times_off_bar = self.ax_times.bar(
x + (width - .15) / 2, times_off, (width - .15), label="Off")
self.ax_times.bar_label(times_on_bar, padding=2)
self.ax_times.bar_label(times_off_bar, padding=2)
self.ax_times.set_ylabel("Time (s)")
self.ax_times.set_xticks(x)
self.ax_times.set_xticklabels(names, rotation=315)
self.ax_times.set_title("Time Spent")
self.ax_times.legend()
# rates
rates_on = np.around(np.array(rates_on) * self.TIME_DIV, decimals=2)
rates_off = np.around(np.array(rates_off) * self.TIME_DIV, decimals=2)
rates_on_bar = self.ax_rates.bar(
x - (width - .15) / 2, rates_on, (width - .15), label="On")
rates_off_bar = self.ax_rates.bar(
x + (width - .15) / 2, rates_off, (width - .15), label="Off")
self.ax_rates.bar_label(rates_on_bar, padding=2)
self.ax_rates.bar_label(rates_off_bar, padding=2)
self.ax_rates.set_ylabel("Rate (jobs / s)")
self.ax_rates.set_xticks(x)
self.ax_rates.set_xticklabels(names, rotation=315)
self.ax_rates.set_title("Job Rate")
self.ax_rates.legend()
def table_stats(self):
"""
Creates a table containing PTD statistics.
"""
print("Generating PTD Stats Table...")
def get_stats(self, graph=False):
"""
Generates and returns PTD statistics.
"""
self._finish = self._finish or time_ns()
stats = list()
for (task_id, worker_id), (name, metadata) in self._meta.items():
num_jobs = len(metadata)
time_on = time_off = 0
# Start and finish times are specified by the first pair
assert len(
metadata) > 0, f"Error: Worker has no metadata stored. | Name: {name}, Metadata: {metadata}"
off_since, finish = metadata[0]
metadata = metadata[1:]
# increment accumulators
for ont, offt in metadata:
time_off += ont - off_since
time_on += offt - ont
off_since = offt
# check edge case
if finish > off_since:
time_off += finish - off_since
# calculate rates
rate_on = num_jobs / time_on if time_on else 0
rate_off = num_jobs / time_off if time_off else 0
stats.append((name, worker_id, num_jobs, time_on,
time_off, rate_on, rate_off))
return stats
def _create_ptd_lines(self, name: str, metadata: list, y: float):
"""
Creates curve on the timing diagram for a single worker.
"""
# Start and finish times are specified by the first pair
assert len(
metadata) > 0, f"Error: PTProcess has no metadata stored. | Name: {name}, Metadata: {metadata}"
# set inital on/off times
off_since, finish = metadata[0]
metadata = metadata[1:]
# give curve a random color
color = (random(), random(), random())
# add label
self.ax_ptd.hlines(y, off_since, off_since, color=color, label=name)
# create vertical and horizontal lines
for ont, offt in metadata:
self.ax_ptd.hlines(y, off_since, ont, color=color)
self.ax_ptd.vlines(ont, y, y + 1, color=color)
self.ax_ptd.hlines(y + 1, ont, offt, color=color)
self.ax_ptd.vlines(offt, y, y + 1, color=color)
off_since = offt
# extend LOW line
if finish > off_since:
self.ax_ptd.hlines(y, off_since, finish, color=color)
def __str__(self):
""" Formats the timing diagram statistics in a readable way. """
s = "=" * self.CONSOLE_WIDTH + "\n"
# print overall performance
runtime = (self._finish - self._start) / self.TIME_DIV
jps = self._total_jobs / runtime
s += "\n*** OVERALL PERFORMANCE: "
s += f" | Pipeline Runtime: {runtime} s"
s += f" | Processed {self._total_jobs} jobs @ {jps:.2f} j/s ***\n\n"
# print job specific performance
for name, worker_id, num_jobs, time_on, time_off, rate_on, rate_off in self.get_stats():
stat_array = [time_on / self.TIME_DIV, time_off / self.TIME_DIV,
rate_on * self.TIME_DIV, rate_off * self.TIME_DIV]
time_on, time_off, rate_on, rate_off = np.around(
stat_array, decimals=2)
s += f"Task: {name} & Worker ID: {worker_id} -- {num_jobs} jobs"
s += f" | {time_on} s on, {time_off} s off"
s += f" | {rate_on} j/s on, {rate_off} j/s off\n"
s += "\n" + "=" * self.CONSOLE_WIDTH
return s
```
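`graph_all` pickles the entire matplotlib figure rather than exporting an image, so reopening a saved diagram is just unpickling it again; a small sketch (the `.pkl` path is a hypothetical output of `graph_all(save_path="run_stats")`):
```python
import pickle

import matplotlib.pyplot as plt

# Reload a figure previously written by Diagram.graph_all(save_path="run_stats").
with open("run_stats.pkl", "rb") as f:
    fig = pickle.load(f)
plt.show()
```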
|
{
"source": "jfwallin/JSPAM",
"score": 3
}
|
#### File: JSPAM/python/IOUtil.py
```python
class IOUtil:
@staticmethod
def outputParticles(filename, x0):
fo = open(filename, 'w')
IOUtil.outputParticlesToFile(fo, x0)
@staticmethod
def outputParticlesToFile(fo, x0):
size = len(x0)
for i in range(size):
dtmp = x0[i]
for j in range(6):
fo.write(IOUtil.formatDouble(dtmp[j]))
# new line
fo.write("\n")
fo.close()
@staticmethod
def formatDouble(num):
return "%16.8e"%(num)
```
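A quick usage sketch of the helper above: each particle is any sequence of six values, and every value is written as a fixed-width `%16.8e` column:
```python
from IOUtil import IOUtil  # assumes JSPAM/python is on the import path

particles = [
    [0.0, 0.0, 0.0, 0.1, 0.2, 0.3],
    [1.0, 2.0, 3.0, -0.1, -0.2, -0.3],
]
IOUtil.outputParticles("particles.txt", particles)  # one six-column row per particle
```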
|
{
"source": "jfwm2/aerospike-admin",
"score": 2
}
|
#### File: lib/collectinfo_analyzer/collectinfo_command_controller.py
```python
from lib.base_controller import CommandController
class CollectinfoCommandController(CommandController):
log_handler = None
def __init__(self, log_handler):
CollectinfoCommandController.log_handler = log_handler
```
#### File: collectinfo_analyzer/collectinfo_handler/collectinfo_reader.py
```python
from lib.utils.util import shell_command
class CollectinfoReader:
cinfo_log_file_identifier_key = "=ASCOLLECTINFO"
cinfo_log_file_identifiers = [
"Configuration~~~\|Configuration (.*)~",
"Statistics~\|Statistics (.*)~",
]
system_log_file_identifier_key = "=ASCOLLECTINFO"
system_log_file_identifiers = [
"hostname -I",
"uname -a",
"ip addr",
"Data collection for get_awsdata in progress",
"top -n",
"cat /var/log/syslog",
]
def is_cinfo_log_file(self, log_file=""):
if not log_file:
return False
try:
out, err = shell_command(['head -n 30 "%s"' % (log_file)])
except Exception:
return False
if err or not out:
return False
lines = out.strip().split("\n")
found = False
for line in lines:
try:
if self.cinfo_log_file_identifier_key in line:
found = True
break
except Exception:
pass
if not found:
return False
for search_string in self.cinfo_log_file_identifiers:
try:
out, err = shell_command(
['grep -m 1 "%s" "%s"' % (search_string, log_file)]
)
except Exception:
return False
if err or not out:
return False
return True
def is_system_log_file(self, log_file=""):
if not log_file:
return False
try:
out, err = shell_command(['head -n 30 "%s"' % (log_file)])
except Exception:
return False
if err or not out:
return False
lines = out.strip().split("\n")
found = False
for line in lines:
try:
if self.system_log_file_identifier_key in line:
found = True
break
except Exception:
pass
if not found:
return False
for search_string in self.system_log_file_identifiers:
try:
out, err = shell_command(
['grep -m 1 "%s" "%s"' % (search_string, log_file)]
)
except Exception:
continue
if err or not out:
continue
else:
return True
return False
```
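A small usage sketch of the reader above; the path is a hypothetical capture file, and both checks shell out to `head`/`grep`, so they only work where those commands are available:
```python
from lib.collectinfo_analyzer.collectinfo_handler.collectinfo_reader import (
    CollectinfoReader,
)

reader = CollectinfoReader()
log_file = "/tmp/ascollectinfo.log"  # hypothetical capture to classify

if reader.is_cinfo_log_file(log_file):
    print("collectinfo capture")
elif reader.is_system_log_file(log_file):
    print("system info capture")
else:
    print("not a recognized collectinfo file")
```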
#### File: lib/live_cluster/asinfo_controller.py
```python
from lib.utils import util
from lib.base_controller import CommandHelp, ShellException
from .live_cluster_command_controller import LiveClusterCommandController
@CommandHelp(
'"asinfo" provides raw access to the info protocol.',
" Options:",
" -v <command> - The command to execute",
" -p <port> - Port to use in the case of an XDR info command and XDR is",
" not in asd",
' -l - Replace semicolons ";" with newlines. If output does',
' not contain semicolons "-l" will attempt to use',
' colons ":" followed by commas ",".',
" --no_node_name - Force to display output without printing node names.",
)
class ASInfoController(LiveClusterCommandController):
def __init__(self):
self.modifiers = set(["with", "like"])
@CommandHelp("Executes an info command.")
def _do_default(self, line):
mods = self.parse_modifiers(line)
line = mods["line"]
nodes = self.nodes
value = None
line_sep = False
xdr = False
show_node_name = True
tline = line[:]
try:
while tline:
word = tline.pop(0)
if word == "-v":
value = tline.pop(0)
elif word == "-l":
line_sep = True
elif word == "-p":
port = tline.pop(0)
if port == "3004": # ugly Hack
xdr = True
elif word == "--no_node_name":
show_node_name = False
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
except Exception:
self.logger.warning(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
return
if value is not None:
value = value.translate(str.maketrans("", "", "'\""))
if xdr:
results = self.cluster.xdr_info(value, nodes=nodes)
else:
results = self.cluster.info(value, nodes=nodes)
return util.Future(
self.view.asinfo, results, line_sep, show_node_name, self.cluster, **mods
)
```
#### File: live_cluster/client/client_util.py
```python
import re
import itertools
import threading
from time import time
def info_to_dict(value, delimiter=";", ignore_field_without_key_value_delimiter=True):
"""
Simple function to convert string to dict
"""
if not value:
return {}
if isinstance(value, Exception):
return value
stat_dict = {}
_tmp_value_list = info_to_list(value, delimiter)
_value_list = []
delimiter2 = "="
if ignore_field_without_key_value_delimiter:
_value_list = _tmp_value_list
else:
# Sometimes value contains confusing delimiter
# In such cases, after splitting on delimiter, we get items without next delimiter (=).
# By default we ignore such items. But in some cases like dc configs we need to accept those and append to previous item.
# For ex. "dc-name=REMOTE_DC_1:nodes=2000:10:3:0:0:0:100:d+3000:int-ext-ipmap=172.16.58.3...."
# In this example, first split will give ["dc-name=REMOTE_DC_1", "nodes=2000", "10", "3",
# "0", "0", "100", "d+3000", "int-ext-ipmap=172.16.58.3", ....]. In such cases we need to append items
# (10, 3, 0, 0, 100, "d+3000") to previous valid item ("nodes=2000") with delimiter (":").
# It gives "nodes=2000:10:3:0:0:0:100:d+3000".
for _v in _tmp_value_list:
if delimiter2 not in _v:
try:
_value_list[-1] = str(_value_list[-1]) + delimiter + str(_v)
except Exception:
pass
else:
_value_list.append(_v)
stat_param = [info_to_tuple(sp, delimiter2) for sp in _value_list]
for g in itertools.groupby(stat_param, lambda x: x[0]):
try:
value = [v[1] for v in g[1]]
value = ",".join(sorted(value)) if len(value) > 1 else value[0]
stat_dict[g[0]] = value
except Exception:
# NOTE: 3.0 had a bug in stats at least prior to 3.0.44. This will
# ignore that bug.
# Not sure if this bug is fixed or not.. removing this try/catch
# results in things not working. TODO: investigate.
pass
return stat_dict
def info_to_dict_multi_level(
value,
keyname,
delimiter1=";",
delimiter2=":",
ignore_field_without_key_value_delimiter=True,
):
"""
Simple function to convert a string to a dict where the string has a format like
field1_section1=value1<delimiter2>field2_section1=value2<delimiter2>... <delimiter1> field1_section2=value3<delimiter2>field2_section2=value4<delimiter2>...
"""
if not value:
return {}
if isinstance(value, Exception):
return value
if not isinstance(keyname, list):
keyname = [keyname]
value_list = info_to_list(value, delimiter1)
value_dict = {}
if not isinstance(keyname, list):
return value_dict
for v in value_list:
values = info_to_dict(
v,
delimiter2,
ignore_field_without_key_value_delimiter=ignore_field_without_key_value_delimiter,
)
if not values or isinstance(values, Exception):
continue
for _k in keyname:
if _k not in values.keys():
continue
value_dict[values[_k]] = values
return value_dict
def info_colon_to_dict(value):
"""
Simple function to convert colon separated string to dict
"""
return info_to_dict(value, ":")
def info_to_list(value, delimiter=";"):
if isinstance(value, Exception):
return []
return re.split(delimiter, value)
def info_to_tuple(value, delimiter=":"):
return tuple(info_to_list(value, delimiter))
def info_valid(info_command_output):
return "" != info_command_output and "Error" not in info_command_output
def find_dns(endpoints):
if not endpoints:
return None
for e in endpoints:
if not e:
continue
if e.startswith("[") or e[0].isdigit():
continue
try:
return e.split(":")[0].strip()
except Exception:
pass
return None
def parse_peers_string(
peers_str, delim=",", ignore_chars_start="[", ignore_chars_end="]"
):
peers_list = []
if not peers_str or isinstance(peers_str, Exception):
return peers_list
peers_str = peers_str.strip()
if not peers_str:
return peers_list
if peers_str[0] in ignore_chars_start and peers_str[-1] in ignore_chars_end:
peers_str = peers_str[1:-1]
if not peers_str:
return peers_list
push_bracket = ignore_chars_start
pop_bracket = ignore_chars_end
b_stack = []
current_str = ""
for i in peers_str:
if i == delim:
if len(b_stack) > 0:
current_str += i
else:
peers_list.append(current_str.strip())
current_str = ""
continue
if i in push_bracket:
current_str += i
b_stack.append(i)
continue
if i in pop_bracket:
current_str += i
b_stack.pop()
continue
current_str += i
if current_str:
peers_list.append(current_str.strip())
return peers_list
def concurrent_map(func, data):
"""
Similar to the builtin function map(), but spawns a thread for each argument
and applies 'func' concurrently.
Note: unlike map(), we cannot take an arbitrary iterable; 'data' should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# Uncomment following line to run single threaded.
# return [func(datum) for datum in data]
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
class cached(object):
# Doesn't support lists, dicts and other unhashables
# Also doesn't support kwargs for reason above.
def __init__(self, func, ttl=0.5):
self.func = func
self.ttl = ttl
self.cache = {}
def __setitem__(self, key, value):
self.cache[key] = (value, time() + self.ttl)
def __getitem__(self, key):
if key in self.cache:
value, eol = self.cache[key]
if eol > time():
return value
self[key] = self.func(*key)
return self.cache[key][0]
def __call__(self, *args):
return self[args]
def flatten(list1):
"""
Simple function to flatten peers list
Format: [((node1 endpoint1 tuple), (node1 endpoint2 tuple), ..., (node1 endpointm tuple)), ....]
Example: [(("172.17.0.1",3000,None),), (("2001:db8:85a3::8a2e",6666,None), ("172.17.0.3",3004,None))]
"""
f_list = []
for i in list1:
if isinstance(i[0], tuple):
for j in i:
f_list.append(j)
else:
f_list.append(i)
return f_list
def remove_suffix(input_string, suffix):
"""
Simple function to remove suffix from input_string if available
"""
try:
input_string = input_string.strip()
if not input_string.endswith(suffix):
return input_string
return input_string[0 : input_string.rfind(suffix)]
except Exception:
return input_string
```
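A few quick checks of the helpers above (import path inferred from the file location). Values stay strings, consecutive duplicate keys are joined with commas, and `parse_peers_string` only splits on commas that sit outside brackets:
```python
from lib.live_cluster.client.client_util import (
    cached,
    info_to_dict,
    info_to_tuple,
    parse_peers_string,
)

assert info_to_dict("a=1;b=2;b=3") == {"a": "1", "b": "2,3"}
assert info_to_tuple("127.0.0.1:3000") == ("127.0.0.1", "3000")
assert parse_peers_string("a,[b,c],d") == ["a", "[b,c]", "d"]

# `cached` memoizes positional-argument calls for `ttl` seconds.
square = cached(lambda x: x * x, ttl=1.0)
assert square(3) == 9  # computed once, then served from the cache until it expires
```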
#### File: lib/live_cluster/collectinfo_controller.py
```python
import copy
import json
import shutil
from distutils.version import LooseVersion
import time
from os import sys
from lib.view.sheet.render import get_style_json, set_style_json
from lib.view.terminal import terminal
from lib.utils import common, constants, util
from lib.base_controller import CommandHelp
from lib.collectinfo_analyzer.collectinfo_root_controller import (
CollectinfoRootController,
)
from lib.get_controller import (
GetStatisticsController,
GetConfigController,
GetUsersController,
GetRolesController,
GetLatenciesController,
GetPmapController,
)
from .features_controller import FeaturesController
from .info_controller import InfoController
from .show_controller import ShowController
from .live_cluster_command_controller import LiveClusterCommandController
@CommandHelp(
'"collectinfo" is used to collect cluster info, aerospike conf file and system stats.'
)
class CollectinfoController(LiveClusterCommandController):
get_pmap = False
def __init__(self):
self.modifiers = set(["with"])
self.aslogfile = ""
self.aslogdir = ""
def _collect_local_file(self, src, dest_dir):
self.logger.info("Copying file %s to %s" % (src, dest_dir))
try:
shutil.copy2(src, dest_dir)
except Exception as e:
raise e
def _collectinfo_content(self, func, parm="", alt_parms=""):
name = ""
capture_stdout = util.capture_stdout
sep = constants.COLLECTINFO_SEPERATOR
old_style_json = get_style_json()
set_style_json()
try:
name = func.__name__
except Exception:
pass
info_line = constants.COLLECTINFO_PROGRESS_MSG % (
name,
"%s" % (" %s" % (str(parm)) if parm else ""),
)
self.logger.info(info_line)
if parm:
sep += str(parm) + "\n"
if func == "cluster":
o = self.cluster.info(parm)
else:
if self.nodes and isinstance(self.nodes, list):
parm += ["with"] + self.nodes
o = capture_stdout(func, parm)
util.write_to_file(self.aslogfile, sep + str(o))
set_style_json(old_style_json)
return ""
def _write_version(self, line):
print("asadm version " + str(self.asadm_version))
def _collect_logs_from_systemd_journal(self, as_logfile_prefix):
asd_pids = common.get_asd_pids()
for pid in asd_pids:
try:
journalctl_cmd = [
'journalctl _PID=%s --since "24 hours ago" -q -o cat' % (pid)
]
self.aslogfile = as_logfile_prefix + "aerospike_%s.log" % (pid)
self.logger.info(
"Data collection for %s to %s in progress..."
% (str(journalctl_cmd), self.aslogfile)
)
o, e = util.shell_command(journalctl_cmd)
if e:
self.logger.error(str(e))
else:
util.write_to_file(self.aslogfile, str(o))
except Exception as e1:
self.logger.error(str(e1))
sys.stdout = sys.__stdout__
def _parse_namespace(self, namespace_data):
"""
This method returns the set of namespaces present in the given namespace data.
@param namespace_data: should be a dict of the form returned by the info protocol for namespaces.
"""
namespaces = set()
for _value in namespace_data.values():
for ns in _value.split(";"):
namespaces.add(ns)
return namespaces
###########################################################################
# Functions for dumping json
def _restructure_set_section(self, stats):
for node, node_data in stats.items():
if "set" not in node_data.keys():
continue
for key, val in node_data["set"].items():
ns_name = key[0]
setname = key[1]
if ns_name not in node_data["namespace"]:
continue
ns = node_data["namespace"][ns_name]
if "set" not in ns.keys():
ns["set"] = {}
ns["set"][setname] = copy.deepcopy(val)
del node_data["set"]
def _restructure_sindex_section(self, stats):
# Due to new server feature namespace add/remove with rolling restart,
# there is possibility that different nodes will have different namespaces and
# old sindex info available for node which does not have namespace for that sindex.
for node, node_data in stats.items():
if "sindex" not in node_data.keys():
continue
for key, val in node_data["sindex"].items():
key_list = key.split()
ns_name = key_list[0]
sindex_name = key_list[2]
if ns_name not in node_data["namespace"]:
continue
ns = node_data["namespace"][ns_name]
if "sindex" not in ns.keys():
ns["sindex"] = {}
ns["sindex"][sindex_name] = copy.deepcopy(val)
del node_data["sindex"]
def _restructure_bin_section(self, stats):
for node, node_data in stats.items():
if "bin" not in node_data.keys():
continue
for ns_name, val in node_data["bin"].items():
if ns_name not in node_data["namespace"]:
continue
ns = node_data["namespace"][ns_name]
ns["bin"] = copy.deepcopy(val)
del node_data["bin"]
def _init_stat_ns_subsection(self, data):
for node, node_data in data.items():
if "namespace" not in node_data.keys():
continue
ns_map = node_data["namespace"]
for ns, data in ns_map.items():
ns_map[ns]["set"] = {}
ns_map[ns]["bin"] = {}
ns_map[ns]["sindex"] = {}
def _restructure_ns_section(self, data):
for node, node_data in data.items():
if "namespace" not in node_data.keys():
continue
ns_map = node_data["namespace"]
for ns, data in ns_map.items():
stat = {}
stat[ns] = {}
stat[ns]["service"] = data
ns_map[ns] = stat[ns]
def _remove_exception_from_section_output(self, data):
for section in data:
for node in data[section]:
if isinstance(data[section][node], Exception):
data[section][node] = {}
def _get_as_data_json(self):
as_map = {}
getter = GetStatisticsController(self.cluster)
stats = getter.get_all(nodes=self.nodes)
getter = GetConfigController(self.cluster)
config = getter.get_all(nodes=self.nodes, flip=False)
getter = GetUsersController(self.cluster)
getter = GetRolesController(self.cluster)
# All these section have have nodeid in inner level
# flip keys to get nodeid in upper level.
# {'namespace': 'test': {'ip1': {}, 'ip2': {}}} -->
# {'namespace': {'ip1': {'test': {}}, 'ip2': {'test': {}}}}
stats["namespace"] = util.flip_keys(stats["namespace"])
stats["set"] = util.flip_keys(stats["set"])
stats["bin"] = util.flip_keys(stats["bin"])
stats["dc"] = util.flip_keys(stats["dc"])
stats["sindex"] = util.flip_keys(stats["sindex"])
self._remove_exception_from_section_output(stats)
self._remove_exception_from_section_output(config)
# flip key to get node ids in upper level and sections inside them.
# {'namespace': {'ip1': {'test': {}}, 'ip2': {'test': {}}}} -->
# {'ip1':{'namespace': {'test': {}}}, 'ip2': {'namespace': {'test': {}}}}
new_stats = util.flip_keys(stats)
new_config = util.flip_keys(config)
# Create a new service level for all ns stats.
# {'namespace': 'test': {<stats>}} -->
# {'namespace': 'test': {'service': {<stats>}}}
self._restructure_ns_section(new_stats)
# ns stats would have set and bin data too, service level will
# consolidate its service stats and put sets, sindex, bin stats
# in namespace section
self._init_stat_ns_subsection(new_stats)
self._restructure_set_section(new_stats)
self._restructure_sindex_section(new_stats)
self._restructure_bin_section(new_stats)
# No config for set, sindex, bin
self._restructure_ns_section(new_config)
# check this 'XDR': {'STATISTICS': {'192.168.112.194:3000':
# Type_error('expected str
as_map["statistics"] = new_stats
as_map["config"] = new_config
new_as_map = util.flip_keys(as_map)
return new_as_map
def _check_for_exception_and_set(self, data, section_name, nodeid, result_map):
if nodeid in data:
if not isinstance(data[nodeid], Exception):
result_map[nodeid][section_name] = data[nodeid]
else:
result_map[nodeid][section_name] = ""
def _get_as_metadata(self):
metamap = {}
builds = util.Future(self.cluster.info, "build", nodes=self.nodes).start()
editions = util.Future(self.cluster.info, "version", nodes=self.nodes).start()
xdr_builds = util.Future(
self.cluster.info_build_version, nodes=self.nodes
).start()
node_ids = util.Future(self.cluster.info_node, nodes=self.nodes).start()
ips = util.Future(self.cluster.info_ip_port, nodes=self.nodes).start()
endpoints = util.Future(
self.cluster.info_service_list, nodes=self.nodes
).start()
services = util.Future(
self.cluster.info_peers_flat_list, nodes=self.nodes
).start()
udf_data = util.Future(self.cluster.info_udf_list, nodes=self.nodes).start()
health_outliers = util.Future(
self.cluster.info_health_outliers, nodes=self.nodes
).start()
builds = builds.result()
editions = editions.result()
xdr_builds = xdr_builds.result()
node_ids = node_ids.result()
ips = ips.result()
endpoints = endpoints.result()
services = services.result()
udf_data = udf_data.result()
health_outliers = health_outliers.result()
for nodeid in builds:
metamap[nodeid] = {}
self._check_for_exception_and_set(builds, "asd_build", nodeid, metamap)
self._check_for_exception_and_set(editions, "edition", nodeid, metamap)
self._check_for_exception_and_set(xdr_builds, "xdr_build", nodeid, metamap)
self._check_for_exception_and_set(node_ids, "node_id", nodeid, metamap)
self._check_for_exception_and_set(ips, "ip", nodeid, metamap)
self._check_for_exception_and_set(endpoints, "endpoints", nodeid, metamap)
self._check_for_exception_and_set(services, "services", nodeid, metamap)
self._check_for_exception_and_set(udf_data, "udf", nodeid, metamap)
self._check_for_exception_and_set(
health_outliers, "health", nodeid, metamap
)
return metamap
def _get_as_histograms(self):
histogram_map = {}
hist_list = [
("ttl", "ttl", False),
("objsz", "objsz", False),
("objsz", "object-size", True),
]
hist_dumps = [
util.Future(
self.cluster.info_histogram,
hist[0],
logarithmic=hist[2],
raw_output=True,
nodes=self.nodes,
).start()
for hist in hist_list
]
for hist, hist_dump in zip(hist_list, hist_dumps):
hist_dump = hist_dump.result()
for node in hist_dump:
if node not in histogram_map:
histogram_map[node] = {}
if not hist_dump[node] or isinstance(hist_dump[node], Exception):
continue
histogram_map[node][hist[1]] = hist_dump[node]
return histogram_map
def _get_as_latency(self):
latency_getter = GetLatenciesController(self.cluster)
latencies_data = latency_getter.get_all(
self.nodes, buckets=17, exponent_increment=1, verbose=1
)
latency_map = {}
for node in latencies_data:
if node not in latency_map:
latency_map[node] = {}
if not latencies_data[node] or isinstance(latencies_data[node], Exception):
continue
latency_map[node] = latencies_data[node]
return latency_map
def _get_as_pmap(self):
getter = GetPmapController(self.cluster)
return getter.get_pmap(nodes=self.nodes)
def _get_as_access_control_list(self):
acl_map = {}
principal_node = self.cluster.get_expected_principal()
getter = GetUsersController(self.cluster)
users_map = getter.get_users(nodes=[principal_node])
getter = GetRolesController(self.cluster)
roles_map = getter.get_roles(nodes=[principal_node])
for node in users_map:
acl_map[node] = {}
self._check_for_exception_and_set(users_map, "users", node, acl_map)
self._check_for_exception_and_set(roles_map, "roles", node, acl_map)
return acl_map
def _dump_in_json_file(self, as_logfile_prefix, dump):
self.logger.info("Dumping collectinfo in JSON format.")
self.aslogfile = as_logfile_prefix + "ascinfo.json"
try:
json_dump = json.dumps(dump, indent=4, separators=(",", ":"))
with open(self.aslogfile, "w") as f:
f.write(json_dump)
except Exception as e:
self.logger.error("Failed to write JSON file: " + str(e))
def _get_collectinfo_data_json(
self,
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
enable_ssh,
):
self.logger.debug("Collectinfo data to store in collectinfo_*.json")
dump_map = {}
meta_map = self._get_as_metadata()
histogram_map = self._get_as_histograms()
latency_map = self._get_as_latency()
acl_map = self._get_as_access_control_list()
if CollectinfoController.get_pmap:
pmap_map = self._get_as_pmap()
sys_map = self.cluster.info_system_statistics(
default_user=default_user,
default_pwd=default_pwd,
default_ssh_key=default_ssh_key,
default_ssh_port=default_ssh_port,
credential_file=credential_file,
nodes=self.nodes,
collect_remote_data=enable_ssh,
)
cluster_names = util.Future(self.cluster.info, "cluster-name").start()
as_map = self._get_as_data_json()
for node in as_map:
dump_map[node] = {}
dump_map[node]["as_stat"] = as_map[node]
if node in sys_map:
dump_map[node]["sys_stat"] = sys_map[node]
if node in meta_map:
dump_map[node]["as_stat"]["meta_data"] = meta_map[node]
if node in histogram_map:
dump_map[node]["as_stat"]["histogram"] = histogram_map[node]
if node in latency_map:
dump_map[node]["as_stat"]["latency"] = latency_map[node]
if CollectinfoController.get_pmap and node in pmap_map:
dump_map[node]["as_stat"]["pmap"] = pmap_map[node]
# ACL requests only go to the principal, therefore we store them only
# for the principal
if node in acl_map:
dump_map[node]["as_stat"]["acl"] = acl_map[node]
# Get the cluster name and add one more level in map
cluster_name = "null"
cluster_names = cluster_names.result()
# Cluster name.
for node in cluster_names:
if not isinstance(cluster_names[node], Exception) and cluster_names[
node
] not in ["null"]:
cluster_name = cluster_names[node]
break
snp_map = {}
snp_map[cluster_name] = dump_map
return snp_map
def _dump_collectinfo_json(
self,
timestamp,
as_logfile_prefix,
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
enable_ssh,
snp_count,
wait_time,
):
snpshots = {}
for i in range(snp_count):
snp_timestamp = time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime())
self.logger.info(
"Data collection for Snapshot: " + str(i + 1) + " in progress..."
)
snpshots[snp_timestamp] = self._get_collectinfo_data_json(
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
enable_ssh,
)
time.sleep(wait_time)
self._dump_in_json_file(as_logfile_prefix, snpshots)
###########################################################################
# Functions for dumping pretty print files
def _dump_collectinfo_pretty_print(
self, timestamp, as_logfile_prefix, config_path=""
):
# getting service port to use in ss/netstat command
port = 3000
try:
_, port, _ = self.cluster.get_seed_nodes()[0]
except Exception:
port = 3000
collect_output = time.strftime("%Y-%m-%d %H:%M:%S UTC\n", timestamp)
dignostic_info_params = ["network", "namespace", "set", "xdr", "dc", "sindex"]
dignostic_features_params = ["features"]
dignostic_show_params = [
"config",
"config xdr",
"config dc",
"config cluster",
"distribution",
"distribution eviction",
"distribution object_size -b",
"latencies -v -e 1 -b 17",
"statistics",
"statistics xdr",
"statistics dc",
"statistics sindex",
]
if CollectinfoController.get_pmap:
dignostic_show_params.append("pmap")
dignostic_aerospike_cluster_params = [
"service",
"services",
"peers-clear-std",
"peers-clear-alt",
"peers-tls-std",
"peers-tls-alt",
"alumni-clear-std",
"alumni-tls-std",
"peers-generation",
"roster:",
]
summary_params = ["summary"]
summary_info_params = ["network", "namespace", "set", "xdr", "dc", "sindex"]
health_params = ["health -v"]
# find version
as_version = None
try:
as_version = self.cluster.info("build").popitem()[1]
except Exception:
as_version = None
if isinstance(as_version, Exception):
as_version = None
# find all namespaces
try:
namespaces = self._parse_namespace(self.cluster.info("namespaces"))
except Exception:
namespaces = []
# add hist-dump or histogram command to collect list
hist_list = ["ttl", "object-size", "object-size-linear"]
hist_dump_info_str = "histogram:namespace=%s;type=%s"
try:
if LooseVersion(as_version) < LooseVersion("4.2.0"):
# histogram command introduced in 4.2.0
# use hist-dump command for older versions
hist_list = ["ttl", "objsz"]
hist_dump_info_str = "hist-dump:ns=%s;hist=%s"
except Exception: # probably failed to get build version, node may be down
pass
for ns in namespaces:
for hist in hist_list:
dignostic_aerospike_cluster_params.append(
hist_dump_info_str % (ns, hist)
)
####### Diagnostic info ########
self.aslogfile = as_logfile_prefix + "ascollectinfo.log"
util.write_to_file(self.aslogfile, collect_output)
try:
self._collectinfo_content(self._write_version)
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
info_controller = InfoController()
for info_param in dignostic_info_params:
self._collectinfo_content(info_controller, [info_param])
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
show_controller = ShowController()
for show_param in dignostic_show_params:
self._collectinfo_content(show_controller, show_param.split())
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
features_controller = FeaturesController()
for cmd in dignostic_features_params:
self._collectinfo_content(features_controller, [cmd])
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
for cmd in dignostic_aerospike_cluster_params:
self._collectinfo_content("cluster", cmd)
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
####### Summary ########
collectinfo_root_controller = CollectinfoRootController(
asadm_version=self.asadm_version, clinfo_path=self.aslogdir
)
self.aslogfile = as_logfile_prefix + "summary.log"
util.write_to_file(self.aslogfile, collect_output)
try:
self._collectinfo_content(self._write_version)
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
for summary_param in summary_params:
self._collectinfo_content(
collectinfo_root_controller.execute, [summary_param]
)
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
try:
info_controller = InfoController()
for info_param in summary_info_params:
self._collectinfo_content(info_controller, [info_param])
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
####### Health ########
self.aslogfile = as_logfile_prefix + "health.log"
util.write_to_file(self.aslogfile, collect_output)
try:
for health_param in health_params:
self._collectinfo_content(
collectinfo_root_controller.execute, health_param.split()
)
except Exception as e:
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
####### System info ########
self.aslogfile = as_logfile_prefix + "sysinfo.log"
self.failed_cmds = common.collect_sys_info(
port=port, timestamp=collect_output, outfile=self.aslogfile
)
##### aerospike conf file #####
conf_path = config_path
self.aslogfile = as_logfile_prefix + "aerospike.conf"
if not conf_path:
conf_path = "/etc/aerospike/aerospike.conf"
# Comparing with this version because prior to this it was
# citrusleaf.conf
try:
if LooseVersion(as_version) <= LooseVersion("3.0.0"):
conf_path = "/etc/citrusleaf/citrusleaf.conf"
self.aslogfile = as_logfile_prefix + "citrusleaf.conf"
except Exception: # probably failed to get build version, node may be down
pass
try:
self._collect_local_file(conf_path, self.aslogfile)
except Exception as e:
self.logger.warning(str(e))
util.write_to_file(self.aslogfile, str(e))
sys.stdout = sys.__stdout__
###########################################################################
# Collectinfo caller functions
def _run_collectinfo(
self,
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
snp_count,
wait_time,
enable_ssh=False,
output_prefix="",
config_path="",
):
# JSON collectinfo snapshot count check
if snp_count < 1:
self.logger.error("Wrong collectinfo snapshot count")
return
timestamp = time.gmtime()
self.aslogdir, as_logfile_prefix = common.set_collectinfo_path(
timestamp, output_prefix=output_prefix
)
        # Coloring might write extra characters to the file; disable terminal coloring to avoid it
terminal.enable_color(False)
# list of failed system commands
self.failed_cmds = []
# JSON collectinfo
self._dump_collectinfo_json(
timestamp,
as_logfile_prefix,
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
enable_ssh,
snp_count,
wait_time,
)
# Pretty print collectinfo
self._dump_collectinfo_pretty_print(
timestamp, as_logfile_prefix, config_path=config_path
)
# Archive collectinfo directory
common.archive_log(self.aslogdir)
# printing collectinfo summary
common.print_collecinto_summary(self.aslogdir, failed_cmds=self.failed_cmds)
def _collect_info(self, line):
snp_count = util.get_arg_and_delete_from_mods(
line=line,
arg="-n",
return_type=int,
default=1,
modifiers=self.modifiers,
mods=self.mods,
)
wait_time = util.get_arg_and_delete_from_mods(
line=line,
arg="-t",
return_type=int,
default=5,
modifiers=self.modifiers,
mods=self.mods,
)
enable_ssh = util.check_arg_and_delete_from_mods(
line=line,
arg="--enable-ssh",
default=False,
modifiers=self.modifiers,
mods=self.mods,
)
default_user = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-user",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_pwd = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-pwd",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_port = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-port",
return_type=int,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_key = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-key",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
credential_file = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-cf",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
output_prefix = util.get_arg_and_delete_from_mods(
line=line,
arg="--output-prefix",
return_type=str,
default="",
modifiers=self.modifiers,
mods=self.mods,
)
output_prefix = util.strip_string(output_prefix)
config_path = util.get_arg_and_delete_from_mods(
line=line,
arg="--asconfig-file",
return_type=str,
default="",
modifiers=self.modifiers,
mods=self.mods,
)
config_path = util.strip_string(config_path)
self._run_collectinfo(
default_user,
default_pwd,
default_ssh_port,
default_ssh_key,
credential_file,
snp_count,
wait_time,
enable_ssh=enable_ssh,
output_prefix=output_prefix,
config_path=config_path,
)
@CommandHelp(
"Collects cluster info, aerospike conf file for local node and system stats from all nodes if",
"remote server credentials provided. If credentials are not available then it will collect system",
"stats from",
"local node only.",
" Options:",
" -n <int> - Number of snapshots. Default: 1",
" -s <int> - Sleep time in seconds between each snapshot. Default: 5 sec",
" --enable-ssh - Enables the collection of system statistics from the remote server.",
" --ssh-user <string> - Default user ID for remote servers. This is the ID of a user of the",
" system not the ID of an Aerospike user.",
" --ssh-pwd <string> - Default password or passphrase for key for remote servers. This is",
" the user's password for logging into the system, not a password for",
" logging into Aerospike.",
" --ssh-port <int> - Default SSH port for remote servers. Default: 22",
" --ssh-key <string> - Default SSH key (file path) for remote servers.",
" --ssh-cf <string> - Remote System Credentials file path.",
" If the server credentials are not in the credentials file, then",
" authentication is attempted with the default credentials.",
" File format : each line must contain <IP[:PORT]>,<USER_ID>",
" <PASSWORD or PASSPHRASE>,<SSH_KEY>",
" Example: 172.16.17.32,uid,pwd",
" 1.2.3.4:3232,uid,pwd",
" 1.2.3.4:3232,uid,,key_path",
" 1.2.3.4:3232,uid,passphrase,key_path",
" [2001::1234:10],uid,pwd",
" [2001::1234:10]:3232,uid,,key_path",
" --output-prefix <string> - Output directory name prefix.",
" --asconfig-file <string> - Aerospike config file path to collect.",
" Default: /etc/aerospike/aerospike.conf",
)
def _do_default(self, line):
self._collect_info(line=line)
```
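The `--ssh-cf` help text above pins down a simple comma-separated credentials format. As an illustration only (this is not the parser asadm ships with; the helper name and return shape are invented here), a line in that format could be split like this:

```python
# Hypothetical parser for the --ssh-cf credentials format documented above:
# <IP[:PORT]>,<USER_ID>,<PASSWORD or PASSPHRASE>,<SSH_KEY>
def parse_credential_line(line):
    parts = [p.strip() for p in line.split(",")]
    while len(parts) < 4:
        parts.append("")  # password/passphrase and key path may be empty
    host_port, user, secret, key_path = parts[:4]
    return {"host": host_port, "user": user, "secret": secret, "key": key_path}

for sample in ("1.2.3.4:3232,uid,pwd", "[2001::1234:10]:3232,uid,,key_path"):
    print(parse_credential_line(sample))
```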
#### File: lib/live_cluster/live_cluster_command_controller.py
```python
from lib.base_controller import CommandController
class LiveClusterCommandController(CommandController):
cluster = None
def __init__(self, cluster):
LiveClusterCommandController.cluster = cluster
```
#### File: lib/live_cluster/summary_controller.py
```python
from lib.utils import common, util
from lib.base_controller import CommandHelp
from .live_cluster_command_controller import LiveClusterCommandController
@CommandHelp(
"Displays summary of Aerospike cluster.",
" Options:",
" -l - Enable to display namespace output in List view. Default: Table view",
" --enable-ssh - Enables the collection of system statistics from a remote server.",
" --ssh-user <string> - Default user ID for remote servers. This is the ID of a user of the",
" system, not the ID of an Aerospike user.",
" --ssh-pwd <string> - Default password or passphrase for key for remote servers. This is the",
" user's password for logging into the system, not a password for logging",
" into Aerospike.",
" --ssh-port <int> - Default SSH port for remote servers. Default: 22",
" --ssh-key <string> - Default SSH key (file path) for remote servers.",
" --ssh-cf <string> - Remote System Credentials file path.",
" If the server credentials are not in the credentials file, then",
" authentication is attempted with the default credentials.",
" File format : each line should contain <IP[:PORT]>,<USER_ID>,",
" <PASSWORD or PASSPHRASE>,<SSH_KEY>",
" Example: 192.168.127.12,uid,pwd",
" 192.168.127.12:3232,uid,pwd",
" 192.168.127.12:3232,uid,,key_path",
" 192.168.127.12:3232,uid,passphrase,key_path",
" [2001::1234:10],uid,pwd",
" [2001::1234:10]:3232,uid,,key_path",
)
class SummaryController(LiveClusterCommandController):
def __init__(self):
self.modifiers = set(["with"])
def _do_default(self, line):
enable_list_view = util.check_arg_and_delete_from_mods(
line=line, arg="-l", default=False, modifiers=self.modifiers, mods=self.mods
)
enable_ssh = util.check_arg_and_delete_from_mods(
line=line,
arg="--enable-ssh",
default=False,
modifiers=self.modifiers,
mods=self.mods,
)
default_user = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-user",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_pwd = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-pwd",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_port = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-port",
return_type=int,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_key = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-key",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
credential_file = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-cf",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
service_stats = util.Future(
self.cluster.info_statistics, nodes=self.nodes
).start()
namespace_stats = util.Future(
self.cluster.info_all_namespace_statistics, nodes=self.nodes
).start()
set_stats = util.Future(
self.cluster.info_set_statistics, nodes=self.nodes
).start()
service_configs = util.Future(
self.cluster.info_get_config, nodes=self.nodes, stanza="service"
).start()
namespace_configs = util.Future(
self.cluster.info_get_config, nodes=self.nodes, stanza="namespace"
).start()
cluster_configs = util.Future(
self.cluster.info_get_config, nodes=self.nodes, stanza="cluster"
).start()
os_version = self.cluster.info_system_statistics(
nodes=self.nodes,
default_user=default_user,
default_pwd=<PASSWORD>,
default_ssh_key=default_ssh_key,
default_ssh_port=default_ssh_port,
credential_file=credential_file,
commands=["lsb"],
collect_remote_data=enable_ssh,
)
kernel_version = self.cluster.info_system_statistics(
nodes=self.nodes,
default_user=default_user,
default_pwd=<PASSWORD>,
default_ssh_key=default_ssh_key,
default_ssh_port=default_ssh_port,
credential_file=credential_file,
commands=["uname"],
collect_remote_data=enable_ssh,
)
server_version = util.Future(
self.cluster.info, "build", nodes=self.nodes
).start()
server_edition = util.Future(
self.cluster.info, "version", nodes=self.nodes
).start()
cluster_name = util.Future(
self.cluster.info, "cluster-name", nodes=self.nodes
).start()
service_stats = service_stats.result()
namespace_stats = namespace_stats.result()
set_stats = set_stats.result()
service_configs = service_configs.result()
namespace_configs = namespace_configs.result()
cluster_configs = cluster_configs.result()
server_version = server_version.result()
server_edition = server_edition.result()
cluster_name = cluster_name.result()
metadata = {}
metadata["server_version"] = {}
metadata["server_build"] = {}
metadata["cluster_name"] = {}
for node, version in server_version.items():
if not version or isinstance(version, Exception):
continue
metadata["server_build"][node] = version
if (
node in server_edition
and server_edition[node]
and not isinstance(server_edition[node], Exception)
):
if "enterprise" in server_edition[node].lower():
metadata["server_version"][node] = "E-%s" % (str(version))
elif "community" in server_edition[node].lower():
metadata["server_version"][node] = "C-%s" % (str(version))
else:
metadata["server_version"][node] = version
else:
metadata["server_version"][node] = version
if (
node in cluster_name
and cluster_name[node]
and not isinstance(cluster_name[node], Exception)
):
metadata["cluster_name"][node] = cluster_name[node]
try:
try:
kernel_version = util.flip_keys(kernel_version)["uname"]
except Exception:
pass
os_version = util.flip_keys(os_version)["lsb"]
if kernel_version:
for node, version in os_version.items():
if not version or isinstance(version, Exception):
continue
if (
node not in kernel_version
or not kernel_version[node]
or isinstance(kernel_version[node], Exception)
):
continue
try:
ov = version["description"]
kv = kernel_version[node]["kernel_release"]
version["description"] = str(ov) + " (%s)" % str(kv)
except Exception:
pass
except Exception:
pass
metadata["os_version"] = os_version
return util.Future(
self.view.print_summary,
common.create_summary(
service_stats=service_stats,
namespace_stats=namespace_stats,
set_stats=set_stats,
metadata=metadata,
service_configs=service_configs,
ns_configs=namespace_configs,
cluster_configs=cluster_configs,
),
list_view=enable_list_view,
)
```
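The controller above repeatedly starts `util.Future(...)` calls before blocking on any `.result()`, so the per-stanza info requests overlap instead of running one after another. The sketch below imitates that pattern with the standard library; it is not the `lib.utils.util.Future` implementation, and `fetch` is a stand-in for the real cluster calls.

```python
# Standard-library imitation of the Future(...).start()/.result() pattern above
# (illustrative only; fetch() stands in for the cluster.info_* calls).
from concurrent.futures import ThreadPoolExecutor

def fetch(stanza):
    return {"stanza": stanza}  # placeholder result

with ThreadPoolExecutor() as pool:
    # Start every request first, then gather results, mirroring the controller.
    pending = {name: pool.submit(fetch, name) for name in ("service", "namespace", "cluster")}
    results = {name: fut.result() for name, fut in pending.items()}
print(results["namespace"])
```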
#### File: lib/log_analyzer/grep_file_controller.py
```python
import logging
from lib.utils import util, constants
from lib.base_controller import ShellException
from .log_analyzer_command_controller import LogAnalyzerCommandController
class _GrepFile(LogAnalyzerCommandController):
def __init__(self, modifiers):
self.modifiers = modifiers
self.logger = logging.getLogger("asadm")
def do_show(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
ignore_strs = []
output_page_size = 10
start_tm = "head"
duration = ""
sources = []
is_and = False
is_casesensitive = True
reading_strings = None
uniq = False
system_grep = False
while tline:
string_read = False
word = tline.pop(0)
if word == "-s":
reading_strings = search_strs
string_read = True
elif word == "-a":
is_and = True
elif word == "-v":
reading_strings = ignore_strs
string_read = True
elif word == "-i":
is_casesensitive = False
elif word == "-u":
uniq = True
elif word == "-sg":
system_grep = True
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif reading_strings is not None:
try:
reading_strings.append(util.strip_string(word))
except Exception:
pass
string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not string_read:
reading_strings = None
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
show_results = self.log_handler.grep(
logs,
search_strs,
ignore_strs=ignore_strs,
is_and=is_and,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
uniq=uniq,
output_page_size=output_page_size,
system_grep=system_grep,
)
page_index = 1
for show_res in show_results:
if show_res:
self.view.show_grep("", show_res[constants.SHOW_RESULT_KEY])
page_index += 1
show_results.close()
def do_count(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
ignore_strs = []
output_page_size = 10
is_and = False
is_casesensitive = True
start_tm = "head"
duration = ""
slice_duration = "600"
sources = []
reading_strings = None
title_every_nth = 0
uniq = False
system_grep = False
while tline:
string_read = False
word = tline.pop(0)
if word == "-s":
reading_strings = search_strs
string_read = True
elif word == "-a":
is_and = True
elif word == "-v":
reading_strings = ignore_strs
string_read = True
elif word == "-i":
is_casesensitive = False
elif word == "-u":
uniq = True
elif word == "-sg":
system_grep = True
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_duration = tline.pop(0)
slice_duration = util.strip_string(slice_duration)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif reading_strings is not None:
try:
reading_strings.append(util.strip_string(word))
except Exception:
pass
string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not string_read:
reading_strings = None
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
count_results = self.log_handler.grep_count(
logs,
search_strs,
ignore_strs=ignore_strs,
is_and=is_and,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
uniq=uniq,
slice_duration=slice_duration,
output_page_size=output_page_size,
system_grep=system_grep,
)
page_index = 1
for count_res in count_results:
if count_res:
self.view.show_grep_count(
"%s(Page-%d)" % ("cluster ", page_index),
count_res,
title_every_nth=title_every_nth,
)
page_index += 1
count_results.close()
def do_diff(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
start_tm = "head"
duration = ""
slice_tm = "10"
output_page_size = 10
show_count = 1
limit = ""
sources = []
is_casesensitive = True
title_every_nth = 0
reading_search_strings = False
search_string_read = False
while tline:
search_string_read = False
word = tline.pop(0)
if word == "-s":
try:
search_strs.append(util.strip_string(tline.pop(0)))
reading_search_strings = True
search_string_read = True
except Exception:
search_strs = []
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_tm = tline.pop(0)
slice_tm = util.strip_string(slice_tm)
elif word == "-k":
show_count = tline.pop(0)
show_count = int(util.strip_string(show_count))
elif word == "-i":
is_casesensitive = False
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif word == "-l" and tline:
limit = tline.pop(0)
limit = int(util.strip_string(limit))
elif reading_search_strings:
try:
search_strs.append(util.strip_string(word))
except Exception:
pass
search_string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not search_string_read:
reading_search_strings = False
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
diff_results = self.log_handler.grep_diff(
logs,
search_strs,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
slice_duration=slice_tm,
every_nth_slice=show_count,
upper_limit_check=limit,
output_page_size=output_page_size,
)
page_index = 1
for diff_res in diff_results:
if diff_res:
self.view.show_grep_diff(
"%s Diff (Page-%d)" % (search_strs[-1], page_index),
diff_res,
title_every_nth=title_every_nth,
)
page_index += 1
diff_results.close()
def do_latency(self, line):
if not line:
raise ShellException(
"Could not understand latency request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
hist = ""
start_tm = "head"
duration = ""
slice_tm = "10"
output_page_size = 10
bucket_count = 3
every_nth_bucket = 3
sources = []
time_rounding = True
title_every_nth = 0
ns = None
show_relative_stats = False
while tline:
word = tline.pop(0)
if word == "-h":
hist = tline.pop(0)
hist = util.strip_string(hist)
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_tm = tline.pop(0)
slice_tm = util.strip_string(slice_tm)
elif word == "-e":
every_nth_bucket = tline.pop(0)
every_nth_bucket = int(util.strip_string(every_nth_bucket))
elif word == "-b":
bucket_count = tline.pop(0)
bucket_count = int(util.strip_string(bucket_count))
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif word == "-o":
time_rounding = False
elif word == "-N":
try:
ns = tline.pop(0)
ns = util.strip_string(ns)
except Exception:
pass
elif word == "--relative-stats":
show_relative_stats = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not hist:
return
ns_hist = ""
if ns:
ns_hist += "%s - " % (ns)
ns_hist += "%s" % (hist)
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info(
"No log files added. Use 'add /path/to/log' command to add log files."
)
latency_results = self.log_handler.loglatency(
logs,
hist,
start_tm_arg=start_tm,
duration_arg=duration,
slice_duration=slice_tm,
bucket_count=bucket_count,
every_nth_bucket=every_nth_bucket,
rounding_time=time_rounding,
output_page_size=output_page_size,
ns=ns,
show_relative_stats=show_relative_stats,
)
page_index = 1
for latency_res in latency_results:
if latency_res:
if not self.view.show_log_latency(
"%s Latency (Page-%d)" % (ns_hist, page_index),
latency_res,
title_every_nth=title_every_nth,
):
break
page_index += 1
latency_results.close()
```
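The four `do_*` handlers in `_GrepFile` share one hand-rolled argument loop: pop a token, branch on the flag, and append bare words to whichever `-s`/`-v` list was opened last. The stripped-down sketch below (not the asadm code, and covering only a few of the flags) shows the shape of that loop:

```python
# Simplified sketch of the flag-parsing loop shared by do_show/do_count/do_diff/do_latency.
def parse_grep_args(tokens):
    search_strs, ignore_strs = [], []
    is_and, reading = False, None
    tokens = tokens[:]
    while tokens:
        word = tokens.pop(0)
        if word == "-s":
            reading = search_strs      # following bare words are search strings
        elif word == "-v":
            reading = ignore_strs      # following bare words are ignore strings
        elif word == "-a":
            is_and, reading = True, None
        elif reading is not None:
            reading.append(word)
        else:
            raise ValueError("Do not understand %r" % word)
    return search_strs, ignore_strs, is_and

# parse_grep_args(["-s", "WARNING", "ERROR", "-v", "heartbeat", "-a"])
# -> (["WARNING", "ERROR"], ["heartbeat"], True)
```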
#### File: lib/log_analyzer/log_analyzer_command_controller.py
```python
from lib.base_controller import CommandController
class LogAnalyzerCommandController(CommandController):
log_handler = None
def __init__(self, log_handler):
LogAnalyzerCommandController.log_handler = log_handler
```
#### File: e2e/live_cluster/test_watch.py
```python
import unittest
import lib.live_cluster.live_cluster_root_controller as controller
import lib.utils.util as util
from test.e2e import util as test_util
from lib.view.sheet import set_style_json
set_style_json()
class TestWatch(unittest.TestCase):
@classmethod
def setUpClass(cls):
TestWatch.rc = controller.LiveClusterRootController(
user="admin", password="<PASSWORD>"
)
actual_out = util.capture_stdout(
TestWatch.rc.execute, ["watch", "1", "3", "info", "network"]
)
TestWatch.output_list = test_util.get_separate_output(actual_out)
def test_watch(self):
info_counter = 0
for item in TestWatch.output_list:
if "Network Information" in item["title"]:
info_counter += 1
self.assertEqual(info_counter, 3)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: unit/view/test_sheet.py
```python
import json
import unittest2 as unittest
from lib.view import sheet
from lib.view.sheet import (
Aggregators,
Converters,
DynamicFields,
Field,
FieldType,
Formatters,
Projectors,
Sheet,
SheetStyle,
Subgroup,
)
def do_render(*args, **kwargs):
do_row = kwargs.pop("do_row", True)
# Make sure column style renders without Exceptions.
kwargs["style"] = SheetStyle.columns
res = sheet.render(*args, **kwargs)
# if res is not None:
# print(res)
if do_row:
        # Make sure row style renders without Exceptions.
kwargs["style"] = SheetStyle.rows
res = sheet.render(*args, **kwargs)
# if res is not None:
# print(res)
# Return the json render for testing.
kwargs["style"] = SheetStyle.json
res = sheet.render(*args, **kwargs)
if res is None:
return res
return json.loads(sheet.render(*args, **kwargs))
def do_render_column(*args, **kwargs):
kwargs["do_row"] = False
return do_render(*args, **kwargs)
class SheetTest(unittest.TestCase):
def test_sheet_json_format(self):
"""Verify JSON sheet format."""
test_sheet = Sheet(
(
Field(
"F",
Projectors.String("d", "f"),
converter=lambda edata: edata.value.upper(),
),
),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f="v")))
render = do_render(test_sheet, "test", sources)
self.assertIn("groups", render)
self.assertEqual(len(render["groups"]), 1)
group = render["groups"][0]
self.assertIn("records", group)
self.assertEqual(len(group["records"]), 1)
record = group["records"][0]
self.assertIn("F", record)
value = record["F"]
self.assertIn("raw", value)
self.assertIn("converted", value)
self.assertEqual(value["raw"], "v")
self.assertEqual(value["converted"], "V")
def test_sheet_project_string(self):
test_sheet = Sheet((Field("F", Projectors.String("d", "f")),), from_source="d")
sources = dict(d=dict(n0=dict(f="v")))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], "v")
self.assertEqual(value["converted"], "v")
def test_sheet_project_boolean(self):
test_sheet = Sheet((Field("F", Projectors.Boolean("d", "f")),), from_source="d")
samples = [
("true", True, "True"),
("True", True, "True"),
("Garbage", True, "True"),
(True, True, "True"),
("false", False, "False"),
("False", False, "False"),
(False, False, "False"),
]
def test_it(sample):
sources = dict(d=dict(n0=dict(f=sample[0])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[1], str(sample))
self.assertEqual(value["converted"], sample[2], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_project_float(self):
test_sheet = Sheet((Field("F", Projectors.Float("d", "f")),), from_source="d")
samples = [
("1.25", 1.25, "1.25"),
(1.25, 1.25, "1.25"),
("42", 42.0, "42.0"),
(42, 42.0, "42.0"),
]
def test_it(sample):
sources = dict(d=dict(n0=dict(f=sample[0])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[1], str(sample))
self.assertEqual(value["converted"], sample[2], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_project_number(self):
test_sheet = Sheet((Field("F", Projectors.Number("d", "f")),), from_source="d")
samples = [("42", 42, "42"), (42, 42, "42"), ("1.25", 1, "1"), (1.25, 1, "1")]
def test_it(sample):
sources = dict(d=dict(n0=dict(f=sample[0])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[1], str(sample))
self.assertEqual(value["converted"], sample[2], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_project_percent(self):
samples = [
("42", False, 42, "42"),
(42, False, 42, "42"),
("1.25", False, 1, "1"),
(1.25, False, 1, "1"),
("42", True, 58, "58"), # inverts
(42, True, 58, "58"),
("1.25", True, 99, "99"),
(1.25, True, 99, "99"),
]
def test_it(sample):
test_sheet = Sheet(
(Field("F", Projectors.Percent("d", "f", invert=sample[1])),),
from_source="d",
)
sources = dict(d=dict(n0=dict(f=sample[0])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[2], str(sample))
self.assertEqual(value["converted"], sample[3], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_project_sum(self):
test_sheet = Sheet(
(
Field(
"F",
Projectors.Sum(
Projectors.Number("d", "f0"), Projectors.Number("d", "f1")
),
),
),
from_source="d",
)
samples = [("42", "58", 100, "100"), (42, 58, 100, "100")]
def test_it(sample):
sources = dict(d=dict(n0=dict(f0=sample[0], f1=sample[1])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[2], str(sample))
self.assertEqual(value["converted"], sample[3], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_project_func(self):
test_sheet = Sheet(
(
Field(
"F",
Projectors.Func(
FieldType.boolean,
lambda *values: 42 in values,
Projectors.Number("d", "f0"),
Projectors.Number("d", "f1"),
),
),
),
from_source="d",
)
samples = [
("42", "58", True, "True"),
(42, 58, True, "True"),
(422, 588, False, "False"),
]
def test_it(sample):
sources = dict(d=dict(n0=dict(f0=sample[0], f1=sample[1])))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["raw"], sample[2], str(sample))
self.assertEqual(value["converted"], sample[3], str(sample))
for sample in samples:
test_it(sample)
def test_sheet_no_entry(self):
test_sheet = Sheet(
(Field("F", Projectors.String("d", "f"), hidden=False),), from_source=("d")
)
sources = dict(d=dict(n0={}))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["converted"], test_sheet.no_entry)
def test_sheet_error_entry(self):
test_sheet = Sheet(
(Field("F", Projectors.String("d", "f")),), from_source=("d",)
)
sources = dict(d=dict(n0=Exception("error")))
render = do_render(test_sheet, "test", sources)
value = render["groups"][0]["records"][0]["F"]
self.assertEqual(value["converted"], test_sheet.error_entry)
def test_sheet_flat_value(self):
test_sheet = Sheet(
(Field("F", Projectors.String("d", None)),), from_source=("d",)
)
sources = dict(d=dict(n0="success"))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["F"]["raw"], "success")
def test_sheet_indexed_value(self):
test_sheet = Sheet((Field("F", Projectors.String("d", 1)),), from_source=("d",))
sources = dict(d=dict(n0=["fail", "success", "fail"]))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["F"]["raw"], "success")
def test_sheet_aggregation(self):
test_sheet = Sheet(
(Field("F", Projectors.Number("d", "f"), aggregator=Aggregators.sum()),),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f="1"), n1=dict(f="1"), n2=dict(f="1")))
render = do_render(test_sheet, "test", sources)
group = render["groups"][0]
self.assertIn("aggregates", group)
value = group["aggregates"]["F"]
self.assertEqual(value["raw"], 3)
def test_sheet_aggregation_no_entry(self):
test_sheet = Sheet(
(
Field(
"F",
Projectors.Number("d", "f"),
hidden=False,
aggregator=Aggregators.sum(),
),
),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(), n1=dict(), n2=dict()))
render = do_render(test_sheet, "test", sources)
group = render["groups"][0]
self.assertIn("aggregates", group)
value = group["aggregates"]["F"]
self.assertEqual(value["raw"], "null")
self.assertEqual(value["converted"], test_sheet.no_entry)
def test_sheet_aggregation_error_entry(self):
test_sheet = Sheet(
(Field("F", Projectors.Number("d", "f"), aggregator=Aggregators.sum()),),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f="1"), n1=Exception("err"), n2=dict(f="1")))
render = do_render(test_sheet, "test", sources)
group = render["groups"][0]
self.assertIn("aggregates", group)
value = group["aggregates"]["F"]
self.assertEqual(value["raw"], "error")
self.assertEqual(value["converted"], test_sheet.error_entry)
def test_sheet_tuple_field(self):
test_sheet = Sheet(
(
Subgroup(
"T",
(
Field("F0", Projectors.Number("d", "f0")),
Field("F1", Projectors.Number("d", "f1")),
),
),
Field("F2", Projectors.Number("d", "f2")),
),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f0="0", f1="1", f2="2")))
render = do_render_column(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertIn("F2", record)
self.assertEqual(record["F2"]["raw"], 2)
self.assertIn("T", record)
t = record["T"]
self.assertIn("F0", t)
self.assertIn("F1", t)
self.assertEqual(t["F0"]["raw"], 0)
self.assertEqual(t["F1"]["raw"], 1)
def test_sheet_tuple_field_hide_one(self):
test_sheet = Sheet(
(
Subgroup(
"T",
(
Field("F0", Projectors.Number("d", "f0")),
Field("F1", Projectors.Number("d", "f1")),
),
),
Field("F2", Projectors.Number("d", "f2")),
),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f1="1", f2="2")))
render = do_render_column(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertIn("F2", record)
self.assertEqual(record["F2"]["raw"], 2)
self.assertIn("T", record)
t = record["T"]
self.assertNotIn("F0", t)
self.assertIn("F1", t)
self.assertEqual(t["F1"]["raw"], 1)
def test_sheet_tuple_field_hide_all(self):
test_sheet = Sheet(
(
Subgroup(
"T",
(
Field("F0", Projectors.Number("d", "f0")),
Field("F1", Projectors.Number("d", "f1")),
),
),
Field("F2", Projectors.Number("d", "f2")),
),
from_source=("d",),
)
sources = dict(d=dict(n0=dict(f2="2")))
render = do_render_column(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertIn("F2", record)
self.assertEqual(record["F2"]["raw"], 2)
self.assertNotIn("T", record)
def test_sheet_converter_with_common(self):
test_sheet = Sheet(
(
Field(
"F",
Projectors.String("d", "f"),
converter=lambda edata: "success"
if edata.value == edata.common["expected"]
else "failure",
),
),
from_source="d",
)
sources = dict(d=dict(n0=dict(f="check")))
common = dict(expected="check")
render = do_render_column(test_sheet, "test", sources, common=common)
record = render["groups"][0]["records"][0]
self.assertEqual(record["F"]["raw"], "check")
self.assertEqual(record["F"]["converted"], "success")
def test_sheet_group_by(self):
test_sheet = Sheet(
(
Field("G", Projectors.String("d", "g")),
Field("F", Projectors.Number("d", "f")),
),
from_source="d",
group_by="G",
)
sources = dict(d=dict(n0=dict(g="a", f=0), n1=dict(g="b", f=1)))
render = do_render_column(test_sheet, "test", sources)
self.assertEqual(len(render["groups"]), 2)
group0 = render["groups"][0]
group1 = render["groups"][1]
self.assertNotIn("aggregates", group0)
self.assertNotIn("aggregates", group1)
record0 = group0["records"][0]
record1 = group1["records"][0]
self.assertEqual(record0["F"]["raw"], 0)
self.assertEqual(record1["F"]["raw"], 1)
def test_sheet_group_by_aggregation(self):
test_sheet = Sheet(
(
Field("G", Projectors.String("d", "g")),
Field("F", Projectors.Number("d", "f"), aggregator=Aggregators.count()),
),
from_source="d",
group_by="G",
)
sources = dict(
d=dict(n0=dict(g="a", f=1), n1=dict(g="a", f=1), n2=dict(g="b", f=3))
)
render = do_render_column(test_sheet, "test", sources)
self.assertEqual(len(render["groups"]), 2)
aggr0 = render["groups"][0]["aggregates"]
aggr1 = render["groups"][1]["aggregates"]
self.assertEqual(aggr0["F"]["raw"], 2)
self.assertEqual(aggr0["F"]["converted"], "2")
self.assertEqual(aggr1["F"]["raw"], 1)
def test_sheet_group_by_composite(self):
test_sheet = Sheet(
(
Field("G0", Projectors.String("d", "g0")),
Field("G1", Projectors.Boolean("d", "g1")),
Field("F", Projectors.Number("d", "f"), aggregator=Aggregators.count()),
),
from_source="d",
group_by=("G0", "G1"),
)
sources = dict(
d=dict(
n0=dict(g0="a", g1="true", f=0),
n1=dict(g0="a", g1="false", f=1),
n2=dict(g0="b", g1="false", f=2),
n3=dict(g0="b", g1="false", f=3),
)
)
render = do_render_column(test_sheet, "test", sources)
groups = render["groups"]
self.assertEqual(len(groups), 3)
self.assertEqual(len(groups[0]["records"]), 1)
self.assertEqual(len(groups[1]["records"]), 1)
self.assertEqual(len(groups[2]["records"]), 2)
def test_sheet_order_by(self):
test_sheet = Sheet(
(Field("F", Projectors.Number("d", "f")),), from_source="d", order_by=("F")
)
sources = dict(d=dict(n0=dict(f=2), n1=dict(f=1), n2=dict(f=0)))
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
v0 = records[0]["F"]["raw"]
v2 = records[2]["F"]["raw"]
self.assertEqual(v0, 0)
self.assertEqual(v2, 2)
def test_sheet_order_by_composite(self):
test_sheet = Sheet(
(
Field("G", Projectors.String("d", "g")),
Field("F", Projectors.Number("d", "f")),
),
from_source="d",
order_by=("G", "F"),
)
sources = dict(
d=dict(
n0=dict(g="a", f=2),
n1=dict(g="a", f=0),
n2=dict(g="b", f=1),
n3=dict(g="b", f=3),
)
)
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
self.assertEqual(records[0]["F"]["raw"], 0)
self.assertEqual(records[1]["F"]["raw"], 2)
self.assertEqual(records[2]["F"]["raw"], 1)
self.assertEqual(records[3]["F"]["raw"], 3)
def test_sheet_for_each_flat_value(self):
test_sheet = Sheet(
(
Field("E", Projectors.String("ed", None, for_each_key=True)),
Field("F", Projectors.Number("ed", None, for_each_key=False)),
),
from_source="ed",
for_each="ed",
)
sources = dict(ed=dict(n0=dict(a=1, b=2, c=3), n1=dict(a=11)))
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
self.assertEqual(len(records), 4)
def test_sheet_for_each_key_value(self):
test_sheet = Sheet(
(
Field("E", Projectors.String("ed", None, for_each_key=True)),
Field("F", Projectors.String("ed", "f")),
),
from_source="ed",
for_each="ed",
group_by="E",
)
sources = dict(
ed=dict(
n0=dict(a=dict(f="success")),
n1=dict(a=dict(f="success"), b=dict(f="success")),
)
)
render = do_render_column(test_sheet, "test", sources)
self.assertEqual(len(render["groups"]), 2)
record0 = render["groups"][0]["records"][0]
record1 = render["groups"][1]["records"][0]
self.assertEqual(record0["E"]["raw"], "a")
self.assertEqual(record0["F"]["raw"], "success")
self.assertEqual(record1["E"]["raw"], "b")
self.assertEqual(record1["F"]["raw"], "success")
def test_sheet_for_each_indexed_value(self):
test_sheet = Sheet(
(
Field("E0", Projectors.Number("ed", 0, for_each_key=True)),
Field("E1", Projectors.Number("ed", 1, for_each_key=True)),
Field("F", Projectors.Number("ed", 1, for_each_key=False)),
),
from_source="ed",
for_each="ed",
)
sources = dict(ed=dict(n0={(0, 0): (1, 0)}))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["E0"]["raw"], 0)
self.assertEqual(record["E1"]["raw"], 0)
self.assertEqual(record["F"]["raw"], 0)
def test_sheet_for_each_error_entry(self):
test_sheet = Sheet(
(Field("F", Projectors.Number("ed", "f")),),
from_source="ed",
for_each="ed",
)
sources = dict(ed=dict(n0=dict(a=Exception())))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["F"]["raw"], "error")
self.assertEqual(record["F"]["converted"], test_sheet.error_entry)
def test_sheet_for_each_no_entry(self):
test_sheet = Sheet(
(Field("F", Projectors.Number("ed", "f"), hidden=False),),
from_source="ed",
for_each="ed",
)
sources = dict(ed=dict(n0=dict(a=dict()),))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["F"]["raw"], "null")
self.assertEqual(record["F"]["converted"], test_sheet.no_entry)
def test_sheet_converter_byte(self):
converter = Converters.byte
test_sheet = Sheet(
(
Field("U", Projectors.Number("d", "u"), converter=converter),
Field("K", Projectors.Number("d", "k"), converter=converter),
Field("M", Projectors.Number("d", "m"), converter=converter),
Field("G", Projectors.Number("d", "g"), converter=converter),
Field("T", Projectors.Number("d", "t"), converter=converter),
Field("P", Projectors.Number("d", "p"), converter=converter),
),
from_source="d",
)
u = 2
k = u * 1024
m = k * 1024
g = m * 1024
t = g * 1024
p = t * 1024
sources = dict(d=dict(n0=dict(u=u, k=k, m=m, g=g, t=t, p=p)))
render = do_render(test_sheet, "test", sources)
record = render["groups"][0]["records"][0]
self.assertEqual(record["U"]["raw"], u)
self.assertEqual(record["K"]["raw"], k)
self.assertEqual(record["M"]["raw"], m)
self.assertEqual(record["G"]["raw"], g)
self.assertEqual(record["T"]["raw"], t)
self.assertEqual(record["P"]["raw"], p)
self.assertEqual(record["U"]["converted"], "2.000 B")
self.assertEqual(record["K"]["converted"], "2.000 KB")
self.assertEqual(record["M"]["converted"], "2.000 MB")
self.assertEqual(record["G"]["converted"], "2.000 GB")
self.assertEqual(record["T"]["converted"], "2.000 TB")
self.assertEqual(record["P"]["converted"], "2.000 PB")
def test_sheet_formatters(self):
red = 1000
yellow = 100
green = 10
none = 1
test_sheet = Sheet(
(
Field(
"F",
Projectors.Number("d", "f"),
formatters=(
Formatters.red_alert(lambda edata: edata.value >= red),
Formatters.yellow_alert(lambda edata: edata.value >= yellow),
Formatters.green_alert(lambda edata: edata.value >= green),
),
),
),
from_source="d",
)
sources = dict(
d=dict(n0=dict(f=red), n1=dict(f=yellow), n2=dict(f=green), n3=dict(f=none))
)
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
self.assertEqual(len(records), 4)
for record in records:
if record["F"]["raw"] == red:
self.assertEqual(record["F"]["format"], "red-alert")
elif record["F"]["raw"] == yellow:
self.assertEqual(record["F"]["format"], "yellow-alert")
elif record["F"]["raw"] == green:
self.assertEqual(record["F"]["format"], "green-alert")
elif record["F"]["raw"] == none:
self.assertNotIn("format", record["F"])
else:
assert False, "illegal record value {}".format(record)
def test_sheet_dynamic_field_exception(self):
test_sheet = Sheet((DynamicFields("d"),), from_source="d")
sources = dict(
d=dict(n0=Exception("error"), n2=dict(f=1, g=1), n3=dict(f=1, g=1))
)
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
self.assertEqual(len(records), 3)
self.assertEqual(records[0]["g"]["raw"], "error")
for record in records:
self.assertEqual(len(record), 2)
def test_sheet_dynamic_field_exception_all(self):
test_sheet = Sheet((DynamicFields("d"),), from_source="d")
sources = dict(
d=dict(n0=Exception("error"), n2=Exception("error"), n3=Exception("error"))
)
render = do_render(test_sheet, "test", sources)
self.assertEqual(render, None)
def test_sheet_dynamic_field_required(self):
test_sheet = Sheet(
(
Field("F", Projectors.Number("d", "f")),
DynamicFields("d", required=True),
),
from_source="d",
)
sources = dict(d=dict(n0=dict(f=1, g=1), n2=dict(f=1), n3=dict(f=1)))
render = do_render(test_sheet, "test", sources, selectors=["f"])
records = render["groups"][0]["records"]
self.assertEqual(len(records), 3)
render = do_render(test_sheet, "test", sources, selectors=["nothing"])
self.assertEqual(render, None)
def test_sheet_dynamic_field(self):
test_sheet = Sheet((DynamicFields("d"),), from_source="d")
sources = dict(d=dict(n0=dict(f=1, g=1), n2=dict(f=1, g=1), n3=dict(f=1, g=1)))
render = do_render(test_sheet, "test", sources)
records = render["groups"][0]["records"]
self.assertEqual(len(records), 3)
for record in records:
self.assertEqual(len(record), 2)
def test_sheet_dynamic_field_selector(self):
test_sheet = Sheet((DynamicFields("d"),), from_source="d")
sources = dict(d=dict(n0=dict(f=1, g=1), n2=dict(f=1, g=1), n3=dict(f=1, g=1)))
render = do_render(test_sheet, "test", sources, selectors=["f"])
records = render["groups"][0]["records"]
self.assertEqual(len(records), 3)
for record in records:
self.assertEqual(len(record), 1)
self.assertEqual(list(record.keys())[0], "f")
def numeric_sum_selector(self, key, is_numeric):
if is_numeric:
return Aggregators.sum()
def test_sheet_dynamic_field_aggregator(self):
test_sheet = Sheet(
(DynamicFields("d", aggregator_selector=self.numeric_sum_selector),),
from_source="d",
)
sources = dict(d=dict(n0=dict(f=1, g=1), n2=dict(f=1, g=1), n3=dict(f=1, g=1)))
render = do_render(test_sheet, "test", sources)
aggrs = render["groups"][0]["aggregates"]
self.assertEqual(len(aggrs), 2)
for aggr in aggrs.values():
self.assertEqual(aggr["raw"], 3)
def test_sheet_dynamic_field_aggregator_exception(self):
test_sheet = Sheet(
(DynamicFields("d", aggregator_selector=self.numeric_sum_selector),),
from_source="d",
)
sources = dict(
d=dict(n0=Exception("error"), n2=dict(f=1, g=1), n3=dict(f=1, g=1))
)
render = do_render(test_sheet, "test", sources)
aggrs = render["groups"][0]["aggregates"]
self.assertEqual(len(aggrs), 2)
for aggr in aggrs.values():
self.assertEqual(aggr["raw"], "error")
def test_sheet_dynamic_field_aggregator_missing(self):
test_sheet = Sheet(
(DynamicFields("d", aggregator_selector=self.numeric_sum_selector),),
from_source="d",
)
sources = dict(d=dict(n0=dict(f=1), n2=dict(f=1, g=1), n3=dict(f=1, g=1)))
render = do_render(test_sheet, "test", sources)
aggrs = render["groups"][0]["aggregates"]
self.assertEqual(len(aggrs), 2)
self.assertEqual(aggrs["g"]["raw"], 2)
self.assertEqual(aggrs["f"]["raw"], 3)
def test_sheet_dynamic_field_diff(self):
test_sheet = Sheet((DynamicFields("d"),), from_source="d")
sources = dict(
d=dict(n0=dict(f=2, g=1), n2=dict(f=1, g=1), n3=dict(meh=1), n4=Exception())
)
render = do_render(test_sheet, "test", sources, dynamic_diff=True)
records = render["groups"][0]["records"]
for record in records:
self.assertTrue("g" not in record)
self.assertTrue("f" in record)
def test_sheet_dynamic_field_diff_and_group_by(self):
test_sheet = Sheet(
(Field("group", Projectors.String("group", None)), DynamicFields("d"),),
from_source=("d", "group"),
group_by="group",
)
sources = dict(
d=dict(
n0=dict(f=1, g=2),
n1=dict(f=1, g=2),
n2=dict(f=3, g=4),
n3=dict(f=4, g=3),
n4=dict(f=5, g=6),
n5=dict(f=5, g=7),
n6=dict(f=8, g=9),
),
group=dict(
n0="group0",
n1="group0",
n2="group1",
n3="group1",
n4="group2",
n5="group2",
n6="group3",
),
)
render = do_render(test_sheet, "test", sources, dynamic_diff=True)
groups = render["groups"]
self.assertEqual(len(groups), 4)
group0 = groups[0]
for record in group0["records"]:
self.assertTrue("group" in record)
self.assertTrue("group0" in record["group"]["raw"])
self.assertEqual(record["f"]["raw"], None)
self.assertEqual(record["g"]["raw"], None)
group1 = groups[1]
group1record0 = group1["records"][0]
group1record1 = group1["records"][1]
for record in group1["records"]:
self.assertTrue("group" in record)
self.assertTrue("group1" in record["group"]["raw"])
self.assertEqual(group1record0["f"]["raw"], 3)
self.assertEqual(group1record0["g"]["raw"], 4)
self.assertEqual(group1record1["f"]["raw"], 4)
self.assertEqual(group1record1["g"]["raw"], 3)
group2 = groups[2]
group2record0 = group2["records"][0]
group2record1 = group2["records"][1]
for record in group2["records"]:
self.assertTrue("group" in record)
self.assertTrue("group2" in record["group"]["raw"])
self.assertEqual(group2record0["f"]["raw"], None)
self.assertEqual(group2record0["g"]["raw"], 6)
self.assertEqual(group2record1["f"]["raw"], None)
self.assertEqual(group2record1["g"]["raw"], 7)
group3record0 = groups[3]["records"][0]
self.assertTrue("group" in group3record0)
self.assertTrue("group3" in group3record0["group"]["raw"])
self.assertEqual(group3record0["f"]["raw"], None)
self.assertEqual(group3record0["g"]["raw"], None)
# def test_sheet_dynamic_field_every_nth_row(self):
# pass
# def test_sheet_dynamic_field_every_nth_column(self):
# pass
# def test_title_field(self):
# pass
# def test_sheet_nested_dict_source(self):
# pass
```
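Distilled from the tests above, typical `Sheet` usage is: declare the columns with `Field`/`Projectors` (optionally attaching an aggregator), name the source the sheet reads from, and hand a sources dict to `sheet.render`. The data below is made up; the API calls mirror the tests.

```python
# Minimal Sheet usage distilled from the tests above (hypothetical data).
from lib.view import sheet
from lib.view.sheet import Aggregators, Field, Projectors, Sheet, SheetStyle

node_sheet = Sheet(
    (
        Field("Node", Projectors.String("stats", "node")),
        Field("Objects", Projectors.Number("stats", "objects"), aggregator=Aggregators.sum()),
    ),
    from_source=("stats",),
)
sources = dict(stats=dict(n0=dict(node="n0", objects=10), n1=dict(node="n1", objects=32)))
print(sheet.render(node_sheet, "node objects", sources, style=SheetStyle.json))
```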
|
{
"source": "jfwm2/gourde",
"score": 3
}
|
#### File: gourde/example/app.py
```python
import argparse
import flask
from gourde import Gourde
# Optional API.
try:
import flask_restplus
except ImportError:
flask_restplus = None
class Error(Exception):
"""All local errors."""
pass
# This could be as simple as :
# gourde = Gourde(app)
# app = gourde.app
# More complicated example:
gourde = Gourde(__name__)
app = gourde.app
# Override the default index
@app.route("/")
def index():
return flask.render_template("index.html")
# Add a new page.
@app.route("/example")
def example():
return flask.render_template("example.html")
# Create a custom health check callback.
def is_healthy():
"""Custom "health" check."""
import random
if random.random() > 0.5:
raise Error()
return True
if flask_restplus:
class HelloWorld(flask_restplus.Resource):
def get(self):
return {"hello": "world"}
def initialize_api(flask_app):
"""Initialize an API."""
if not flask_restplus:
return
api = flask_restplus.Api(version="1.0", title="My Example API")
api.add_resource(HelloWorld, "/hello")
blueprint = flask.Blueprint("api", __name__, url_prefix="/api")
api.init_app(blueprint)
flask_app.register_blueprint(blueprint)
def initialize_app(flask_app, args):
"""Initialize the App."""
# Setup gourde with the args.
gourde.setup(args)
# Register a custom health check.
gourde.is_healthy = is_healthy
# Add an optional API
initialize_api(flask_app)
def main():
# Setup a custom parser.
parser = argparse.ArgumentParser(description="Example")
parser = Gourde.get_argparser(parser)
args = parser.parse_args()
initialize_app(app, args)
# Start the application.
gourde.run()
if __name__ == "__main__":
main()
```
|
{
"source": "jfwood/pyrox",
"score": 4
}
|
#### File: pyrox/server/routing.py
```python
class RoutingHandler(object):
def __init__(self, default_routes):
self._default_routes = default_routes
self._last_default = 0
self._next_route = None
def set_next(self, next_route):
if next_route:
if isinstance(next_route, str):
if ':' in next_route:
split_route = next_route.split(':')
self._next_route = (split_route[0], int(split_route[1]))
else:
self._next_route = (next_route, 80)
elif isinstance(next_route, tuple) and len(next_route) == 2:
self._next_route = next_route
elif isinstance(next_route, list) and len(next_route) == 2:
self._next_route = (next_route[0], next_route[1])
else:
raise TypeError("""A route must be either a string
following the "<host>:<port>" format or a tuple or a list that contains
the host at element 0 and the port at element 1""")
def get_next(self):
if self._next_route:
next = self._next_route
self._next_route = None
else:
self._last_default += 1
idx = self._last_default % len(self._default_routes)
next = self._default_routes[idx]
return next
```
|
{
"source": "jfy133/adrsm",
"score": 2
}
|
#### File: adrsm/lib/adrsmlib.py
```python
import sys
import requests
import os
import subprocess
from numpy import random as npr
import multiprocessing
import pickle
from functools import partial
from pkg_resources import resource_filename
from . import sequencefunctions as sf
from . import markov as mk
from xopen import xopen
def parse_yes_no(astring):
if "yes" in astring:
return True
elif "no" in astring:
return False
else:
sys.exit("Please specify deamination (yes | no)")
def get_basename(file_name):
if ("/") in file_name:
basename = file_name.split("/")[-1].split(".")[0]
else:
basename = file_name.split(".")[0]
return basename
def read_fasta(file_name):
"""
    READS FASTA FILE, RETURNS CONCATENATED SEQUENCE AND ITS LENGTH
    INPUT:
        file_name(string): path to fasta file
    OUTPUT:
        [result, length]: all of the sequences in the fasta file concatenated, plus the total length
"""
result = ""
# fastadict = {}
if file_name.endswith(".gz"):
with xopen(file_name, "r") as f:
for line in f:
if line[0] == ">":
# seqname = line[1:]
# fastadict[seqname] = []
continue
else:
line = line.rstrip()
# fastadict[seqname].append(line)
result += line
else:
with open(file_name, "r") as f:
for line in f:
if line[0] == ">":
# seqname = line[1:]
# fastadict[seqname] = []
continue
else:
line = line.rstrip()
# fastadict[seqname].append(line)
result += line
return [result, len(result)]
def write_fastq_multi(fastq_list, outputfile, compressed=True):
if compressed:
with xopen(outputfile + ".1.fastq.gz", "ab") as f1:
with xopen(outputfile + ".2.fastq.gz", "ab") as f2:
for read in fastq_list:
f1.write(read[0].encode())
f2.write(read[1].encode())
else:
with open(outputfile + ".1.fastq", "a") as f1:
with open(outputfile + ".2.fastq", "a") as f2:
for read in fastq_list:
f1.write(read[0])
f2.write(read[1])
def markov_wrapper_fwd(times):
a = 0
while a == 0:
a = mk.mchain(
starts=MARKOV_START_FWD,
kmers=MARKOV_DICT_FWD,
readsize=READSIZE,
order=MARKOV_ORDER,
)
return a
def markov_wrapper_rev(times):
a = 0
while a == 0:
a = mk.mchain(
starts=MARKOV_START_REV,
kmers=MARKOV_DICT_REV,
readsize=READSIZE,
order=MARKOV_ORDER,
)
return a
def markov_multi_fwd(process, nreads):
myIter = range(nreads)
with multiprocessing.Pool(process) as p:
r = p.map(markov_wrapper_fwd, myIter)
return r
def markov_multi_rev(process, nreads):
myIter = range(nreads)
with multiprocessing.Pool(process) as p:
r = p.map(markov_wrapper_rev, myIter)
return r
def get_fwd_qual():
try:
ret = pickle.load(open("data/quality/fwd_qual.p", "rb"))
return ret
except FileNotFoundError:
path = resource_filename("adrsm", "/data/quality/fwd_qual.p")
ret = pickle.load(open(path, "rb"))
return ret
def get_rev_qual():
try:
        ret = pickle.load(open("data/quality/rev_qual.p", "rb"))
return ret
except FileNotFoundError:
path = resource_filename("adrsm", "/data/quality/rev_qual.p")
ret = pickle.load(open(path, "rb"))
return ret
def multi_run(
iterables,
name,
mutate,
mutrate,
damage,
geom_p,
themin,
themax,
fwd_adaptor,
rev_adaptor,
read_length,
process,
):
partial_run = partial(
sf.generate_fq,
name=name,
mutate=mutate,
mutrate=mutrate,
damage=damage,
geom_p=geom_p,
themin=themin,
themax=themax,
fwd_adaptor=fwd_adaptor,
rev_adaptor=rev_adaptor,
read_length=read_length,
)
with multiprocessing.Pool(process) as p:
r = p.map(partial_run, iterables)
return r
def run_read_simulation_multi(
INFILE,
COV,
READLEN,
INSERLEN,
NBINOM,
A1,
A2,
MINLENGTH,
MUTATE,
MUTRATE,
AGE,
DAMAGE,
GEOM_P,
THEMIN,
THEMAX,
PROCESS,
):
print("===================\n===================")
print("Genome: ", INFILE)
print("Coverage: ", COV)
print("Read length: ", READLEN)
print("Mean Insert length: ", INSERLEN)
print("n parameter for Negative Binomial insert length distribution: ", NBINOM)
print("Adaptor 1: ", A1)
print("Adaptor 2: ", A2)
print("Mutation rate (bp/year):", MUTRATE)
print("Age (years):", AGE)
print("Deamination:", DAMAGE)
nread = None
global READSIZE
global MARKOV_ORDER
global QUALIT_FWD
global MARKOV_SEED_FWD
global MARKOV_START_FWD
global MARKOV_DICT_FWD
global QUALIT_REV
global MARKOV_SEED_REV
global MARKOV_START_REV
global MARKOV_DICT_REV
READSIZE = READLEN
basename = get_basename(INFILE)
fasta = read_fasta(INFILE)
nread = int((fasta[1] / INSERLEN) * COV)
print("Number of reads: ", nread)
print("-------------------")
MARKOV_ORDER = 10
QUALIT_FWD = get_fwd_qual()
QUALIT_REV = get_rev_qual()
MARKOV_SEED_FWD = mk.generate_kmer(
qualities=QUALIT_FWD, order=MARKOV_ORDER, readsize=READLEN
)
MARKOV_SEED_REV = mk.generate_kmer(
qualities=QUALIT_REV, order=MARKOV_ORDER, readsize=READLEN
)
MARKOV_START_FWD = MARKOV_SEED_FWD[0]
MARKOV_START_REV = MARKOV_SEED_REV[0]
MARKOV_DICT_FWD = MARKOV_SEED_FWD[1]
MARKOV_DICT_REV = MARKOV_SEED_REV[1]
# negative_binomial parameters
prob = NBINOM / (NBINOM + INSERLEN)
fragment_lengths = npr.negative_binomial(NBINOM, prob, nread)
# Define Mutation rate
if MUTATE:
correct_mutrate = (MUTRATE * AGE) / fasta[1]
else:
correct_mutrate = 0
# Prepare fragments and errors
all_fragments = sf.random_insert(fasta, fragment_lengths, READLEN, MINLENGTH)
fwd_illu_err = markov_multi_fwd(process=PROCESS, nreads=len(all_fragments))
rev_illu_err = markov_multi_rev(process=PROCESS, nreads=len(all_fragments))
runlist = sf.prepare_run(
all_frag=all_fragments, all_fwd_err=fwd_illu_err, all_rev_err=rev_illu_err
)
result = multi_run(
iterables=runlist,
name=basename,
mutate=MUTATE,
mutrate=correct_mutrate,
damage=DAMAGE,
geom_p=GEOM_P,
themin=THEMIN,
themax=THEMAX,
fwd_adaptor=A1,
rev_adaptor=A2,
read_length=READLEN,
process=PROCESS,
)
# write_fastq_multi(fastq_list=result, outputfile=FASTQ_OUT)
return result, [nread * INSERLEN, INSERLEN, COV, DAMAGE]
def specie_to_taxid(specie):
"""
Takes a specie_name (ex: Mus_musculus), makes a call to JGI
taxonomy API, and returns taxonomy id.
INPUT:
specie(string) ex: "Mus musculus"
OUPUT:
taxid(str) "10090"
"""
request = "http://taxonomy.jgi-psf.org/tax/pt_name/" + specie
response = requests.get(request)
answer = response.text
return answer
def write_stat(stat_dict, stat_out):
nbases = []
for akey in stat_dict:
nbases.append(stat_dict[akey][0])
totbases = sum(nbases)
with open(stat_out, "w") as fs:
fs.write(
"Organism,taxonomy_id,percentage of metagenome,mean_insert_length,target_coverage,deamination\n"
)
for akey in stat_dict:
taxid = specie_to_taxid(akey)
fs.write(
akey
+ ","
+ str(taxid)
+ ","
+ str(round(stat_dict[akey][0] / totbases, 2))
+ ","
+ str(stat_dict[akey][1])
+ ","
+ str(stat_dict[akey][2])
+ ","
+ str(stat_dict[akey][3])
+ "\n"
)
```
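One detail in `run_read_simulation_multi` worth spelling out is the insert-length model: with `prob = NBINOM / (NBINOM + INSERLEN)`, numpy's `negative_binomial(n, p)` has mean `n * (1 - p) / p`, which works out to exactly `INSERLEN`, so simulated fragments average the requested insert length. A quick sanity check with made-up values:

```python
# Sanity check of the insert-length parameterisation used above:
# mean of negative_binomial(n, p) is n * (1 - p) / p, which equals INSERLEN here.
from numpy import random as npr

NBINOM, INSERLEN = 8, 150
prob = NBINOM / (NBINOM + INSERLEN)
draws = npr.negative_binomial(NBINOM, prob, 100000)
print(draws.mean())  # close to 150
```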
|
{
"source": "jfy133/MultiQC_TestData",
"score": 3
}
|
#### File: MultiQC_TestData/unit_tests/test_samtools_flagstat.py
```python
import unittest
import sys, os, math
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
from multiqc.modules.samtools.flagstat import parse_single_report
def slurp_file(fname):
with open(os.path.dirname(__file__) + '/../data/modules/samtools/flagstat/' + fname) as fh:
return fh.read()
# From samtools 1.3
rep1 = slurp_file('small.samtools13.flagstat.log.txt')
# Same BAM file in samools 1.2
rep2 = slurp_file('small.samtools12.flagstat.log.txt')
class T(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def test_rep1(self):
"""Test that parsing rep1 produces expected results
"""
res1 = parse_single_report(rep1)
#I expect 13 + 13 + 3 + 3 + 1 things reported in total
self.assertEqual(len(res1), 13 + 13 + 3 + 3 + 1 )
self.assertEqual( (res1['total_passed'], res1['total_failed']),
(5414, 0) )
self.assertEqual(res1['flagstat_total'], 5414)
self.assertEqual(res1['mapped_passed_pct'], 98.82)
#I expect mapped_failed_pct to be float('nan')
self.assertTrue(math.isnan(res1['mapped_failed_pct']))
def test_rep2(self):
"""I expect rep2 to give identical results to rep1.
"""
res1 = parse_single_report(rep1)
res2 = parse_single_report(rep2)
# But since nan != nan we have to strip these out.
nans = [ k for k, v in res1.items() if math.isnan(v) ]
for k in nans:
del(res1[k])
del(res2[k])
self.assertEqual(res1, res2)
if __name__ == '__main__':
unittest.main()
```
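The reason `test_rep1` checks `mapped_failed_pct` with `math.isnan`, and `test_rep2` strips NaN keys before comparing, is that NaN never compares equal to itself, so a plain `assertEqual` over dicts holding NaN values from two separate parses would always fail:

```python
# NaN never compares equal, which is why the tests above strip NaN entries.
import math

a, b = float("nan"), float("nan")
print(a == b)                    # False
print(math.isnan(a))             # True
print({"pct": a} == {"pct": b})  # False, so assertEqual on such dicts would fail
```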
|
{
"source": "jfy133/nf-core-tools",
"score": 2
}
|
#### File: nf-core-tools/tests/test_mullled.py
```python
import pytest
from nf_core.modules import MulledImageNameGenerator
@pytest.mark.parametrize(
"specs, expected",
[
(["foo==0.1.2", "bar==1.1"], [("foo", "0.1.2"), ("bar", "1.1")]),
(["foo=0.1.2", "bar=1.1"], [("foo", "0.1.2"), ("bar", "1.1")]),
],
)
def test_target_parsing(specs, expected):
"""Test that valid specifications are correctly parsed into tool, version pairs."""
assert MulledImageNameGenerator.parse_targets(specs) == expected
@pytest.mark.parametrize(
"specs",
[
["foo<0.1.2", "bar==1.1"],
["foo=0.1.2", "bar>1.1"],
],
)
def test_wrong_specification(specs):
"""Test that unexpected version constraints fail."""
with pytest.raises(ValueError, match="expected format"):
MulledImageNameGenerator.parse_targets(specs)
@pytest.mark.parametrize(
"specs",
[
["foo==0a.1.2", "bar==1.1"],
["foo==0.1.2", "bar==1.b1b"],
],
)
def test_noncompliant_version(specs):
"""Test that version string that do not comply with PEP440 fail."""
with pytest.raises(ValueError, match="PEP440"):
MulledImageNameGenerator.parse_targets(specs)
@pytest.mark.parametrize(
"specs, expected",
[
(
[("chromap", "0.2.1"), ("samtools", "1.15")],
"mulled-v2-1f09f39f20b1c4ee36581dc81cc323c70e661633:bd74d08a359024829a7aec1638a28607bbcd8a58-0",
),
(
[("pysam", "0.16.0.1"), ("biopython", "1.78")],
"mulled-v2-3a59640f3fe1ed11819984087d31d68600200c3f:185a25ca79923df85b58f42deb48f5ac4481e91f-0",
),
(
[("samclip", "0.4.0"), ("samtools", "1.15")],
"mulled-v2-d057255d4027721f3ab57f6a599a2ae81cb3cbe3:13051b049b6ae536d76031ba94a0b8e78e364815-0",
),
],
)
def test_generate_image_name(specs, expected):
"""Test that a known image name is generated from given targets."""
assert MulledImageNameGenerator.generate_image_name(specs) == expected
```
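A short usage sketch based on the parametrized cases above; the specifications and the expected container name are taken directly from the test data.
```python
from nf_core.modules import MulledImageNameGenerator

# Conda-style "tool==version" specifications, as in the tests above
specs = ["chromap==0.2.1", "samtools==1.15"]
targets = MulledImageNameGenerator.parse_targets(specs)
image = MulledImageNameGenerator.generate_image_name(targets)
# Per the test data this yields:
# mulled-v2-1f09f39f20b1c4ee36581dc81cc323c70e661633:bd74d08a359024829a7aec1638a28607bbcd8a58-0
print(image)
```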
|
{
"source": "jfy133/paleomix",
"score": 2
}
|
#### File: paleomix/atomiccmd/sets.py
```python
import time
import collections
import paleomix.atomiccmd.pprint as atomicpp
from paleomix.atomiccmd.command import AtomicCmd, CmdError
from paleomix.common.utilities import safe_coerce_to_tuple
from paleomix.common.fileutils import try_remove
class _CommandSet:
def __init__(self, commands):
self._commands = safe_coerce_to_tuple(commands)
if not self._commands:
raise CmdError("Empty list passed to command set")
self._validate_commands()
def commit(self, temp):
committed_files = set()
try:
for command in self._commands:
command.commit(temp)
committed_files.update(command.output_files)
except Exception:
# Cleanup after failed commit
for fpath in committed_files:
try_remove(fpath)
raise
def _collect_properties(key):
def _collector(self):
values = set()
for command in self._commands:
values.update(getattr(command, key))
return values
return property(_collector)
input_files = _collect_properties("input_files")
output_files = _collect_properties("output_files")
auxiliary_files = _collect_properties("auxiliary_files")
executables = _collect_properties("executables")
requirements = _collect_properties("requirements")
expected_temp_files = _collect_properties("expected_temp_files")
optional_temp_files = _collect_properties("optional_temp_files")
@property
def stdout(self):
raise CmdError(
"%s does not implement property 'stdout'!" % (self.__class__.__name__,)
)
def terminate(self):
for command in self._commands:
command.terminate()
def __str__(self):
return atomicpp.pformat(self)
def _validate_commands(self):
if len(self._commands) != len(set(self._commands)):
raise ValueError(
"Same command included multiple times in %s"
% (self.__class__.__name__,)
)
filenames = collections.defaultdict(int)
for command in self._commands:
for filename in command.expected_temp_files:
filenames[filename] += 1
for filename in command.optional_temp_files:
filenames[filename] += 1
clobbered = [filename for (filename, count) in filenames.items() if (count > 1)]
if any(clobbered):
raise CmdError(
"Commands clobber each others' files: %s" % (", ".join(clobbered),)
)
class ParallelCmds(_CommandSet):
"""This class wraps a set of AtomicCmds, running them in parallel.
This corresponds to a set of piped commands, which only terminate
when all parts of the pipe have terminated. For example:
$ dmesg | grep -i segfault | gzip > log.txt.gz
In case of any one sub-command failing, the remaining commands are
automatically terminated. This is done to ensure that commands waiting
on pipes are not left running indefinitely.
Note that only AtomicCmds and ParallelCmds are allowed as
sub-commands for this class, since the model requires non-
blocking commands."""
def __init__(self, commands):
self._joinable = False
commands = safe_coerce_to_tuple(commands)
for command in commands:
if not isinstance(command, (AtomicCmd, ParallelCmds)):
raise CmdError(
"ParallelCmds must only contain AtomicCmds or other ParallelCmds!"
)
_CommandSet.__init__(self, commands)
def run(self, temp):
for command in self._commands:
command.run(temp)
self._joinable = True
def ready(self):
return all(cmd.ready() for cmd in self._commands)
def join(self):
sleep_time = 0.05
commands = list(enumerate(self._commands))
return_codes = [[None]] * len(commands)
while commands and self._joinable:
for (index, command) in list(commands):
if command.ready():
return_codes[index] = command.join()
commands.remove((index, command))
sleep_time = 0.05
elif any(any(codes) for codes in return_codes):
command.terminate()
return_codes[index] = command.join()
commands.remove((index, command))
sleep_time = 0.05
time.sleep(sleep_time)
sleep_time = min(1, sleep_time * 2)
return sum(return_codes, [])
class SequentialCmds(_CommandSet):
"""This class wraps a set of AtomicCmds, running them sequentially.
This class therefore corresponds a set of lines in a bash script,
each of which invokes a foreground job. For example:
$ bcftools view snps.bcf | bgzip > snps.vcf.bgz
$ tabix snps.vcf.bgz
The list of commands may include any type of command. Note that
the run function only returns once each sub-command has completed.
A command is only executed if the previous command in the sequence
was successfully completed, and as a consequence the return codes
of a failed SequentialCommand may contain None."""
def __init__(self, commands):
self._ready = False
commands = safe_coerce_to_tuple(commands)
for command in commands:
if not isinstance(command, (AtomicCmd, _CommandSet)):
raise CmdError(
"ParallelCmds must only contain AtomicCmds or other ParallelCmds!"
)
_CommandSet.__init__(self, commands)
def run(self, temp):
self._ready = False
for command in self._commands:
command.run(temp)
if any(command.join()):
break
self._ready = True
def ready(self):
return self._ready
def join(self):
return_codes = []
for command in self._commands:
return_codes.extend(command.join())
return return_codes
```
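A hedged usage sketch of `ParallelCmds`, mirroring the `dmesg | gzip` example from the docstring; the exact `AtomicCmd` keyword arguments are assumptions based on how they are used elsewhere in this codebase.
```python
from paleomix.atomiccmd.command import AtomicCmd
from paleomix.atomiccmd.sets import ParallelCmds

# Two commands joined by a pipe, terminating together (assumed AtomicCmd kwargs)
producer = AtomicCmd(["dmesg"], OUT_STDOUT=AtomicCmd.PIPE)
consumer = AtomicCmd(["gzip"], IN_STDIN=producer, OUT_STDOUT="log.txt.gz")

pipeline = ParallelCmds([producer, consumer])
# pipeline.run("/tmp/workdir")   # starts both commands without blocking
# return_codes = pipeline.join() # waits for the whole pipe to finish
```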
#### File: paleomix/common/bedtools.py
```python
import copy
from paleomix.common.fileutils import open_ro
from paleomix.common.utilities import TotallyOrdered
def _strand_type(value):
if value not in ("+", "-"):
raise ValueError("Strand must be '+' or '-', not %r" % (value,))
return value
_BED_DEFAULTS = ("", 0, 0, "", 0, "+")
_BED_KEYS = ("contig", "start", "end", "name", "score", "strand")
_BED_TYPES = (str, int, int, str, int, _strand_type)
class BEDError(RuntimeError):
pass
class BEDRecord(TotallyOrdered):
"""Class for parsing and representing a BED records.
The class has the following properties:
.contig -> str
.start -> int (0-based)
.end -> int (1-based)
.name -> str
.score -> int
.strand -> '+' or '-'
These fields can also be accessed using the square brackets notation, which
also gives access to any additional values, after the strand column. Fields
default to 0 or empty string, except the strand which defaults to '+'.
"""
__slots__ = ["_fields"]
def __init__(self, line=None, _len=None):
"""Constructs a BED record from a line of text. The length of the
object matches the number of columns in the input line; in the case of
incompatible values, a BEDError exception is raised.
The len parameter is unused, and included only for compatibility with
pysam parser objects, such as 'asBED'. No minimum number of columns is
required, and it is possible to construct an empty bed record.
"""
self._fields = []
if line:
line = line.rstrip("\r\n").split("\t")
for column, (value, func) in enumerate(zip(line, _BED_TYPES)):
try:
self._fields.append(func(value))
except ValueError:
raise BEDError(
"Error parsing column %i in BED record "
"(%r); expected type %s, but found %r."
% (column, "\t".join(line), func.__name__, value)
)
if len(line) > len(self._fields):
self._fields.extend(line[len(self._fields) :])
def freeze(self):
record = BEDRecord()
record._fields = tuple(self._fields)
return record
def __copy__(self):
"""Needed for copy.copy to work correctly as expected."""
record = BEDRecord()
record._fields = copy.copy(self._fields)
return record
def __len__(self):
"""Returns the number of fields in the record; 0 .. N."""
return len(self._fields)
def __str__(self):
"""Returns a string suitable for writing to a .bed file."""
return "\t".join(str(value) for value in self._fields)
def __repr__(self):
"""Returns a printable representation of the record."""
fields = []
for name, value in zip(_BED_KEYS, self._fields):
fields.append("%s=%r" % (name, value))
fields.extend(repr(value) for value in self._fields[len(_BED_KEYS) :])
return "BEDRecord(%s)" % (", ".join(fields))
def __getitem__(self, index):
return self._fields[index]
def __setitem__(self, index, value):
if len(self._fields) <= index:
defaults = _BED_DEFAULTS[len(self._fields) : index + 1]
self._fields.extend(defaults)
while len(self._fields) <= index:
self._fields.append("")
if index < len(_BED_TYPES):
if type(_BED_TYPES[index]) is type:
if not isinstance(value, _BED_TYPES[index]):
raise ValueError(
"Expected %s for BED field %i, got %r"
% (_BED_TYPES[index].__name__, index + 1, value)
)
else:
value = _BED_TYPES[index](value)
self._fields[index] = value
def __lt__(self, other):
if not isinstance(other, BEDRecord):
return NotImplemented
return self._fields < other._fields
@classmethod
def _set_properties(cls):
for index, name in enumerate(_BED_KEYS):
setattr(cls, name, cls._new_attr(index))
@classmethod
def _new_attr(cls, index):
"""Returns an getter / setter property for the given value."""
def _get(self):
return self._fields[index]
def _set(self, value):
self[index] = value
return property(_get, _set)
# Fill out properties for BEDRecord
BEDRecord._set_properties()
def read_bed_file(filename, min_columns=3, contigs=None):
"""Parses a (gzip/bzip2 compressed) BED file, and yields a sequence of
records. Comments and empty lines are skipped. If the number of columns in
the bed record is less than the specified ('min_columns'), a BEDError is
raised. If a dictionary of {contig: length} is supplied, and min_columns
is at least 6, then the coordinates are validated against the known contig
lengths.
"""
if min_columns < 3:
raise ValueError("'min_columns' must be >= 3 in 'read_bed_file'")
infinite = float("inf")
handle = None
try:
handle = open_ro(filename)
for (line_num, line) in enumerate(handle):
line = line.strip()
if not line or line.startswith("#"):
continue
try:
bed = BEDRecord(line)
except ValueError as error:
raise BEDError(
"Error parsing line %i in regions file:\n"
" Path = %r\n Line = %r\n\n%s"
% (line_num + 1, filename, line, error)
)
if len(bed) < min_columns:
url = "http://genome.ucsc.edu/FAQ/FAQformat.html#format1"
name = repr(bed.name) if len(bed) > 3 else "unnamed record"
raise BEDError(
"Region at line #%i (%s) does not "
"contain the expected number of fields; "
"the first %i fields are required. C.f. "
"defination at\n %s\n\nPath = %r"
% (line_num, name, min_columns, url, filename)
)
if contigs is None:
contig_len = infinite
else:
contig_len = contigs.get(bed.contig)
if contig_len is None:
raise BEDError(
"Regions file contains contig not found "
"in reference:\n Path = %r\n Contig = "
"%r\n\nPlease ensure that all contig "
"names match the reference names!" % (filename, bed.contig)
)
elif not (0 <= bed.start < bed.end <= contig_len):
raise BEDError(
"Regions file contains invalid region:\n"
" Path = %r\n Contig = %r\n"
" Start = %s\n End = %s\n\n"
"Expected 0 <= Start < End <= %i!"
% (filename, bed.contig, bed.start, bed.end, contig_len)
)
yield bed
finally:
if handle:
handle.close()
def sort_bed_by_bamfile(bamfile, regions):
"""Orders a set of BED regions, such that processing matches
(as far as possible) the layout of the BAM file. This may be
used to ensure that extraction of regions occurs (close to)
linearly."""
if not regions:
return
references = bamfile.references
indices = dict(zip(references, range(len(references))))
def _by_bam_layout(region):
return (indices[region.contig], region.start, region.end)
regions.sort(key=_by_bam_layout)
def pad_bed_records(records, padding, max_sizes={}):
infinity = float("inf")
for record in records:
max_length = max_sizes.get(record.contig, infinity)
record.start = max(0, record.start - padding)
record.end = min(record.end + padding, max_length)
def merge_bed_records(records):
records = sorted(records)
if not records:
return []
last_record = BEDRecord()
last_record._fields = records[0]._fields[:3]
results = [last_record]
for record in records:
if last_record.contig != record.contig or last_record.end < record.start:
last_record = BEDRecord()
last_record._fields = record._fields[:3]
results.append(last_record)
else:
last_record.end = record.end
return results
```
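A small sketch of the BED helpers defined above: two records are parsed, padded against a hypothetical contig length, and merged into a single interval.
```python
from paleomix.common.bedtools import BEDRecord, pad_bed_records, merge_bed_records

# Placeholder coordinates on a made-up 1 kbp contig
records = [
    BEDRecord("chr1\t100\t150\tregion_a\t0\t+"),
    BEDRecord("chr1\t160\t200\tregion_b\t0\t+"),
]
pad_bed_records(records, padding=10, max_sizes={"chr1": 1000})
merged = merge_bed_records(records)
print([str(record) for record in merged])  # ['chr1\t90\t210']
```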
#### File: common/formats/_graph.py
```python
from paleomix.common.utilities import safe_coerce_to_frozenset, get_in, set_in
from paleomix.common.formats import FormatError
class GraphError(FormatError):
pass
class _Graph:
"""Internal representation of an unrooted graph, allowing various forms of
manipulation directly on the graph. To ensure that all manipulations can be
carried out, it is required that branch-lengths are present for ALL branches,
or for NO branches.
Note that neither the root-length, nor node-ordering is preserved."""
def __init__(self):
self.names = {}
self.connections = {}
self.has_branch_lengths = None
def is_leaf(self, node):
"""Returns true if the node is a leaf, defined as having a single connection."""
return len(self.connections[node]) == 1
def get_path_length(self, *nodes):
"""Returns the length of a path through the graph. Calling the function
with two nodes is the equivalent of getting the branch-length between
those two nodes."""
if not self.has_branch_lengths:
return None
path_length = 0.0
for (node_a, node_b) in zip(nodes, nodes[1:]):
segment_length = float(self.connections[node_a][node_b])
path_length += segment_length
return path_length
def set_name(self, node_id, name):
self.names[node_id] = name
def add_connection(self, node_id_a, node_id_b, blength=None):
if (blength is not None) and float(blength) < 0:
raise GraphError("Branch-lengths must be non-negative")
elif (blength is not None) != self.has_branch_lengths:
if self.has_branch_lengths is not None:
raise GraphError("Tree contains branches with and without lengths")
self.has_branch_lengths = blength is not None
set_in(self.connections, (node_id_a, node_id_b), blength)
set_in(self.connections, (node_id_b, node_id_a), blength)
def remove_connection(self, node_a, node_b):
length_a = self.connections[node_a].pop(node_b)
length_b = self.connections[node_b].pop(node_a)
assert length_a == length_b, (length_a, length_b)
return length_a
def remove_node(self, node):
connections = self.connections.pop(node)
for node_b in connections:
self.connections[node_b].pop(node)
self.names.pop(node)
def rebuild_tree(self, parent_id, node_id):
"""Rebuilds a tree starting at a node with id
'node_id' and a parent with id 'parent_id' (or the
same value as 'node_id' if a root node)."""
raise NotImplementedError(
"Subclasses must implement 'rebuild_nodes'."
) # pragma: no coverage
def prune_uninformative_nodes(self):
"""Removes nodes without names, and which are connected
to two other nodes, extending the branch lengths of the
two connected nodes. This process is repeated until no
further nodes are pruned. A rooted tree will typically
contain just 1 such node, namely the old root node.
For example, the tree "(A:5,(B:6):3);" would be reduced to
the tree "(A:5,B:9);", whereas the trees "(A:5,(B:6)C:3);"
and "(A:5,(B:6,C:2):3);" would not be pruned.
For a node to be pruned, both adjacent nodes must have a
length specified, or both must not have a length specified."""
while True:
for (cur_node, connections) in self.connections.items():
if not self.names[cur_node] and (len(connections) == 2):
conn_a, conn_b = connections
blength = self.get_path_length(conn_a, cur_node, conn_b)
# Splice out the current node
self.remove_node(cur_node)
self.add_connection(conn_a, conn_b, blength)
break
else:
# Nothing was pruned this round, terminate
break
################################################################################
################################################################################
# Functions relating to NEWICK rooting on midpoint
def reroot_on_midpoint(self):
if not self.has_branch_lengths:
raise GraphError(
"Cannot reroot on midpoint for tree without branch-lengths"
)
longest_path, length = self._find_longest_path()
root = self._create_root_at(longest_path, length / 2.0)
return self.rebuild_tree(root, root)
def _find_longest_path(self):
"""This function determines the longest non-overlapping path possible,
and returns a list of the sequence of nodes in this path, as well as
the total length of this path."""
path_blengths = {}
path_guides = {}
def _collect_paths(guide, length, p_node, c_node):
length += self.get_path_length(p_node, c_node)
guide.append(c_node)
key = frozenset(guide)
path_blengths[key] = length
path_guides[key] = guide
for other in self.connections[c_node]:
if other not in key:
_collect_paths(list(guide), length, c_node, other)
for (p_node, connections) in self.connections.items():
for c_node in connections:
_collect_paths([p_node], 0, p_node, c_node)
key, length = max(path_blengths.items(), key=lambda item: item[1])
return path_guides[key], length
def _create_root_at(self, path, root_at):
"""Finds the midpoint of a path through a tree, and
either creates a new node at that point, or selects
the node already present at that point (if any). The
mid-point is assumed to be at a distance of 'root_at'
from the starting node.
E.g. if the path is the longest path, and 'root_at' is
half the length of this path, then this corresponds to
rooting at the midpoint.
The id of the new / selected node is returned. New
nodes (if created) are always given the id None."""
for (c_node, n_node) in zip(path, path[1:]):
branch_length = self.get_path_length(c_node, n_node)
if branch_length > root_at:
left_len = root_at
right_len = branch_length - root_at
self.remove_connection(c_node, n_node)
self.add_connection(None, c_node, left_len)
self.add_connection(None, n_node, right_len)
return None
elif branch_length == root_at:
return n_node
root_at -= branch_length
assert False # pragma: no coverage
################################################################################
################################################################################
# Functions relating to NEWICK rooting on taxa
def reroot_on_taxa(self, taxa):
taxa = safe_coerce_to_frozenset(taxa)
if not taxa:
raise ValueError("No taxa in outgroup")
clades = self._collect_clades()
root_on = self._collect_nodes_from_names(taxa)
# Because None is the id of the root atm:
root = self._create_root_with_clade(clades, root_on)
return self.rebuild_tree(root, root)
def _collect_nodes_from_names(self, taxa):
known_taxa = set()
for (node_id, name) in self.names.items():
if self.is_leaf(node_id):
known_taxa.add(name)
unknown_taxa = taxa - known_taxa
if unknown_taxa:
raise ValueError(
"Cannot root on unknown taxa: %s" % (", ".join(unknown_taxa),)
)
elif not (known_taxa - taxa):
raise ValueError("Cannot root on every taxa in tree")
return frozenset(key for (key, name) in self.names.items() if name in taxa)
def _collect_clades(self):
clades = {}
for (node_a, connections) in self.connections.items():
for node_b in connections:
self._collect_clade_from(clades, node_a, node_b)
return clades
def _collect_clade_from(self, cache, p_node, c_node):
c_clade = get_in(cache, (p_node, c_node), set())
if not c_clade:
if self.is_leaf(c_node):
c_clade.add(c_node)
for n_node in self.connections[c_node]:
if n_node != p_node:
c_clade.update(self._collect_clade_from(cache, c_node, n_node))
set_in(cache, (p_node, c_node), frozenset(c_clade))
return c_clade
def _create_root_with_clade(self, clades, taxa):
root_key, root_clade, root_length = None, None, None
for (p_node, connections) in clades.items():
for (n_node, clade) in connections.items():
if (root_clade is None) or (len(clade) < len(root_clade)):
if taxa.issubset(clade):
root_key = (p_node, n_node)
root_clade = clade
root_length = self.get_path_length(p_node, n_node)
p_node, n_node = root_key
if root_length is not None:
root_length = float(root_length) / 2.0
self.remove_connection(p_node, n_node)
self.add_connection(None, p_node, root_length)
self.add_connection(None, n_node, root_length)
return None
################################################################################
################################################################################
# Functions relating to calculating bootstrap support
def get_clade_names(self):
result = set()
for (_, connections) in self._collect_clades().items():
for (_, clade) in connections.items():
result.add(frozenset(self.names[node_id] for node_id in clade))
return result
```
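A minimal sketch of the internal `_Graph` container: an unrooted star tree with three named leaves joined at an unnamed internal node; node ids and branch lengths are arbitrary illustration values.
```python
from paleomix.common.formats._graph import _Graph

graph = _Graph()
for node_id, name in enumerate(("", "A", "B", "C")):
    graph.set_name(node_id, name)
# Internal node 0 connects to the three leaves
graph.add_connection(0, 1, 2.0)
graph.add_connection(0, 2, 3.0)
graph.add_connection(0, 3, 7.0)

print(graph.is_leaf(1))                # True; leaf "A" has a single connection
print(graph.get_path_length(1, 0, 3))  # 9.0; path A -> internal node -> C
```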
#### File: paleomix/common/rtools.py
```python
import paleomix.common.versions as versions
from paleomix.resources import rscript
def requirement(module, checks=versions.Any(), cache={}):
key = (module, checks)
result = cache.get(key)
if result is None:
filename = rscript("common", "requires.r")
result = versions.Requirement(
call=("Rscript", filename, module),
search=r"d0fd3ea6: (\d+)\.(\d+)(?:\.(\d+))?",
checks=checks,
name="R module: {}".format(module),
)
cache[(module, checks)] = result
return result
```
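A usage sketch of the cached `requirement()` helper; the version bound is an illustrative choice, not a project default.
```python
import paleomix.common.versions as versions
from paleomix.common.rtools import requirement

# Require the 'ggplot2' R package at version >= 2.0.0 (illustrative bound)
ggplot2 = requirement("ggplot2", checks=versions.GE(2, 0, 0))
# The returned versions.Requirement is normally attached to a command via a
# CHECK_* keyword, as done for the R packages in the mapDamage nodes below.
```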
#### File: paleomix/common/timer.py
```python
import sys
import time
from paleomix.common.utilities import fragment, cumsum
_DESC = "Processed {Records} records ({Progress}) in {Time}, est. {Remaining} left. Last {RecordsDelta} records in {TimeDelta}, now at {Contig}: {Position}"
_FINAL = (
"Processed {Records} records in {Time}. Last {RecordsDelta} records in {TimeDelta}"
)
class BAMTimer:
def __init__(self, bamfile, desc=None, step=1e6, out=sys.stderr):
self._bam = None
self._out = out
self._desc = desc
self._step = int(step)
self._count = 0
self._last_count = 0
self._last_time = time.time()
self._start_time = self._last_time
self._last_fract = -1.0
self._bam_references = None
self._total = 0.0
self._counts = []
if bamfile and bamfile.header.get("HD", {}).get("SO", "NA") == "coordinate":
self._bam = bamfile
self._bam_references = self._bam.references
lengths = bamfile.lengths
self._total = float(sum(lengths)) or 1.0
self._counts.append(0)
self._counts.extend(cumsum(lengths))
def increment(self, count=1, read=None):
self._count += count
if (self._count - self._last_count) >= self._step:
current_time = time.time()
self._print(current_time, read)
self._last_time = current_time
self._last_count = self._count
return self
def finalize(self):
self._print(time.time(), None)
def _print(self, current_time, read):
desc = _FINAL
contig, position, progress, remaining = "NA", "NA", "NA", "NA"
if read and not read.is_unmapped and self._bam:
fraction = (read.pos + self._counts[read.tid]) / self._total
if fraction >= self._last_fract:
self._last_fract = fraction
contig = self._bam_references[read.tid]
position = self._format_int(read.pos + 1)
progress = "%.2f%%" % (fraction * 100,)
current_running = current_time - self._start_time
remaining = self._format_time(
current_running / fraction - current_running
)
desc = _DESC
else:
print(
"File appears to be unsorted, cannot estimate progress",
file=self._out,
)
self._bam = None
if self._desc:
print("%s: " % self._desc, end="", file=self._out)
print(
desc.format(
Records=self._format_int(self._count),
RecordsDelta=self._format_int(self._count - self._last_count),
Time=self._format_time(current_time - self._start_time),
TimeDelta=self._format_time(current_time - self._last_time),
Contig=contig,
Position=position,
Progress=progress,
Remaining=remaining,
),
file=self._out,
)
def _format_time(self, ftime):
utc = time.gmtime(ftime)
return "%02i:%02i:%02is" % (utc.tm_hour, utc.tm_min, utc.tm_sec)
def _format_int(self, value):
return (",".join(fragment(3, str(value)[::-1])))[::-1]
```
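A hedged sketch of `BAMTimer` wrapped around a plain pysam loop; the filename is a placeholder and the step size is arbitrary.
```python
import sys
import pysam
from paleomix.common.timer import BAMTimer

# Report progress every 100,000 records while reading a coordinate-sorted BAM
with pysam.AlignmentFile("example.bam") as handle:
    timer = BAMTimer(handle, desc="Reading", step=1e5, out=sys.stderr)
    for read in handle:
        timer.increment(read=read)
    timer.finalize()
```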
#### File: paleomix/common/vcfwrap.py
```python
import collections
Indel = collections.namedtuple(
"Indel", ["in_reference", "pos", "prefix", "what", "postfix"]
)
def parse_indel(vcf):
"""Parses the VCF record of an indel, and returns a tuple containing the
position (0-based) of the previous base, a boolean indicating whether or
not the subsequent sequence is found in the reference sequence, and a
string containing the bases added to / removed from the reference.
Thus (7, False, "ACGT", "AC") indicates that the sequence ACGT has been
inserted following the 8th nucleotide, compared with the reference, and
that the insertion is followed by the bases "AC" on the reference."""
if not is_indel(vcf):
raise ValueError("SNP passed to 'parse_indel'!")
elif "," in vcf.alt:
raise ValueError("VCF records with multiple indels not supported!")
elif vcf.ref[0] != vcf.alt[0]:
raise ValueError(
"Sequences do not match VCF spec, first base differs: "
"%s:%s -- %s > %s" % (vcf.contig, vcf.pos + 1, vcf.ref, vcf.alt)
)
ref_len, alt_len = len(vcf.ref), len(vcf.alt)
# The length of the insertion / deletion
len_diff = abs(alt_len - ref_len)
# Whether or not the sequence 'what' is found in the reference
in_reference = ref_len >= alt_len
# The sequence added or removed from the reference
longest = max(vcf.ref, vcf.alt, key=len)
shortest = min(vcf.ref, vcf.alt, key=len)
what = longest[1 : len_diff + 1]
postfix = shortest[1:]
if longest[len_diff + 1 :] != postfix:
raise ValueError("Sequence postfix does not match; malformed indel!")
return Indel(in_reference, vcf.pos, vcf.ref[0], what, postfix)
def is_indel(vcf):
"""Returns true if the VCF entry represents an indel."""
# FIXME: Is this a universal key for indels?
return "INDEL" in vcf.info
# The corresponding nucleotides for each value in the VCF PL field
_genotype_indices = [(jj, ii) for ii in range(0, 10) for jj in range(0, ii + 1)]
def get_ml_genotype(vcf, sample=0):
"""Returns the most likely genotype of a sample in a vcf record. If no
single most likely genotype can be determined, the function returns 'N' for
both bases."""
genotypes = []
genotypes.extend(vcf.ref.split(","))
genotypes.extend(vcf.alt.split(","))
PL = list(map(int, get_format(vcf, sample)["PL"].split(",")))
if len(PL) == len(genotypes):
ploidy = 1
else:
expected_length = (len(genotypes) * (len(genotypes) + 1)) // 2
if len(PL) != expected_length:
raise ValueError(
"Expected %i PL values, found %i" % (expected_length, len(PL))
)
ploidy = 2
if PL.count(min(PL)) > 1:
# No single most likely genotype
return ("N", "N")
most_likely = min(range(len(PL)), key=PL.__getitem__)
if ploidy == 1:
prefix = postfix = most_likely
else:
prefix, postfix = _genotype_indices[most_likely]
return (genotypes[prefix], genotypes[postfix])
def get_format(vcf, sample=0):
return dict(zip(vcf.format.split(":"), vcf[sample].split(":")))
```
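A worked illustration of the diploid PL ordering encoded in `_genotype_indices` above, using a made-up site with alleles A (REF), C and T (ALT).
```python
from paleomix.common.vcfwrap import _genotype_indices

alleles = ["A", "C", "T"]  # REF followed by the ALT alleles of a hypothetical site
for index, (first, second) in enumerate(_genotype_indices[:6]):
    print(index, alleles[first] + "/" + alleles[second])
# 0 A/A, 1 A/C, 2 C/C, 3 A/T, 4 C/T, 5 T/T -- the order of the six PL values
```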
#### File: paleomix/nodes/bedtools.py
```python
from paleomix.node import Node, NodeError
from paleomix.common.bedtools import read_bed_file, pad_bed_records, merge_bed_records
from paleomix.common.fileutils import move_file, reroot_path
class PaddedBedNode(Node):
"""Simple node for padding BED records a fixed amount and merging
overlapping records. Columns beyond the 3rd column are dropped.
"""
def __init__(self, infile, outfile, fai_file, amount=0, dependencies=()):
self._amount = int(amount)
self._infile = infile
self._outfile = outfile
self._fai_file = fai_file
Node.__init__(
self,
description="<PaddedBed (%i): %r -> %r>" % (amount, infile, outfile),
input_files=(infile, fai_file),
output_files=(outfile,),
dependencies=dependencies,
)
def _run(self, config, temp):
contigs = {}
with open(self._fai_file) as handle:
for line in handle:
name, length, _ = line.split("\t", 2)
if name in contigs:
raise NodeError(
"Reference genome contains multiple "
"identically named contigs (%r)!" % (name,)
)
contigs[name] = int(length)
with open(reroot_path(temp, self._outfile), "w") as handle:
records = list(read_bed_file(self._infile, contigs=contigs))
pad_bed_records(records=records, padding=self._amount, max_sizes=contigs)
for record in merge_bed_records(records):
handle.write("%s\n" % (record,))
def _teardown(self, config, temp):
source = reroot_path(temp, self._outfile)
move_file(source, self._outfile)
```
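A construction sketch for `PaddedBedNode`; all paths are placeholders, and in practice the node is queued in a paleomix pipeline rather than run by hand.
```python
from paleomix.nodes.bedtools import PaddedBedNode

# Pad regions by 100 bp and merge any resulting overlaps (placeholder paths)
node = PaddedBedNode(
    infile="regions.bed",
    outfile="regions.padded.bed",
    fai_file="reference.fasta.fai",
    amount=100,
)
```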
#### File: paleomix/nodes/bwa.py
```python
import functools
import os
import paleomix.common.versions as versions
import paleomix.tools.factory as factory
from paleomix.atomiccmd.builder import (
AtomicCmdBuilder,
apply_options,
)
from paleomix.atomiccmd.command import AtomicCmd
from paleomix.atomiccmd.sets import ParallelCmds
from paleomix.common.fileutils import describe_paired_files
from paleomix.node import CommandNode, NodeError
from paleomix.nodes.samtools import SAMTOOLS_VERSION
BWA_VERSION = versions.Requirement(
call=("bwa",), search=r"Version: (\d+)\.(\d+)\.(\d+)", checks=versions.GE(0, 7, 9)
)
class BWAIndexNode(CommandNode):
def __init__(self, input_file, prefix=None, dependencies=()):
prefix = prefix if prefix else input_file
builder = _new_bwa_command(
("bwa", "index", "%(IN_FILE)s", "-p", "%(TEMP_OUT_PREFIX)s"),
prefix,
iotype="OUT",
IN_FILE=input_file,
TEMP_OUT_PREFIX=os.path.basename(prefix),
)
description = "<BWA Index '%s' -> '%s.*'>" % (input_file, prefix)
CommandNode.__init__(
self,
command=builder.finalize(),
description=description,
dependencies=dependencies,
)
class BWABacktrack(CommandNode):
def __init__(
self,
input_file,
output_file,
reference,
prefix,
threads=1,
mapping_options={},
dependencies=(),
):
threads = _get_max_threads(reference, threads)
aln = _new_bwa_command(
("bwa", "aln"), prefix, IN_FILE=input_file, OUT_STDOUT=output_file,
)
aln.add_value(prefix)
aln.add_value("%(IN_FILE)s")
aln.set_option("-t", threads)
apply_options(aln, mapping_options)
description = _get_node_description(
name="BWA",
algorithm="Backtrack",
input_files_1=input_file,
prefix=prefix,
threads=threads,
)
CommandNode.__init__(
self,
command=aln.finalize(),
description=description,
threads=threads,
dependencies=dependencies,
)
class BWASamse(CommandNode):
def __init__(
self,
input_file_fq,
input_file_sai,
output_file,
reference,
prefix,
mapping_options={},
cleanup_options={},
dependencies=(),
):
samse = _new_bwa_command(
("bwa", "samse"),
prefix,
IN_FILE_SAI=input_file_sai,
IN_FILE_FQ=input_file_fq,
OUT_STDOUT=AtomicCmd.PIPE,
)
samse.add_value(prefix)
samse.add_value("%(IN_FILE_SAI)s")
samse.add_value("%(IN_FILE_FQ)s")
cleanup = _new_cleanup_command(samse, output_file, reference)
apply_options(samse, mapping_options)
apply_options(cleanup, cleanup_options)
CommandNode.__init__(
self,
command=ParallelCmds([samse.finalize(), cleanup.finalize()]),
description=_get_node_description(
name="BWA Samse", input_files_1=input_file_fq, prefix=prefix
),
dependencies=dependencies,
)
class BWASampe(CommandNode):
def __init__(
self,
input_file_fq_1,
input_file_fq_2,
input_file_sai_1,
input_file_sai_2,
output_file,
reference,
prefix,
mapping_options={},
cleanup_options={},
dependencies=(),
):
sampe = _new_bwa_command(
(
"bwa",
"sampe",
prefix,
"%(IN_SAI_1)s",
"%(IN_SAI_2)s",
"%(IN_FQ_1)s",
"%(IN_FQ_2)s",
),
prefix,
IN_SAI_1=input_file_sai_1,
IN_SAI_2=input_file_sai_2,
IN_FQ_1=input_file_fq_1,
IN_FQ_2=input_file_fq_2,
OUT_STDOUT=AtomicCmd.PIPE,
)
cleanup = _new_cleanup_command(sampe, output_file, reference, paired_end=True)
apply_options(sampe, mapping_options)
apply_options(cleanup, cleanup_options)
CommandNode.__init__(
self,
command=ParallelCmds([sampe.finalize(), cleanup.finalize()]),
description=_get_node_description(
name="BWA Sampe",
input_files_1=input_file_fq_1,
input_files_2=input_file_fq_2,
prefix=prefix,
),
dependencies=dependencies,
)
class BWAAlgorithmNode(CommandNode):
def __init__(
self,
input_file_1,
output_file,
reference,
prefix,
input_file_2=None,
threads=1,
algorithm="mem",
mapping_options={},
cleanup_options={},
dependencies=(),
):
if algorithm not in ("mem", "bwasw"):
raise NotImplementedError("BWA algorithm %r not implemented" % (algorithm,))
threads = _get_max_threads(reference, threads)
aln = _new_bwa_command(
("bwa", algorithm, prefix, "%(IN_FILE_1)s"),
prefix,
IN_FILE_1=input_file_1,
OUT_STDOUT=AtomicCmd.PIPE,
)
if input_file_2:
aln.add_value("%(IN_FILE_2)s")
aln.set_kwargs(IN_FILE_2=input_file_2)
aln.set_option("-t", threads)
# Mark alternative hits as secondary; required by e.g. Picard
aln.set_option("-M")
cleanup = _new_cleanup_command(
aln, output_file, reference, paired_end=input_file_1 and input_file_2
)
apply_options(aln, mapping_options)
apply_options(cleanup, cleanup_options)
description = _get_node_description(
name="BWA",
algorithm="%s%s" % (algorithm.upper(), "_PE" if input_file_2 else "_SE"),
input_files_1=input_file_1,
input_files_2=input_file_2,
prefix=prefix,
)
CommandNode.__init__(
self,
command=ParallelCmds([aln.finalize(), cleanup.finalize()]),
description=description,
threads=threads,
dependencies=dependencies,
)
def _new_cleanup_command(stdin, output_file, reference, paired_end=False):
convert = factory.new("cleanup")
convert.set_option("--fasta", "%(IN_FASTA_REF)s")
convert.set_option("--temp-prefix", "%(TEMP_OUT_PREFIX)s")
convert.set_kwargs(
IN_STDIN=stdin,
IN_FASTA_REF=reference,
OUT_STDOUT=output_file,
TEMP_OUT_PREFIX="bam_cleanup",
CHECK_SAMTOOLS=SAMTOOLS_VERSION,
)
if paired_end:
convert.set_option("--paired-end")
return convert
def _new_bwa_command(call, prefix, iotype="IN", **kwargs):
_check_bwa_prefix(prefix)
kwargs["CHECK_BWA"] = BWA_VERSION
for postfix in ("amb", "ann", "bwt", "pac", "sa"):
kwargs["%s_PREFIX_%s" % (iotype, postfix.upper())] = prefix + "." + postfix
return AtomicCmdBuilder(call, **kwargs)
@functools.lru_cache()
def _get_max_threads(reference, threads):
"""Returns the maximum number of threads to use when mapping against a
given reference sequence. This is done since very little gain is obtained
when using multiple threads for a small genome (e.g. < 1MB). If the
reference falls below this size, only 1 thread is used (returned),
otherwise the requested number of threads is returned.
"""
if os.path.exists(reference) and os.path.getsize(reference) < 2 ** 20:
return 1
return threads
@functools.lru_cache()
def _check_bwa_prefix(prefix):
"""Checks that a given prefix is compatible with the currently required version of
BWA. Older index files are incompatible with BWA v0.7.x, but may be identified by
the presence of a small number of additional files not present when an index is
produced using BWA v0.7.x.
"""
if any(os.path.exists(prefix + ext) for ext in (".rbwt", ".rpac", ".rsa")):
filenames = "\n".join(
" %s.%s" % (prefix, ext)
for ext in ("amb", "ann", "bwt", "pac", "sa", "rbwt", "rpac", "rsa")
)
raise NodeError(
"Prefix appears to be created using BWA v0.5.x or older, but PALEOMIX only "
"supports BWA v0.7.x or later.\nPlease remove the following files to allow "
"PALEOMIX to re-index the FASTA file:\n%s" % (filenames,)
)
def _get_node_description(
name, input_files_1, input_files_2=None, algorithm=None, prefix=None, threads=1
):
prefix = os.path.basename(prefix)
if prefix.endswith(".fasta") or prefix.endswith(".fa"):
prefix = prefix.rsplit(".", 1)[0]
info = [prefix]
if algorithm is not None:
info.append(algorithm)
if threads > 1:
info.append("%i threads" % (threads,))
file_desc = describe_paired_files(input_files_1, input_files_2 or ())
return "<%s (%s): %s>" % (name, ", ".join(info), file_desc)
```
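A construction sketch for `BWAAlgorithmNode` mapping a paired-end library with `bwa mem`; the filenames and prefix are placeholders.
```python
from paleomix.nodes.bwa import BWAAlgorithmNode

# Paired-end mapping with 'bwa mem', piped through the cleanup command
node = BWAAlgorithmNode(
    input_file_1="sample_R1.fastq.gz",
    input_file_2="sample_R2.fastq.gz",
    output_file="sample.bam",
    reference="reference.fasta",
    prefix="reference.fasta",
    threads=4,
)
```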
#### File: paleomix/nodes/mapdamage.py
```python
import os
import paleomix.common.rtools as rtools
import paleomix.common.versions as versions
from paleomix.common.fileutils import describe_files
from paleomix.node import NodeError, CommandNode
from paleomix.nodes.samtools import merge_bam_files_command
from paleomix.atomiccmd.builder import AtomicCmdBuilder, apply_options
from paleomix.atomiccmd.sets import ParallelCmds
MAPDAMAGE_VERSION = versions.Requirement(
call=("mapDamage", "--version"),
search=r"(\d+)\.(\d+).(\d+)",
checks=versions.GE(2, 0, 1),
)
RSCRIPT_VERSION = versions.Requirement(
call=("Rscript", "--version"),
search=r"(\d+)\.(\d+).(\d+)",
checks=versions.GE(2, 15, 1),
priority=10,
)
class MapDamagePlotNode(CommandNode):
def __init__(
self,
reference,
input_files,
output_directory,
title="mapDamage",
options={},
dependencies=(),
):
merge = merge_bam_files_command(input_files)
command = AtomicCmdBuilder(
[
"mapDamage",
"--no-stats",
# Prevent references with many contigs from using excessive
# amounts of memory, at the cost of per-contig statistics:
"--merge-reference-sequences",
"-t",
title,
"-i",
"-",
"-d",
"%(TEMP_DIR)s",
"-r",
"%(IN_REFERENCE)s",
],
IN_STDIN=merge,
IN_REFERENCE=reference,
OUT_FREQ_3p=os.path.join(output_directory, "3pGtoA_freq.txt"),
OUT_FREQ_5p=os.path.join(output_directory, "5pCtoT_freq.txt"),
OUT_COMP_USER=os.path.join(output_directory, "dnacomp.txt"),
OUT_PLOT_FRAG=os.path.join(
output_directory, "Fragmisincorporation_plot.pdf"
),
OUT_PLOT_LEN=os.path.join(output_directory, "Length_plot.pdf"),
OUT_LENGTH=os.path.join(output_directory, "lgdistribution.txt"),
OUT_MISINCORP=os.path.join(output_directory, "misincorporation.txt"),
OUT_LOG=os.path.join(output_directory, "Runtime_log.txt"),
TEMP_OUT_STDOUT="pipe_mapDamage.stdout",
TEMP_OUT_STDERR="pipe_mapDamage.stderr",
CHECK_RSCRIPT=RSCRIPT_VERSION,
CHECK_MAPDAMAGE=MAPDAMAGE_VERSION,
)
apply_options(command, options)
CommandNode.__init__(
self,
command=ParallelCmds([merge, command.finalize()]),
description="<mapDamage (plots): %s -> '%s'>"
% (describe_files(merge.input_files), output_directory,),
dependencies=dependencies,
)
def _teardown(self, config, temp):
# No Length_plot.pdf file is written if there are no SE reads in the
# input_file. In that case, we write a dummy PDF to ensure that all
# expected files exist.
err_message = "No length distributions are available"
with open(os.path.join(temp, "pipe_mapDamage.stderr")) as in_handle:
if any(line.startswith(err_message) for line in in_handle):
fpath = os.path.join(temp, "Length_plot.pdf")
with open(fpath, "w") as out_handle:
out_handle.write(_DUMMY_LENGTH_PLOT_PDF)
CommandNode._teardown(self, config, temp)
class MapDamageModelNode(CommandNode):
def __init__(self, reference, directory, options={}, dependencies=()):
command = AtomicCmdBuilder(
[
"mapDamage",
"--stats-only",
"-r",
"%(IN_REFERENCE)s",
"-d",
"%(TEMP_DIR)s",
],
IN_REFERENCE=reference,
TEMP_OUT_FREQ_3p="3pGtoA_freq.txt",
TEMP_OUT_FREQ_5p="5pCtoT_freq.txt",
TEMP_OUT_COMP_USER="dnacomp.txt",
TEMP_OUT_MISINCORP="misincorporation.txt",
TEMP_OUT_LOG="Runtime_log.txt",
TEMP_OUT_STDOUT="pipe_mapDamage.stdout",
TEMP_OUT_STDERR="pipe_mapDamage.stderr",
OUT_COMP_GENOME=os.path.join(directory, "dnacomp_genome.csv"),
OUT_MCMC_PROBS=os.path.join(directory, "Stats_out_MCMC_correct_prob.csv"),
OUT_MCMC_HIST=os.path.join(directory, "Stats_out_MCMC_hist.pdf"),
OUT_MCMC_ITER=os.path.join(directory, "Stats_out_MCMC_iter.csv"),
OUT_MCMC_ITERSUM=os.path.join(
directory, "Stats_out_MCMC_iter_summ_stat.csv"
),
OUT_MCMC_POSTPRED=os.path.join(directory, "Stats_out_MCMC_post_pred.pdf"),
OUT_MCMC_TRACE=os.path.join(directory, "Stats_out_MCMC_trace.pdf"),
CHECK_RSCRIPT=RSCRIPT_VERSION,
CHECK_MAPDAMAGE=MAPDAMAGE_VERSION,
CHECK_R_INLINE=rtools.requirement("inline"),
CHECK_R_GGPLOT2=rtools.requirement("ggplot2"),
CHECK_R_RCPP=rtools.requirement("Rcpp"),
CHECK_R_GAM=rtools.requirement("gam"),
CHECK_R_RCPPGSL=rtools.requirement("RcppGSL"),
)
apply_options(command, options)
self._directory = directory
CommandNode.__init__(
self,
command=command.finalize(),
description="<mapDamage (model): %r>" % (directory,),
dependencies=dependencies,
)
def _setup(self, config, temp):
CommandNode._setup(self, config, temp)
for fname in (
"3pGtoA_freq.txt",
"5pCtoT_freq.txt",
"dnacomp.txt",
"misincorporation.txt",
):
relpath = os.path.join(self._directory, fname)
abspath = os.path.abspath(relpath)
os.symlink(abspath, os.path.join(temp, fname))
def _run(self, config, temp):
try:
CommandNode._run(self, config, temp)
except NodeError as error:
err_message = "DNA damage levels are too low"
if self._command.join() == [1]:
fpath = os.path.join(temp, "pipe_mapDamage.stdout")
with open(fpath) as handle:
for line in handle:
if err_message in line:
line = line.strip().replace("Warning:", "ERROR:")
error = NodeError("%s\n\n%s" % (error, line))
break
raise error
class MapDamageRescaleNode(CommandNode):
def __init__(
self,
reference,
input_files,
output_file,
directory,
options={},
dependencies=(),
):
stats_out_fname = "Stats_out_MCMC_correct_prob.csv"
merge = merge_bam_files_command(input_files)
command = AtomicCmdBuilder(
[
"mapDamage",
"--rescale-only",
"-i",
"-",
"-d",
"%(TEMP_DIR)s",
"-r",
"%(IN_REFERENCE)s",
"--rescale-out",
"%(OUT_BAM)s",
],
IN_STDIN=merge,
IN_REFERENCE=reference,
TEMP_OUT_LOG="Runtime_log.txt",
TEMP_OUT_CSV=stats_out_fname,
OUT_BAM=output_file,
CHECK_VERSION=MAPDAMAGE_VERSION,
)
apply_options(command, options)
self._directory = directory
CommandNode.__init__(
self,
command=ParallelCmds([merge, command.finalize()]),
description="<mapDamage (rescale): %s -> %r>"
% (describe_files(merge.input_files), output_file,),
dependencies=dependencies,
)
def _setup(self, config, temp):
CommandNode._setup(self, config, temp)
for fname in ("Stats_out_MCMC_correct_prob.csv",):
relpath = os.path.join(self._directory, fname)
abspath = os.path.abspath(relpath)
os.symlink(abspath, os.path.join(temp, fname))
# Minimal PDF written if Length_plot.pdf wasn't generated
_DUMMY_LENGTH_PLOT_PDF = """%PDF-1.4
1 0 obj
<</Type /Font /Subtype /Type1 /Encoding /WinAnsiEncoding /BaseFont /Courier >>
endobj
2 0 obj
<</Parent 4 0 R /MediaBox[0 0 450 50] /Type /Page /Contents[3 0 R ] /Resources 5 0 R >>
endobj
3 0 obj
<</Length 138 >>
stream
BT
/F0 18 Tf
20 10 Td
(Input file(s) did not contain SE reads.) Tj
0 20 Td
(Length_plot.pdf not generated:) Tj
ET
endstream
endobj
4 0 obj
<</Type /Pages /Count 1 /Kids[2 0 R ]>>
endobj
5 0 obj
<</ProcSet[/PDF /Text] /Font <</F0 1 0 R >>
>>
endobj
6 0 obj
<</Type /Catalog /Pages 4 0 R >>
endobj
xref
0 7
0000000000 65535 f
0000000010 00000 n
0000000106 00000 n
0000000211 00000 n
0000000400 00000 n
0000000457 00000 n
0000000521 00000 n
trailer
<</Size 7 /Root 6 0 R >>
startxref
571
%%EOF
"""
```
#### File: paleomix/nodes/samtools.py
```python
import os
from paleomix.node import CommandNode
from paleomix.atomiccmd.builder import AtomicCmdBuilder
from paleomix.atomiccmd.command import AtomicCmd
from paleomix.common.fileutils import reroot_path
import paleomix.common.versions as versions
_VERSION_REGEX = r"Version: (\d+)\.(\d+)(?:\.(\d+))?"
SAMTOOLS_VERSION = versions.Requirement(
call=("samtools",), search=_VERSION_REGEX, checks=versions.GE(1, 3, 0)
)
BCFTOOLS_VERSION = versions.Requirement(
call=("bcftools",), search=_VERSION_REGEX, checks=versions.GE(1, 3, 0)
)
TABIX_VERSION = versions.Requirement(
call=("tabix",), search=_VERSION_REGEX, checks=versions.GE(1, 3, 0)
)
class TabixIndexNode(CommandNode):
"""Tabix indexes a BGZip compressed VCF or pileup file.
The class currently supports the following presets:
- vcf -- BGZipped VCF file.
- pileup -- BGZipped pileup (non-binary) as produced by 'mpileup'.
"""
def __init__(self, infile, preset="vcf", dependencies=()):
assert infile.lower().endswith(".bgz")
if preset == "pileup":
call = ["tabix", "-s", 1, "-b", 2, "-e", 2]
elif preset == "vcf":
call = ["tabix", "-p", preset]
else:
assert False, "Unxpected preset: %r" % preset
self._infile = infile
cmd_tabix = AtomicCmd(
call + ["%(TEMP_IN_VCFFILE)s"],
TEMP_IN_VCFFILE=os.path.basename(infile),
IN_VCFFILE=infile,
OUT_TBI=infile + ".tbi",
CHECK_TABIX=TABIX_VERSION,
)
CommandNode.__init__(
self,
description="<TabixIndex (%s): '%s'>" % (preset, infile),
command=cmd_tabix,
dependencies=dependencies,
)
def _setup(self, config, temp):
"""See CommandNode._setup."""
infile = os.path.abspath(self._infile)
outfile = reroot_path(temp, self._infile)
os.symlink(infile, outfile)
CommandNode._setup(self, config, temp)
def _teardown(self, config, temp):
"""See CommandNode._teardown."""
os.remove(reroot_path(temp, self._infile))
CommandNode._teardown(self, config, temp)
class FastaIndexNode(CommandNode):
"""Indexed a FASTA file using 'samtools faidx'."""
def __init__(self, infile, dependencies=()):
self._infile = infile
cmd_faidx = AtomicCmd(
["samtools", "faidx", "%(TEMP_IN_FASTA)s"],
TEMP_IN_FASTA=os.path.basename(infile),
IN_FASTA=infile,
OUT_TBI=infile + ".fai",
CHECK_SAM=SAMTOOLS_VERSION,
)
CommandNode.__init__(
self,
description="<FastaIndex: '%s'>" % (infile,),
command=cmd_faidx,
dependencies=dependencies,
)
def _setup(self, config, temp):
"""See CommandNode._setup."""
infile = os.path.abspath(self._infile)
outfile = reroot_path(temp, self._infile)
os.symlink(infile, outfile)
CommandNode._setup(self, config, temp)
def _teardown(self, config, temp):
"""See CommandNode._teardown."""
os.remove(reroot_path(temp, self._infile))
CommandNode._teardown(self, config, temp)
class BAMIndexNode(CommandNode):
"""Indexed a BAM file using 'samtools index'."""
def __init__(self, infile, index_format=".bai", dependencies=()):
if index_format == ".bai":
samtools_call = ["samtools", "index", "%(IN_BAM)s", "%(OUT_IDX)s"]
elif index_format == ".csi":
samtools_call = ["samtools", "index", "-c", "%(IN_BAM)s", "%(OUT_IDX)s"]
else:
raise ValueError(
"Unknown format type %r; expected .bai or .csi" % (index_format,)
)
command = AtomicCmd(
samtools_call,
IN_BAM=infile,
OUT_IDX=infile + index_format,
CHECK_SAM=SAMTOOLS_VERSION,
)
CommandNode.__init__(
self,
description="<BAMIndex (%s): '%s'>" % (index_format[1:].upper(), infile),
command=command,
dependencies=dependencies,
)
def merge_bam_files_command(input_files):
merge = AtomicCmdBuilder(
["samtools", "merge", "-u", "-"],
OUT_STDOUT=AtomicCmd.PIPE,
CHECK_VERSION=SAMTOOLS_VERSION,
)
merge.add_multiple_values(input_files)
return merge.finalize()
```
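A construction sketch for the indexing nodes above; filenames are placeholders, and note that `TabixIndexNode` expects a `.bgz` suffix.
```python
from paleomix.nodes.samtools import BAMIndexNode, TabixIndexNode

bam_index = BAMIndexNode(infile="sample.bam", index_format=".csi")
vcf_index = TabixIndexNode(infile="calls.vcf.bgz", preset="vcf")
```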
#### File: pipelines/phylo/makefile.py
```python
import logging
import os
import pysam
import paleomix.common.makefile
from paleomix.common.makefile import (
MakefileError,
REQUIRED_VALUE,
IsDictOf,
IsListOf,
IsInt,
IsStr,
StringIn,
IsFloat,
IsUnsignedInt,
IsBoolean,
IsNone,
RemovedOption,
ValueIn,
ValuesSubsetOf,
StringStartsWith,
StringEndsWith,
CLI_PARAMETERS,
And,
Or,
Not,
)
from paleomix.common.fileutils import swap_ext
from paleomix.common.utilities import fill_dict
from paleomix.common.text import parse_padded_table
from paleomix.common.bedtools import read_bed_file, BEDError
from paleomix.common.formats.fasta import FASTA
def read_makefiles(options, commands):
logger = logging.getLogger(__name__)
steps = frozenset(key for (key, _) in commands)
makefiles = []
for filename in options.files:
logger.info("Reading makefile %r", filename)
makefile = paleomix.common.makefile.read_makefile(filename, _VALIDATION)
makefile = _mangle_makefile(options, makefile, steps)
makefiles.append(makefile)
return makefiles
def _mangle_makefile(options, mkfile, steps):
_collapse_samples(mkfile)
_update_regions(options, mkfile)
_update_subsets(mkfile, steps)
_update_filtering(mkfile)
_update_sample_sets(mkfile)
_update_genotyping(mkfile)
_update_msa(mkfile)
_update_homozygous_contigs(mkfile)
_check_bam_sequences(options, mkfile, steps)
_check_sexes(mkfile)
_update_and_check_max_read_depth(options, mkfile)
_check_indels_and_msa(mkfile)
mkfile["Nodes"] = ()
return mkfile
def _collapse_samples(mkfile):
groups, samples = {}, set()
def _collect_samples(samples_dict, path=()):
current_samples = {}
for (key, subdd) in samples_dict.items():
if key.startswith("<") and key.endswith(">"):
key = key.lstrip("<").rstrip(">")
current_samples.update(_collect_samples(subdd, path + (key,)))
elif key not in samples:
samples.add(key)
subdd["Name"] = key
current_samples[key] = subdd
else:
raise MakefileError("Duplicate sample-name: %r" % (key,))
groups[path] = current_samples
return current_samples
_collect_samples(mkfile["Project"]["Samples"])
mkfile["Project"]["Samples"] = groups.pop(())
mkfile["Project"]["Groups"] = groups
def _select_samples(select, groups, samples, path):
selection = set()
for group in select:
if group.startswith("<") and group.endswith(">"):
key = tuple(group[1:-1].split("/"))
if key not in groups:
raise MakefileError(
"Unknown group specifed for filtering %r: %r" % (path, key)
)
selection.update(groups[key])
elif group in samples:
selection.add(group)
else:
raise MakefileError(
"Unknown/Invalid group specifed for filtering %r: %r" % (path, group)
)
return selection
def _update_regions(options, mkfile):
log = logging.getLogger(__name__)
log.info("Validating regions of interest")
mkfile["Project"]["Regions"] = mkfile["Project"].pop("RegionsOfInterest")
if not mkfile["Project"]["Regions"]:
raise MakefileError(
"No regions of interest have been specified; "
"no analyses will be performed."
)
for (name, subdd) in mkfile["Project"]["Regions"].items():
if "Prefix" not in subdd:
raise MakefileError("No genome specified for regions %r" % (name,))
subdd["Name"] = name
subdd["Desc"] = "{Prefix}.{Name}".format(**subdd)
subdd["BED"] = os.path.join(options.regions_root, subdd["Desc"] + ".bed")
subdd["FASTA"] = os.path.join(options.prefix_root, subdd["Prefix"] + ".fasta")
required_files = (
("Regions file", subdd["BED"]),
("Reference sequence", subdd["FASTA"]),
)
for (desc, path) in required_files:
if not os.path.isfile(path):
raise MakefileError(
"%s does not exist for %r:\n Path = %r" % (desc, name, path)
)
# Collects seq. names / validate regions
try:
sequences = _collect_sequence_names(
bed_file=subdd["BED"], fasta_file=subdd["FASTA"]
)
except (IOError, BEDError) as error:
raise MakefileError(
"Error reading regions-of-interest %r:\n%s" % (name, error)
)
subdd["Sequences"] = {None: sequences}
subdd["SubsetFiles"] = {None: ()}
sampledd = subdd["Genotypes"] = {}
for sample_name in mkfile["Project"]["Samples"]:
fasta_file = ".".join((sample_name, subdd["Desc"], "fasta"))
sampledd[sample_name] = os.path.join(
options.destination, mkfile["Project"]["Title"], "genotypes", fasta_file
)
def _collect_fasta_contigs(filename, cache={}):
if filename in cache:
return cache[filename]
if not os.path.exists(filename + ".fai"):
log = logging.getLogger(__name__)
log.info("Indexing %r; this may take a while", filename)
cache[filename] = contigs = FASTA.index_and_collect_contigs(filename)
return contigs
def _collect_sequence_names(bed_file, fasta_file, min_columns=6):
contigs = _collect_fasta_contigs(fasta_file)
sequences = {}
for record in read_bed_file(bed_file, min_columns=min_columns, contigs=contigs):
current = (record.contig, record.strand)
reference = sequences.setdefault(record.name, current)
if current[0] != reference[0]:
raise MakefileError(
"Regions in %r with the same name (%r) "
"are located on different contigs (%r and "
"%r); note that PALEOMIX assumes that "
"regions with the same name constitute "
"parts of a single consecutive sequence, "
"which must therefore be located on one "
"strand of a single sequence. Please "
"rename one or more of these regions to"
"continue." % (bed_file, record.name, current[0], reference[0])
)
elif current[1] != reference[1]:
raise MakefileError(
"Regions in %r with the same name (%r) "
"are located on different strands; note "
"that PALEOMIX assumes that regions with "
"the same name constitute parts of a "
"single consecutive sequence, and that "
"these must therefore be located on the "
"same strand." % (bed_file, record.name)
)
return frozenset(sequences)
def _update_subsets(mkfile, steps):
subsets_by_regions = mkfile["Project"]["Regions"]
def _collect_subsets(roi, subset, path):
if roi not in subsets_by_regions:
raise MakefileError(
"Subset of unknown region (%r) requested at %r" % (roi, path)
)
roi_fname = swap_ext(subsets_by_regions[roi]["BED"], subset + ".names")
if not os.path.isfile(roi_fname):
raise MakefileError(
"Subset file does not exist for Regions Of "
"Interest:\n Region = %r\n Subset = %r\n"
" Path = %r" % (roi, subset, roi_fname)
)
sequences = set()
with open(roi_fname) as handle:
for line in handle:
line = line.strip()
if line and not line.startswith("#"):
sequences.add(line)
known_seqs = subsets_by_regions[roi]["Sequences"][None]
unknown_seqs = sequences - known_seqs
if unknown_seqs:
message = (
"Unknown sequences in subset file:\n"
" File = %r\n Region = %r\n Subset = %r\n"
" Unknown sequence names ="
) % (roi_fname, roi, subset)
unknown_seqs = list(sorted(unknown_seqs))
if len(unknown_seqs) > 5:
unknown_seqs = unknown_seqs[:5] + ["..."]
message = "\n - ".join([message] + unknown_seqs)
raise MakefileError(message)
subsets_by_regions[roi]["SubsetFiles"][subset] = (roi_fname,)
subsets_by_regions[roi]["Sequences"][subset] = frozenset(sequences)
if "phylogeny:examl" in steps:
for (key, subdd) in mkfile["PhylogeneticInference"].items():
for (subkey, roidd) in subdd["RegionsOfInterest"].items():
if subkey not in subsets_by_regions:
message = (
"Unknown regions name in phylogenetic inference:\n"
"\tPath = PhylogeneticInference:%s:RegionsOfInterest"
"\n\tName = %s"
)
raise MakefileError(message % (key, subkey))
roidd["Name"] = subkey
if roidd.get("SubsetRegions") is not None:
path = "PhylogeneticInference:%s:RegionsOfInterest:%s" % (
key,
subkey,
)
_collect_subsets(subkey, roidd["SubsetRegions"], path)
def _update_filtering(mkfile):
samples = mkfile["Project"]["Samples"]
groups = mkfile["Project"]["Groups"]
log = logging.getLogger(__name__)
filtering = {}
for (target, filter_by) in mkfile["Project"]["FilterSingletons"].items():
if target.startswith("<") and target.endswith(">"):
raise MakefileError(
"Singleton-filtering must be specified per "
"sample, not by groups: %r" % (target,)
)
elif target not in samples:
raise MakefileError(
"Unknown/Invalid sample specifed for singleton filtering: %r"
% (target,)
)
elif target in filter_by:
raise MakefileError(
"Filtering singleton in sample using itself as comparison: %r"
% (target,)
)
path = "Project:FilterSingletons:%s" % (target,)
filtering[target] = _select_samples(filter_by, groups, samples, path)
# Implicit inclusion is allowed, since that is useful in some cases,
# where we want to filter a sample based on the group it is a member of
if target in filtering[target]:
# The target itself must be excluded, as including it is invalid
filtering[target] = filtering[target] - set((target,))
log.warning(
"Sample %r is singleton-filtered using a group it is also a member of",
target,
)
if not filtering[target]:
raise MakefileError(
"No samples specified by which to "
"singleton-filter by for %r" % (target,)
)
mkfile["Project"]["FilterSingletons"] = filtering
def _update_homozygous_contigs(mkfile):
"""Treat unspecified values for HomozygousContigs as an empty list, in
order that the user does not need to specify "[]" for empty lists.
"""
for regions in mkfile["Project"]["Regions"].values():
hcontigs = regions["HomozygousContigs"]
for key, contigs in hcontigs.items():
if contigs is None:
hcontigs[key] = []
def _check_bam_sequences(options, mkfile, steps):
"""Check that the BAM files contains the reference sequences found in the
FASTA file, matched by name and length; extra sequences are permitted. This
check is only done if genotyping is to be carried out, to reduce the
overhead of reading the BAM file headers.
"""
if ("genotype" not in steps) and ("genotyping" not in steps):
return
log = logging.getLogger(__name__)
log.info("Validating BAM files")
bam_files = {}
for regions in mkfile["Project"]["Regions"].values():
for sample in mkfile["Project"]["Samples"].values():
filename = os.path.join(
options.samples_root, "%s.%s.bam" % (sample["Name"], regions["Prefix"])
)
if os.path.exists(filename):
bam_files[filename] = _collect_fasta_contigs(regions["FASTA"])
for (filename, contigs) in bam_files.items():
with pysam.AlignmentFile(filename) as handle:
bam_contigs = dict(list(zip(handle.references, handle.lengths)))
for (contig, length) in contigs.items():
bam_length = bam_contigs.get(contig)
if bam_length is None:
message = (
"Reference sequence missing from BAM file; "
"BAM file aligned against different prefix?\n"
" BAM file = %s\n Sequence name = %s"
) % (filename, contig)
raise MakefileError(message)
elif bam_length != length:
message = (
"Length of reference sequence in FASTA differs "
"from length of sequence in BAM file; BAM file "
"aligned against different prefix?\n"
" BAM file = %s\n"
" Length in FASTA = %s\n"
" Length in BAM = %s"
) % (filename, length, bam_length)
raise MakefileError(message)
def _check_sexes(mkfile):
all_contigs = set()
contigs_sexes = set()
regions_sexes = set()
for regions in mkfile["Project"]["Regions"].values():
all_contigs.update(_collect_fasta_contigs(regions["FASTA"]))
for contigs in regions["HomozygousContigs"].values():
contigs_sexes.update(contigs)
current_sexes = set(regions["HomozygousContigs"])
if not regions_sexes:
regions_sexes = current_sexes
elif regions_sexes != current_sexes:
raise MakefileError(
"List of sexes for regions %r does not "
"match other regions" % (regions["Name"],)
)
if not regions_sexes:
raise MakefileError(
"No sexes have been specified in makefile; "
"please list all sample sexes and assosiated "
"homozygous contigs (if any)."
)
for sample in mkfile["Project"]["Samples"].values():
if sample.get("Sex") is None:
raise MakefileError(
"Please specify a sex for sample %r, or "
"'NA' if not applicable." % (sample["Name"])
)
elif sample["Sex"] not in regions_sexes:
sexes = ", ".join(map(repr, regions_sexes))
message = "Sample %r has unknown sex %r; known sexes are %s" % (
sample["Name"],
sample["Sex"],
sexes,
)
raise MakefileError(message)
unknown_contigs = contigs_sexes - all_contigs
if unknown_contigs:
log = logging.getLogger(__name__)
log.warning("Unknown contig(s) in 'HomozygousContigs':")
for name in sorted(unknown_contigs):
log.warning(" - %r", name)
log.warning("Please verify that the list(s) of contigs is correct!")
def _update_and_check_max_read_depth(options, mkfile):
if any(
subdd["VCF_Filter"]["MaxReadDepth"] == "auto"
for subdd in mkfile["Genotyping"].values()
):
log = logging.getLogger(__name__)
log.info("Determinining max-depth from depth-histograms")
for (key, settings) in mkfile["Genotyping"].items():
required_keys = set()
for sample in mkfile["Project"]["Samples"].values():
required_keys.add(sample["Name"])
max_depths = settings["VCF_Filter"]["MaxReadDepth"]
if isinstance(max_depths, dict):
# Extra keys are allowed, to make it easier
# to temporarily disable a sample
missing_keys = required_keys - set(max_depths)
if missing_keys:
missing_keys = "\n - ".join(sorted(missing_keys))
message = (
"MaxReadDepth not specified for the following "
"samples for %r:\n - %s" % (key, missing_keys)
)
raise MakefileError(message)
elif isinstance(max_depths, str):
assert max_depths.lower() == "auto", max_depths
prefix = mkfile["Project"]["Regions"][key]["Prefix"]
settings["VCF_Filter"]["MaxReadDepth"] = _read_max_depths(
options, prefix, required_keys
)
else:
max_depths = dict.fromkeys(required_keys, max_depths)
settings["VCF_Filter"]["MaxReadDepth"] = max_depths
def _read_max_depths(options, prefix, required_keys):
missing = []
max_depths = {}
for sample in required_keys:
fname = "%s.%s.depths" % (sample, prefix)
fpath = os.path.join(options.samples_root, fname)
max_depths[sample] = fpath
if not os.path.exists(fpath):
missing.append((sample, fpath))
if missing:
raise MakefileError(
"Could not determine 'MaxReadDepth' values "
"automatically; .depth files are missing for one "
"or more samples: \n - "
+ "\n - ".join("%s: %s" % item for item in missing)
+ "\n\nEnsure that the .depth files are available, "
"or specify a value for 'MaxReadDepth' manually."
)
for sample, fpath in max_depths.items():
max_depths[sample] = _read_max_depth(fpath, prefix, sample)
return max_depths
def _read_max_depth(filename, prefix, sample):
if filename in _DEPTHS_CACHE:
return _DEPTHS_CACHE[filename]
max_depth = None
max_depths = {}
try:
with open(filename) as handle:
for row in parse_padded_table(handle):
if (
row["Name"] != "*"
and row["Sample"] == "*"
and row["Library"] == "*"
and row["Contig"] == "*"
):
if row["Name"] in max_depths:
raise MakefileError(
"Depth histogram %r contains "
"multiple 'MaxDepth' records for "
"sample %r; please rebuild!" % (filename, row["Name"])
)
max_depths[row["Name"]] = row["MaxDepth"]
except (OSError, IOError) as error:
raise MakefileError(
"Error reading depth-histogram (%s): %s" % (filename, error)
)
log = logging.getLogger(__name__)
if sample in max_depths:
max_depth = max_depths[sample]
else:
name_counts = {}
name_mapping = {}
for cand_sample in max_depths:
name = cand_sample.split(".", 1)[0]
name_mapping[name] = cand_sample
name_counts[name] = name_counts.get(name, 0) + 1
        if name_counts.get(sample) == 1:
# Sample name (with some extensions) found
# This is typical if 'paleomix depths' has been run manually.
max_depth = max_depths[name_mapping[sample]]
elif len(max_depths) == 1:
            # Just one sample in the depth histogram; even though it does not
            # match, we assume that this is the correct table. This is because
# manually generating files / renaming files would otherwise cause
# failure when using 'MaxDepth: auto'.
((cand_sample, max_depth),) = max_depths.items()
log.warning(
"Name in depths file not as expected; found %r, not %r:",
cand_sample,
sample,
)
if max_depth is None:
raise MakefileError(
"MaxDepth for %r not found in depth-histogram: %r" % (sample, filename)
)
elif max_depth == "NA":
raise MakefileError(
"MaxDepth is not calculated for sample %r; "
"cannot determine MaxDepth values automatically." % (filename,)
)
elif not max_depth.isdigit():
raise MakefileError(
"MaxDepth is not a valid for sample %r in %r; "
"expected integer, found %r." % (sample, filename, max_depth)
)
max_depth = int(max_depth)
log.info("%s.%s = %i", sample, prefix, max_depth)
_DEPTHS_CACHE[filename] = max_depth
return max_depth
_DEPTHS_CACHE = {}
def _check_indels_and_msa(mkfile):
msa = mkfile["MultipleSequenceAlignment"]
regions = mkfile["Project"]["Regions"]
for (name, subdd) in regions.items():
msa_enabled = msa[name]["Enabled"]
if subdd["IncludeIndels"] and not msa_enabled:
raise MakefileError(
"Regions %r includes indels, but MSA is disabled!" % (name,)
)
def _update_sample_sets(mkfile):
samples = mkfile["Project"]["Samples"]
groups = mkfile["Project"]["Groups"]
for (key, subdd) in mkfile["PhylogeneticInference"].items():
subdd["ExcludeSamples"] = _select_samples(
subdd["ExcludeSamples"],
groups,
samples,
"PhylogeneticInference:%s:ExcludeSamples" % (key,),
)
# Replace None with an empty list, to simplify code using this value
root_trees_on = subdd["RootTreesOn"] or ()
subdd["RootTreesOn"] = _select_samples(
root_trees_on,
groups,
samples,
"PhylogeneticInference:%s:RootTreesOn" % (key,),
)
def _update_genotyping(mkfile):
genotyping = mkfile["Genotyping"]
defaults = genotyping.pop("Defaults")
defaults.setdefault("Padding", 5)
defaults["VCF_Filter"].setdefault("MaxReadDepth", 0)
for (key, subdd) in genotyping.items():
if subdd.get("GenotypeEntirePrefix"):
message = (
"GenotypeEntirePrefix is only allowed for prefixes "
"using default parameters, but is set for %r" % (key,)
)
raise MakefileError(message)
for key in mkfile["Project"]["Regions"]:
genotyping[key] = fill_dict(genotyping.get(key, {}), defaults)
regions = set(genotyping)
unknown_regions = regions - set(mkfile["Project"]["Regions"])
if unknown_regions:
raise MakefileError(
"Unknown Regions of Interest in Genotyping: %s"
% (", ".join(unknown_regions),)
)
def _update_msa(mkfile):
msa = mkfile["MultipleSequenceAlignment"]
defaults = msa.pop("Defaults")
defaults.setdefault("Program", "MAFFT")
defaults["MAFFT"].setdefault("Algorithm", "MAFFT")
for key in mkfile["Project"]["Regions"]:
msa[key] = fill_dict(msa.get(key, {}), defaults)
unknown_regions = set(msa) - set(mkfile["Project"]["Regions"])
if unknown_regions:
raise MakefileError(
"Unknown Regions of Interest in Genotyping: %s"
% (", ".join(unknown_regions),)
)
# Recursive definition of sample tree
_VALIDATION_SUBSAMPLE_KEY = And(StringStartsWith("<"), StringEndsWith(">"))
_VALIDATION_SAMPLES_KEY = And(IsStr, Not(_VALIDATION_SUBSAMPLE_KEY))
_VALIDATION_SAMPLES = {
_VALIDATION_SAMPLES_KEY: {
"GenotypingMethod": RemovedOption(),
"SpeciesName": RemovedOption(),
"CommonName": RemovedOption(),
"Sex": IsStr(),
"Gender": RemovedOption(),
}
}
_VALIDATION_SAMPLES[_VALIDATION_SUBSAMPLE_KEY] = _VALIDATION_SAMPLES
# Genotyping settings; note that explicit lists must not be used here, to allow
# proper inheritance of default values. Use IsListOf instead.
_VALIDATION_GENOTYPES = {
"Padding": IsUnsignedInt,
"GenotypeEntirePrefix": IsBoolean(default=False),
"MPileup": {StringStartsWith("-"): Or(IsInt, IsStr, IsNone)},
"BCFTools": {StringStartsWith("-"): Or(IsInt, IsStr, IsNone)},
"Random": {"--min-distance-to-indels": IsUnsignedInt},
"VCF_Filter": {
"MaxReadDepth": Or(
IsUnsignedInt, IsDictOf(IsStr, IsUnsignedInt), StringIn(("auto",))
),
"--keep-ambigious-genotypes": IsNone,
"--min-quality": IsUnsignedInt,
"--min-allele-frequency": RemovedOption,
"--min-mapping-quality": IsUnsignedInt,
"--min-read-depth": IsUnsignedInt,
"--max-read-depth": IsUnsignedInt,
"--min-num-alt-bases": IsUnsignedInt,
"--min-distance-to-indels": IsUnsignedInt,
"--min-distance-between-indels": IsUnsignedInt,
"--min-strand-bias": IsFloat,
"--min-baseq-bias": IsFloat,
"--min-mapq-bias": IsFloat,
"--min-end-distance-bias": IsFloat,
},
}
_VALIDATION_MSA = {
"Enabled": IsBoolean(default=True),
"Program": StringIn(("mafft",)), # TODO: Add support for other programs
"MAFFT": {
"Algorithm": StringIn(
(
"mafft",
"auto",
"FFT-NS-1",
"FFT-NS-2",
"FFT-NS-i",
"NW-INS-i",
"L-INS-i",
"E-INS-i",
"G-INS-i",
)
),
StringStartsWith("-"): CLI_PARAMETERS,
},
}
_VALIDATION = {
"Project": {
"Title": IsStr(default="Untitled"),
"Samples": _VALIDATION_SAMPLES,
"RegionsOfInterest": {
IsStr: {
"Prefix": IsStr(default=REQUIRED_VALUE),
"Realigned": RemovedOption(),
"ProteinCoding": IsBoolean(default=False),
"IncludeIndels": IsBoolean(default=True),
"HomozygousContigs": {
IsStr: Or(IsNone, IsListOf(IsStr)),
# The sex 'NA' defaults to no homozygous chromosomes
"NA": Or(IsNone, IsListOf(IsStr), default=[]),
},
}
},
"FilterSingletons": {IsStr: [IsStr]},
},
"Genotyping": {"Defaults": _VALIDATION_GENOTYPES, IsStr: _VALIDATION_GENOTYPES},
"MultipleSequenceAlignment": {"Defaults": _VALIDATION_MSA, IsStr: _VALIDATION_MSA},
"PhylogeneticInference": {
IsStr: {
# Which program to use; TODO: Add support for other programs
"Program": StringIn(("examl",), default="examl"),
# Exclude one or more samples from the phylogeny
"ExcludeSamples": [IsStr],
# Which samples to root the final trees on / or midpoint rooting
"RootTreesOn": [IsStr],
# Create a tree per gene, for each region of interest,
# or create a supermatrix tree from all regions specified.
"PerGeneTrees": IsBoolean(default=False),
# Selection of regions of interest / settings per region
"RegionsOfInterest": {
IsStr: {
"Partitions": Or(
And(IsStr, ValuesSubsetOf("123456789X")),
ValueIn([False]),
default=REQUIRED_VALUE,
),
"SubsetRegions": Or(IsStr, IsNone, default=None),
}
},
"SubsetRegions": {IsStr: IsStr},
"ExaML": {
"Bootstraps": IsUnsignedInt(default=100),
"Replicates": IsUnsignedInt(default=1),
"Model": StringIn(("GAMMA", "PSR"), default="gamma"),
},
}
},
}
```
#### File: tests/atomiccmd_test/sets_test.py
```python
from unittest.mock import call, Mock
import pytest
import paleomix.atomiccmd.pprint
from paleomix.atomiccmd.command import AtomicCmd, CmdError
from paleomix.atomiccmd.sets import ParallelCmds, SequentialCmds
_SET_CLASSES = (ParallelCmds, SequentialCmds)
###############################################################################
###############################################################################
# Properties with same expected behavior for both Parallel/SequentialCmds
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__properties(cls):
cmd_mock_1 = AtomicCmd(
("true",),
CHECK_A=id,
EXEC_1="false",
IN_1="/foo/bar/in_1.file",
IN_2="/foo/bar/in_2.file",
OUT_1="/bar/foo/out",
TEMP_OUT_1="out.log",
AUX_A="/aux/fA",
AUX_B="/aux/fB",
)
cmd_mock_2 = AtomicCmd(
("false",),
CHECK_A=list,
EXEC_1="echo",
EXEC_2="java",
IN_1="/foo/bar/in.file",
OUT_1="out.txt",
)
obj = cls([cmd_mock_1, cmd_mock_2])
assert obj.executables == cmd_mock_1.executables | cmd_mock_2.executables
assert obj.requirements == cmd_mock_1.requirements | cmd_mock_2.requirements
assert obj.input_files == cmd_mock_1.input_files | cmd_mock_2.input_files
assert obj.output_files == cmd_mock_1.output_files | cmd_mock_2.output_files
assert (
obj.auxiliary_files == cmd_mock_1.auxiliary_files | cmd_mock_2.auxiliary_files
)
assert obj.expected_temp_files == frozenset(["out", "out.txt"])
assert (
obj.optional_temp_files
== cmd_mock_1.optional_temp_files | cmd_mock_2.optional_temp_files
)
_NO_CLOBBERING_KWARGS = (
({"OUT_A": "/foo/out.txt"}, {"OUT_B": "/bar/out.txt"}),
({"OUT_A": "/foo/out.txt"}, {"TEMP_OUT_B": "out.txt"}),
({"OUT_A": "/foo/out.txt"}, {"OUT_STDOUT": "/bar/out.txt"}),
({"OUT_A": "/foo/out.txt"}, {"TEMP_OUT_STDOUT": "out.txt"}),
({"OUT_A": "/foo/out.txt"}, {"OUT_STDERR": "/bar/out.txt"}),
({"OUT_A": "/foo/out.txt"}, {"TEMP_OUT_STDERR": "out.txt"}),
)
# Ensure that commands in a set don't clobber each other's OUT files
@pytest.mark.parametrize("cls", _SET_CLASSES)
@pytest.mark.parametrize("kwargs_1, kwargs_2", _NO_CLOBBERING_KWARGS)
def test_atomicsets__no_clobbering(cls, kwargs_1, kwargs_2):
cmd_1 = AtomicCmd("true", **kwargs_1)
cmd_2 = AtomicCmd("true", **kwargs_2)
with pytest.raises(CmdError):
cls([cmd_1, cmd_2])
###############################################################################
###############################################################################
# Functions with same expected behavior for both Parallel/SequentialCmds
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__commit(cls):
mock = Mock()
cmd_1 = AtomicCmd(["ls"])
cmd_1.commit = mock.commit_1
cmd_2 = AtomicCmd(["ls"])
cmd_2.commit = mock.commit_2
cmd_3 = AtomicCmd(["ls"])
cmd_3.commit = mock.commit_3
cls((cmd_1, cmd_2, cmd_3)).commit("xTMPx")
assert mock.mock_calls == [
call.commit_1("xTMPx"),
call.commit_2("xTMPx"),
call.commit_3("xTMPx"),
]
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__commit__remove_files_on_failure(tmp_path, cls):
(tmp_path / "tmp").mkdir()
out_path = tmp_path / "out"
cmd_1 = AtomicCmd(["touch", "%(OUT_FILE)s"], OUT_FILE=str(out_path / "file1"))
cmd_2 = AtomicCmd(["touch", "%(OUT_FILE)s"], OUT_FILE=str(out_path / "file2"))
cmd_2.commit = Mock()
cmd_2.commit.side_effect = OSError()
cmdset = cls((cmd_1, cmd_2))
cmdset.run(str(tmp_path / "tmp"))
assert cmdset.join() == [0, 0]
with pytest.raises(OSError):
cmdset.commit(tmp_path / "tmp")
tmp_files = [it.name for it in (tmp_path / "tmp").iterdir()]
assert "file1" not in tmp_files
assert "file2" in tmp_files
assert list((tmp_path / "out").iterdir()) == []
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__stdout(cls):
cmds = cls([AtomicCmd("ls")])
with pytest.raises(CmdError):
cmds.stdout
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__terminate(cls):
mock = Mock()
cmd_1 = AtomicCmd(["ls"])
cmd_1.terminate = mock.terminate_1
cmd_2 = AtomicCmd(["ls"])
cmd_2.terminate = mock.terminate_2
cmd_3 = AtomicCmd(["ls"])
cmd_3.terminate = mock.terminate_3
cmds = cls((cmd_3, cmd_2, cmd_1))
cmds.terminate()
assert mock.mock_calls == [
call.terminate_3(),
call.terminate_2(),
call.terminate_1(),
]
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__str__(cls):
cmds = cls([AtomicCmd("ls")])
assert paleomix.atomiccmd.pprint.pformat(cmds) == str(cmds)
@pytest.mark.parametrize("cls", _SET_CLASSES)
def test_atomicsets__duplicate_cmds(cls):
cmd_1 = AtomicCmd("true")
cmd_2 = AtomicCmd("false")
with pytest.raises(ValueError):
cls([cmd_1, cmd_2, cmd_1])
###############################################################################
###############################################################################
# Parallel commands
def test_parallel_commands__run():
mock = Mock()
cmd_1 = AtomicCmd(["ls"])
cmd_1.run = mock.run_1
cmd_2 = AtomicCmd(["ls"])
cmd_2.run = mock.run_2
cmd_3 = AtomicCmd(["ls"])
cmd_3.run = mock.run_3
cmds = ParallelCmds((cmd_1, cmd_2, cmd_3))
cmds.run("xTMPx")
assert mock.mock_calls == [
call.run_1("xTMPx"),
call.run_2("xTMPx"),
call.run_3("xTMPx"),
]
@pytest.mark.parametrize("value", (True, False))
def test_parallel_commands__ready_single(value):
cmd = AtomicCmd(["ls"])
cmd.ready = Mock()
cmd.ready.return_value = value
cmds = ParallelCmds([cmd])
assert cmds.ready() == value
cmd.ready.assert_called()
_READY_TWO_VALUES = (
(True, True, True),
(False, True, False),
(True, False, False),
(False, False, False),
)
@pytest.mark.parametrize("first, second, result", _READY_TWO_VALUES)
def test_parallel_commands__ready_two(first, second, result):
cmd_1 = AtomicCmd(["ls"])
cmd_1.ready = Mock()
cmd_1.ready.return_value = first
cmd_2 = AtomicCmd(["ls"])
cmd_2.ready = Mock()
cmd_2.ready.return_value = second
cmds = ParallelCmds([cmd_1, cmd_2])
assert cmds.ready() == result
cmd_1.ready.assert_called()
assert bool(first) == bool(cmd_2.ready.call_count)
def test_parallel_commands__join_before_run():
mock = Mock()
cmd_1 = AtomicCmd(["ls"])
cmd_1.join = mock.join_1
cmd_2 = AtomicCmd(["ls"])
cmd_2.join = mock.join_2
cmd_3 = AtomicCmd(["ls"])
cmd_3.join = mock.join_3
cmds = ParallelCmds((cmd_3, cmd_2, cmd_1))
assert cmds.join() == [None, None, None]
assert mock.mock_calls == []
def test_parallel_commands__join_after_run(tmp_path):
cmds = ParallelCmds([AtomicCmd("true") for _ in range(3)])
cmds.run(tmp_path)
assert cmds.join() == [0, 0, 0]
def _setup_mocks_for_failure(*do_mocks):
results = []
for do_mock in do_mocks:
if do_mock:
mock = AtomicCmd(("sleep", 10))
else:
mock = AtomicCmd("false")
results.append(mock)
return results
def test_parallel_commands__join_failure_1(tmp_path):
mocks = _setup_mocks_for_failure(False, True, True)
cmds = ParallelCmds(mocks)
cmds.run(tmp_path)
assert cmds.join() == [1, "SIGTERM", "SIGTERM"]
def test_parallel_commands__join_failure_2(tmp_path):
mocks = _setup_mocks_for_failure(True, False, True)
cmds = ParallelCmds(mocks)
cmds.run(tmp_path)
assert cmds.join() == ["SIGTERM", 1, "SIGTERM"]
def test_parallel_commands__join_failure_3(tmp_path):
mocks = _setup_mocks_for_failure(True, True, False)
cmds = ParallelCmds(mocks)
cmds.run(tmp_path)
assert cmds.join() == ["SIGTERM", "SIGTERM", 1]
def test_parallel_commands__reject_sequential():
command = AtomicCmd(["ls"])
seqcmd = SequentialCmds([command])
with pytest.raises(CmdError):
ParallelCmds([seqcmd])
def test_parallel_commands__accept_parallel():
command = AtomicCmd(["ls"])
parcmd = ParallelCmds([command])
ParallelCmds([parcmd])
def test_parallel_commands__reject_noncommand():
with pytest.raises(CmdError):
ParallelCmds([object()])
def test_parallel_commands__reject_empty_commandset():
with pytest.raises(CmdError):
ParallelCmds([])
###############################################################################
###############################################################################
# Sequential commands
def test_sequential_commands__atomiccmds():
mock = Mock()
cmd_1 = AtomicCmd(["ls"])
cmd_1.run = mock.run_1
cmd_1.join = mock.join_1
cmd_1.join.return_value = [0]
cmd_2 = AtomicCmd(["ls"])
cmd_2.run = mock.run_2
cmd_2.join = mock.join_2
cmd_2.join.return_value = [0]
cmd_3 = AtomicCmd(["ls"])
cmd_3.run = mock.run_3
cmd_3.join = mock.join_3
cmd_3.join.return_value = [0]
cmds = SequentialCmds((cmd_1, cmd_2, cmd_3))
assert not cmds.ready()
cmds.run("xTMPx")
assert cmds.ready()
assert cmds.join() == [0, 0, 0]
assert mock.mock_calls == [
call.run_1("xTMPx"),
call.join_1(),
call.run_2("xTMPx"),
call.join_2(),
call.run_3("xTMPx"),
call.join_3(),
call.join_1(),
call.join_2(),
call.join_3(),
]
def test_sequential_commands__abort_on_error_1(tmp_path):
cmd_1 = AtomicCmd("false")
cmd_2 = AtomicCmd(("sleep", 10))
cmd_3 = AtomicCmd(("sleep", 10))
cmds = SequentialCmds([cmd_1, cmd_2, cmd_3])
cmds.run(tmp_path)
assert cmds.join() == [1, None, None]
def test_sequential_commands__abort_on_error_2(tmp_path):
cmd_1 = AtomicCmd("true")
cmd_2 = AtomicCmd("false")
cmd_3 = AtomicCmd(("sleep", 10))
cmds = SequentialCmds([cmd_1, cmd_2, cmd_3])
cmds.run(tmp_path)
assert cmds.join() == [0, 1, None]
def test_sequential_commands__abort_on_error_3(tmp_path):
cmd_1 = AtomicCmd("true")
cmd_2 = AtomicCmd("true")
cmd_3 = AtomicCmd("false")
cmds = SequentialCmds([cmd_1, cmd_2, cmd_3])
cmds.run(tmp_path)
assert cmds.join() == [0, 0, 1]
def test_sequential_commands__accept_parallel():
command = AtomicCmd(["ls"])
parcmd = ParallelCmds([command])
SequentialCmds([parcmd])
def test_sequential_commands__accept_sequential():
command = AtomicCmd(["ls"])
seqcmd = SequentialCmds([command])
SequentialCmds([seqcmd])
def test_sequential_commands__reject_noncommand():
with pytest.raises(CmdError):
SequentialCmds([object()])
def test_sequential_commands__reject_empty_commandset():
with pytest.raises(CmdError):
SequentialCmds([])
```
|
{
"source": "jfzhang95/LSTM-dignosis",
"score": 2
}
|
#### File: jfzhang95/LSTM-dignosis/methods.py
```python
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def random_weights(shape, name=None):
    # small random weights drawn uniformly from [-0.001, 0.001]
    return theano.shared(floatX(np.random.uniform(size=shape, low=-0.001, high=0.001)), name=name)
def zeros(shape, name=""):
return theano.shared(floatX(np.zeros(shape)), name=name)
def sigmoid(X):
return 1 / (1 + T.exp(-X))
def dropout(X, dropout_prob=0.0):
retain_prob = 1 - dropout_prob
srng = RandomStreams(seed=1234)
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
def clip(X, epsilon):
return T.maximum(T.minimum(X, epsilon), -1*epsilon)
def scale(X, max_norm):
curr_norm = T.sum(T.abs_(X))
return ifelse(T.lt(curr_norm, max_norm), X, max_norm * (X / curr_norm))
def momentum(loss, params, caches, lr=1e-1, rho=0.1, clip_at=5.0, scale_norm=0.0, reg=0.0):
updates = OrderedDict()
grads = T.grad(loss=loss, wrt=params)
caches = list(caches)
for p, c, g in zip(params, caches, grads):
if clip_at > 0.0:
grad = clip(g, clip_at)
else:
grad = g
if scale_norm > 0.0:
grad = scale(grad, scale_norm)
delta = rho * grad + (1-rho) * c
updates[p] = p - lr * (delta + reg * p)
return updates, grads
def get_params(layers):
params = []
for layer in layers:
for param in layer.get_params():
params.append(param)
return params
def make_caches(params):
caches = []
for p in params:
caches.append(theano.shared(floatX(np.zeros(p.get_value().shape))))
return caches
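# Illustrative usage sketch (not part of the original module): wire make_caches()
# and momentum() together for a toy linear regression so the update rule above can
# be exercised end to end. The shapes, learning rate and toy loss below are
# assumptions chosen purely for demonstration.
if __name__ == '__main__':
    X = T.matrix('X')
    y = T.vector('y')
    W = random_weights((3, 1), name='W')
    params = [W]
    pred = T.dot(X, W).flatten()
    loss = T.mean((pred - y) ** 2)
    caches = make_caches(params)
    updates, _ = momentum(loss, params, caches, lr=1e-2)
    train_fn = theano.function([X, y], loss, updates=updates)
    x_np = np.random.randn(8, 3).astype(theano.config.floatX)
    y_np = np.random.randn(8).astype(theano.config.floatX)
    print(train_fn(x_np, y_np))  # prints the scalar loss after one update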
```
|
{
"source": "jfzhang95/Water-Table-Depth-Prediction-PyTorch",
"score": 3
}
|
#### File: jfzhang95/Water-Table-Depth-Prediction-PyTorch/rnn.py
```python
import torch
from torch import nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, class_size, dropout=0.5, rnn_type='lstm'):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.class_size = class_size
self.num_layers = num_layers
self.rnn_type = rnn_type
if self.rnn_type == 'lstm':
self.rnn = nn.LSTM(
input_size=self.input_size,
hidden_size=self.hidden_size, # rnn hidden unit
num_layers=self.num_layers, # number of rnn layer
                batch_first=True,  # input & output have batch size as the 1st dimension, e.g. (batch, time_step, input_size)
)
elif self.rnn_type == 'rnn':
self.rnn = nn.RNN(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True,
)
elif self.rnn_type == 'gru':
self.rnn = nn.GRU(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True,
)
else:
raise NotImplementedError
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.hidden_size, self.class_size) # FC layer in our paper
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
if self.rnn_type == 'lstm':
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
r_out, _ = self.rnn(x, (h0, c0))
else:
r_out, _ = self.rnn(x, h0)
outs = [] # save all predictions
for time_step in range(r_out.size(1)): # calculate output for each time step
outs.append(self.out(self.dropout((r_out[:, time_step, :]))))
return torch.stack(outs, dim=1)
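# Illustrative usage sketch (not part of the original file): run a forward pass on
# dummy data to show the expected tensor shapes. The sizes below are arbitrary
# assumptions made only for demonstration.
if __name__ == '__main__':
    model = RNN(input_size=4, hidden_size=16, num_layers=2, class_size=1, rnn_type='lstm')
    x = torch.randn(8, 12, 4)  # (batch, time_step, input_size)
    out = model(x)
    print(out.shape)  # -> torch.Size([8, 12, 1])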
```
|
{
"source": "jfzhang95/web_spider",
"score": 2
}
|
#### File: dazdp_detail/dazdp_detail/pipelines.py
```python
import logging
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
# Define your item pipelines here
#
from bson import ObjectId
from scrapy.conf import settings
from scrapy.exceptions import DropItem
class MongoDBPipeline(object):
def __init__(self):
        uri = 'mongodb://{}:{}@{}:{}/{}'.format(
            settings['MONGODB_USER_NAME'],
            settings['MONGODB_SERVER_PASSWORD'],
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT'],
            settings['MONGODB_DB'],
        )
        connection = pymongo.MongoClient(uri)
self.db = connection[settings['MONGODB_DB']]
def process_item(self, item, spider):
valid = True
for data in item:
if not data:
valid = False
raise DropItem("Missing {0}!".format(data))
if valid:
try:
# key = {}
# key['sku_id'] = item['sku_id']
# self.db[item['item_name']].update(key, dict(item), upsert=True)
self.db['dazdp_shop_detail'].insert(dict(item))
dzdp_shops = self.db['dzdp_shop'].find({"_id": ObjectId(item['dzdp_shop_id'])})
for dzdp_shop in dzdp_shops:
dzdp_shop["grab_flag"] = 1
self.db['dzdp_shop'].save(dzdp_shop)
logging.debug("add {}".format('dazdp_shop_detail'))
except (pymongo.errors.WriteError, KeyError) as err:
raise DropItem("Duplicated Item: {}".format(item['dzdp_shop_id']))
return item
```
#### File: dazongdianping/dzdianping/db_control.py
```python
import logging
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from db_info import DB_Session
Base = declarative_base()
_Logger = logging.getLogger(__name__)
class MysqlControl:
def __init__(self):
self.session= DB_Session()
def add_shop(self, res):
new_shop = Shop(id=res['shopId'],
name=res['basicInfo'][0],
level=res['basicInfo'][1],
perConsume=res['basicInfo'][2],
taste=res['basicInfo'][3],
environment=res['basicInfo'][4],
service=res['basicInfo'][5],
addr=res['basicInfo'][6],
phoneNum=res['phoneNum'],
cityName=res['cityName'])
self.session.merge(new_shop)
self.session.commit()
print "Added shopId: {0}".format(res['shopId'])
def add_comment(self, res):
query = self.session.query(Comment.id)
scalar = query.filter(Comment.shopId == res['shopId'],
Comment.userName == res['userName'],
Comment.text == res['text']).scalar()
if not scalar:
if not res['rst']:
res['rst'] = ['-', '-', '-']
new_comment = Comment(shopId=res['shopId'],
userName= res['userName'],
taste=res['rst'][0],
environment=res['rst'][1],
service=res['rst'][2],
text=res['text'],
time=res['time'])
self.session.add(new_comment)
self.session.commit()
print "Added comment from {0}".format(res['userName'])
class Shop(Base):
__tablename__ = 'shop'
id = Column(String(10), primary_key=True)
name = Column(String(20))
level = Column(String(8))
perConsume = Column(String(8))
taste = Column(String(3))
environment = Column(String(3))
service = Column(String(8))
addr = Column(String(60))
phoneNum = Column(String(15))
cityName = Column(String(8))
class Comment(Base):
__tablename__ = 'comment'
id = Column(Integer, primary_key=True)
shopId = Column(String(10))
userName = Column(String(20))
taste = Column(String(3))
environment = Column(String(3))
service = Column(String(8))
text = Column(String(500))
time = Column(String(15))
```
#### File: web_spider/first_webspider/data_scraping.py
```python
from bs4 import BeautifulSoup
import time
import datetime
import re
import urllib
from urllib import request
import itertools
from urllib import parse
def download(url, num_retries=5, user_agent='wswp'):
print('downloading:', url)
headers = {'User-agent': user_agent}
req = request.Request(url, headers=headers)
try:
        html = request.urlopen(req).read().decode('utf-8')
except request.URLError as e:
print('download error:', e.reason)
html = None
if num_retries>0:
if hasattr(e, 'code') and 500<=e.code<600:
return download(url, num_retries-1)
return html
url = 'http://example.webscraping.com/places/default/view/Afghanistan-1'
html = download(url)
soup = BeautifulSoup(html, 'lxml')
tr = soup.find(attrs={'id':'places_area__row'})
td = tr.find(attrs={'class':'w2p_fw'})
area = td.text
print(area)
```
|
{
"source": "jfzhang95/weibo_spider",
"score": 3
}
|
#### File: weibo_spider/spiders/weibo_spider.py
```python
import re
import datetime
from scrapy.spider import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request
from weibo_spider.items import InformationItem, TweetsItem, FollowsItem, FansItem
class weiboSpider(CrawlSpider):
name = "weibo_spider"
host = "https://weibo.cn"
start_ids = [
5235640836, 5676304901, 5871897095, 2139359753, 5579672076, 2517436943, 5778999829, 5780802073, 2159807003,
1756807885, 3378940452, 5762793904, 1885080105, 5778836010, 5722737202, 3105589817, 5882481217, 5831264835,
2717354573, 3637185102, 1934363217, 5336500817, 1431308884, 5818747476, 5073111647, 5398825573, 2501511785,
]
scrawl_ID = set(start_ids)
    finish_ID = set()  # record weibo user IDs that have already been crawled
    def start_requests(self):
while self.scrawl_ID.__len__():
ID = self.scrawl_ID.pop()
self.finish_ID.add(ID)
ID = str(ID)
follows = []
followsItem = FollowsItem()
followsItem['_id'] = ID
followsItem['follows'] = follows
fans = []
fansItem = FansItem()
fansItem['_id'] = ID
fansItem['fans'] = fans
url_follows = "https://weibo.cn/{}/follow".format(ID)
url_fans = "https://weibo.cn/{}/fans".format(ID)
url_tweets = "https://weibo.cn/u/{}".format(ID)
url_information0 = "https://weibo.cn/{}/info".format(ID)
            yield Request(url_follows, callback=self.parse3, meta={"item": followsItem, "result": follows})  # crawl followed users
            yield Request(url_fans, callback=self.parse3, meta={"item": fansItem, "result": fans})  # crawl fans
            yield Request(url_information0, callback=self.parse0, meta={"ID": ID})  # crawl profile information
            yield Request(url_tweets, callback=self.parse2, meta={"ID": ID})  # crawl tweets
def parse0(self, response):
"""抓取个人信息1"""
informationItem = InformationItem()
sel = Selector(response)
text0 = sel.xpath('body/div[@class="u"]/div[@class="tip2"]').extract_first()
if text0:
            num_tweets = re.findall(u'\u5fae\u535a\[(\d+)\]', text0)  # number of tweets
            num_follows = re.findall(u'\u5173\u6ce8\[(\d+)\]', text0)  # number of follows
            num_fans = re.findall(u'\u7c89\u4e1d\[(\d+)\]', text0)  # number of fans
if num_tweets:
informationItem["Num_Tweets"] = int(num_tweets[0])
if num_follows:
informationItem["Num_Follows"] = int(num_follows[0])
if num_fans:
informationItem["Num_Fans"] = int(num_fans[0])
informationItem["_id"] = response.meta['ID']
url_information1 = "https://weibo.cn/{}/info".format(response.meta['ID'])
yield Request(url_information1, callback=self.parse1, meta={"item":informationItem})
def parse1(self, response):
"""抓取个人信息2"""
informationItem = response.meta['item']
sel = Selector(response)
text1 = ";".join(sel.xpath('body/div[@class="c"]/text()').extract())
        nickname = re.findall(u'\u6635\u79f0[:|\uff1a](.*?);', text1)  # nickname
        gender = re.findall(u'\u6027\u522b[:|\uff1a](.*?);', text1)  # gender
        place = re.findall(u'\u5730\u533a[:|\uff1a](.*?);', text1)  # region (province and city)
        signature = re.findall(u'\u7b80\u4ecb[:|\uff1a](.*?);', text1)  # bio
        birthday = re.findall(u'\u751f\u65e5[:|\uff1a](.*?);', text1)  # birthday
        sexorientation = re.findall(u'\u6027\u53d6\u5411[:|\uff1a](.*?);', text1)  # sexual orientation
        marriage = re.findall(u'\u611f\u60c5\u72b6\u51b5[:|\uff1a](.*?);', text1)  # marital status
        url = re.findall(u'\u4e92\u8054\u7f51[:|\uff1a](.*?);', text1)  # homepage link
if nickname:
informationItem["NickName"] = nickname[0]
if gender:
informationItem["Gender"] = gender[0]
if place:
place = place[0].split(" ")
informationItem["Province"] = place[0]
if len(place) > 1:
informationItem["City"] = place[1]
if signature:
informationItem["Signature"] = signature[0]
if birthday:
try:
birthday = datetime.datetime.strptime(birthday[0], "%Y-%m-%d")
informationItem["Birthday"] = birthday - datetime.timedelta(hours=8)
except Exception:
pass
if sexorientation:
if sexorientation[0] == gender[0]:
informationItem["Sex_Orientation"] = "Homosexual"
else:
informationItem["Sex_Orientation"] = "Heterosexual"
if marriage:
informationItem["Marriage"] = marriage[0]
if url:
informationItem["URL"] = url[0]
yield informationItem
def parse2(self, response):
"""抓取微博数据"""
sel = Selector(response)
tweets = sel.xpath('body/div[@class="c" and @id]')
for tweet in tweets:
tweetsItem = TweetsItem()
_id = tweet.xpath('@id').extract_first()
content = tweet.xpath('div/span[@class="ctt"]/text()').extract_first()
cooridinates = tweet.xpath('div/a/@href').extract_first()
            like = re.findall(u'\u8d5e\[(\d+)\]', tweet.extract())  # number of likes
            transfer = re.findall(u'\u8f6c\u53d1\[(\d+)\]', tweet.extract())  # number of reposts
            comment = re.findall(u'\u8bc4\u8bba\[(\d+)\]', tweet.extract())  # number of comments
            others = tweet.xpath('div/span[@class="ct"]/text()').extract_first()  # publish time and client (phone or platform)
tweetsItem["ID"] = response.meta["ID"]
tweetsItem["_id"] = response.meta["ID"] + "-" + _id
if content:
tweetsItem["Content"] = content.strip(u"[\u4f4d\u7f6e]")
if cooridinates:
cooridinates = re.findall('center=([\d|.|,]+)', cooridinates)
if cooridinates:
tweetsItem["Co_oridinates"] = cooridinates[0]
if like:
tweetsItem["Like"] = int(like[0])
if transfer:
tweetsItem["Transfer"] = int(transfer[0])
if comment:
tweetsItem["Comment"] = int(comment[0])
if others:
others = others.split(u"\u6765\u81ea")
tweetsItem["PubTime"] = others[0]
if len(others) == 2:
tweetsItem["Tools"] = others[1]
yield tweetsItem
url_next = sel.xpath(u'body/div[@class="pa" and @id="pagelist"]/form/div/a[text()="\u4e0b\u9875"]/@href').extract()
if url_next:
next_url = self.host + url_next[0]
yield Request(next_url, callback=self.parse2, meta={"ID": response.meta["ID"]})
def parse3(self, response):
"""抓取关注或粉丝"""
items = response.meta["item"]
sel = Selector(response)
text2 = sel.xpath(
u'body//table/tr/td/a[text()="\u5173\u6ce8\u4ed6" or text()="\u5173\u6ce8\u5979"]/@href').extract()
for elem in text2:
elem = re.findall('uid=(\d+)', elem)
if elem:
response.meta["result"].append(elem[0])
ID = int(elem[0])
if ID not in self.finish_ID:
self.scrawl_ID.add(ID)
url_next = sel.xpath(
u'body//div[@class="pa" and @id="pagelist"]/form/div/a[text()="\u4e0b\u9875"]/@href').extract()
if url_next:
next_url = self.host + url_next[0]
yield Request(next_url, callback=self.parse3, meta={"item": items, "result": response.meta["result"]})
else:
yield items
```
|
{
"source": "jfzhuang/DAVSS",
"score": 2
}
|
#### File: spatial_correction_camvid/python/show.py
```python
import os
import sys
import cv2
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from lib.model.scnet import SCNet
from lib.dataset.camvid import camvid_video_dataset_PDA
from lib.dataset.utils import decode_labels_camvid
def get_arguments():
parser = argparse.ArgumentParser(description="Test the SCNet")
###### general setting ######
parser.add_argument("--data_list_path", type=str, help="path to the data list")
parser.add_argument("--data_path", type=str, help="path to the data")
parser.add_argument("--gt_path", type=str, help="path to the ground truth")
parser.add_argument("--save_path", type=str, help="path to save results")
parser.add_argument("--scnet_model", type=str, help="path to the trained SCNet model")
###### inference setting ######
parser.add_argument("--num_workers", type=int, help="num of cpus used")
return parser.parse_args()
def test():
args = get_arguments()
print(args)
net = SCNet(n_classes=11)
old_weight = torch.load(args.scnet_model)
new_weight = {}
for k, v in old_weight.items():
new_k = k.replace('module.', '')
new_weight[new_k] = v
net.load_state_dict(new_weight, strict=True)
net.cuda().eval()
deeplab = net.deeplab
flownet = net.flownet
cfnet = net.cfnet
dmnet = net.dmnet
warpnet = net.warpnet
test_data = camvid_video_dataset_PDA(args.data_path, args.gt_path, args.data_list_path)
test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.num_workers)
distance = 10
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
with torch.no_grad():
for d in range(9, distance):
for step, sample in enumerate(test_data_loader):
print(step)
img_list, gt_label = sample
result_list = []
img = img_list[9 - d].cuda()
feat = deeplab(img)
warp_im = F.upsample(img, scale_factor=0.25, mode='bilinear', align_corners=True)
feat_rgb = pred2im(feat)
feat_rgb = cv2.resize(feat_rgb, (480, 360))
result_list.append(feat_rgb)
for i in range(d):
img_1 = img_list[9 - d + i].cuda()
img_2 = img_list[10 - d + i].cuda()
flow = flownet(torch.cat([img_2, img_1], dim=1))
feat = warpnet(feat, flow)
warp_im = warpnet(warp_im, flow)
img_2_down = F.upsample(img_2, scale_factor=0.25, mode='bilinear', align_corners=True)
dm = dmnet(warp_im, img_2_down)
dm_up = F.interpolate(dm, scale_factor=4, mode='bilinear', align_corners=True)
feat_cc = cfnet(img_2)
feat_cc_up = F.interpolate(feat_cc, scale_factor=4, mode='bilinear', align_corners=True)
feat_up = F.interpolate(feat, scale_factor=4, mode='bilinear', align_corners=True)
feat_merge = feat_up * (1-dm_up) + feat_cc_up*dm_up
feat_merge_rgb = pred2im(feat_merge)
feat_merge_rgb = cv2.resize(feat_merge_rgb, (480, 360))
result_list.append(feat_merge_rgb)
gt_rgb = gt_label.clone()
gt_rgb = decode_labels_camvid(gt_rgb.squeeze())
gt_rgb = cv2.resize(gt_rgb, (480, 360))
result_list.append(gt_rgb)
zeros = np.zeros([360, 480, 3], dtype=np.uint8)
result_list.append(zeros)
result_1 = np.concatenate(result_list[:4], axis=1)
result_2 = np.concatenate(result_list[4:8], axis=1)
result_3 = np.concatenate(result_list[8:], axis=1)
result = np.concatenate([result_1, result_2, result_3], axis=0)
cv2.imwrite(os.path.join(args.save_path, '{}.png'.format(step)), result)
if step == 20:
break
def pred2im(pred):
pred = torch.argmax(pred, dim=1).squeeze().cpu().numpy()
pred = decode_labels_camvid(pred)
return pred
if __name__ == '__main__':
test()
```
#### File: lib/dataset/utils.py
```python
import cv2
import random
import numpy as np
from sklearn.metrics import confusion_matrix
import torch
label_colours = [(128, 64, 128), (244, 35, 231), (69, 69, 69), (102, 102, 156), (190, 153, 153), (153, 153, 153),
(250, 170, 29), (219, 219, 0), (106, 142, 35), (152, 250, 152), (69, 129, 180), (219, 19, 60),
(255, 0, 0), (0, 0, 142), (0, 0, 69), (0, 60, 100), (0, 79, 100), (0, 0, 230), (119, 10, 32),
(0, 0, 0)]
label_colours_camvid = [(128, 128, 128), (128, 0, 0), (192, 192, 128), (128, 64, 128), (0, 0, 192), (128, 128, 0),
(192, 128, 128), (64, 64, 128), (64, 0, 128), (64, 64, 0), (0, 128, 192), (0, 0, 0)]
def transform_im(img):
img = img[:, :, ::-1]
img = img.transpose((2, 0, 1))
img = img.astype(np.float32) / 255.0
return img
def randomcrop(img_list, crop_size=(512, 1024)):
_, h, w = img_list[0].shape
crop_h, crop_w = crop_size
top = np.random.randint(0, h - crop_h + 1)
left = np.random.randint(0, w - crop_w + 1)
for i in range(len(img_list)):
if len(img_list[i].shape) == 3:
img_list[i] = img_list[i][:, top:top + crop_h, left:left + crop_w]
else:
img_list[i] = img_list[i][top:top + crop_h, left:left + crop_w]
return img_list
def decode_labels(mask):
h, w = mask.shape
mask[mask == 255] = 19
color_table = np.array(label_colours, dtype=np.float32)
out = np.take(color_table, mask, axis=0)
out = out.astype(np.uint8)
out = out[:, :, ::-1]
return out
def decode_labels_camvid(mask):
h, w = mask.shape
mask[mask == 255] = 11
color_table = np.array(label_colours_camvid, dtype=np.float32)
out = np.take(color_table, mask, axis=0)
out = out.astype(np.uint8)
out = out[:, :, ::-1]
return out
class runningScore(object):
def __init__(self, n_classes):
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) + label_pred[mask],
minlength=n_class**2,
).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
def get_scores(self, return_class=False):
hist = self.confusion_matrix
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
if return_class:
return mean_iu, iu
else:
return mean_iu
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
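# Illustrative usage sketch (not part of the original file): accumulate a confusion
# matrix from random dummy labels and report mean IoU. The class count and label
# shapes are assumptions made only for demonstration.
if __name__ == '__main__':
    score = runningScore(n_classes=19)
    gts = np.random.randint(0, 19, size=(2, 64, 64))
    preds = np.random.randint(0, 19, size=(2, 64, 64))
    score.update(gts, preds)
    print(score.get_scores())  # mean IoU over the accumulated confusion matrix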
```
#### File: lib/model/deeplabv3plus.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class deeplabv3plus(nn.Module):
def __init__(self, n_classes=21):
super(deeplabv3plus, self).__init__()
# Atrous Conv
self.xception_features = Xception()
# ASPP
rates = [1, 6, 12, 18]
self.aspp0 = ASPP_module(2048, 256, rate=rates[0], separable=False)
self.aspp1 = ASPP_module(2048, 256, rate=rates[1])
self.aspp2 = ASPP_module(2048, 256, rate=rates[2])
self.aspp3 = ASPP_module(2048, 256, rate=rates[3])
self.image_pooling = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(2048, 256, 1, stride=1, bias=False),
nn.SyncBatchNorm(256, eps=1e-05, momentum=0.0003), nn.ReLU(inplace=True))
self.concat_projection = nn.Sequential(nn.Conv2d(1280, 256, 1, stride=1, bias=False),
nn.SyncBatchNorm(256, eps=1e-05, momentum=0.0003), nn.ReLU(inplace=True),
nn.Dropout2d(p=0.1))
# adopt [1x1, 48] for channel reduction.
self.feature_projection0_conv = nn.Conv2d(256, 48, 1, bias=False)
self.feature_projection0_bn = nn.SyncBatchNorm(48, eps=1e-03, momentum=0.0003)
self.feature_projection0_relu = nn.ReLU(inplace=True)
self.decoder = nn.Sequential(SeparableConv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
SeparableConv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False))
self.logits = nn.Conv2d(256, n_classes, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, image):
n, c, h, w = image.shape
x, low_level_features = self.xception_features(image)
x1 = self.aspp0(x)
x2 = self.aspp1(x)
x3 = self.aspp2(x)
x4 = self.aspp3(x)
x5 = self.image_pooling(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x5, x1, x2, x3, x4), dim=1)
x = self.concat_projection(x)
x = F.interpolate(x,
size=(int(math.ceil(image.size()[-2] / 4)), int(math.ceil(image.size()[-1] / 4))),
mode='bilinear',
align_corners=True)
low_level_features = self.feature_projection0_conv(low_level_features)
low_level_features = self.feature_projection0_bn(low_level_features)
low_level_features = self.feature_projection0_relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.decoder(x)
x = self.logits(x)
return x
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate, separable=True):
super(ASPP_module, self).__init__()
if rate == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = rate
self.atrous_convolution = SeparableConv2d(inplanes,
planes,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=rate,
bias=False,
separable=separable)
def forward(self, x):
x = self.atrous_convolution(x)
return x
class Xception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, inplanes=3, os=16):
super(Xception, self).__init__()
if os == 16:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
elif os == 8:
entry_block3_stride = 1
middle_block_rate = 2
exit_block_rates = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.entry_flow_conv1_1 = resnet_utils_conv2d_same(inplanes, 32, 3, stride=2, bias=False)
self.entry_flow_bn1_1 = nn.SyncBatchNorm(32, eps=1e-03, momentum=0.0003)
self.entry_flow_relu1_1 = nn.ReLU(inplace=True)
self.entry_flow_conv1_2 = resnet_utils_conv2d_same(32, 64, 3, stride=1, bias=False)
self.entry_flow_bn1_2 = nn.SyncBatchNorm(64, eps=1e-03, momentum=0.0003)
self.entry_flow_relu1_2 = nn.ReLU(inplace=True)
self.entry_flow_block1_unit_1 = xception_module(inplanes=64,
depth_list=[128, 128, 128],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
stride=2)
self.entry_flow_block2_unit_1 = xception_module(inplanes=128,
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
stride=2,
low_level_features=True)
self.entry_flow_block3_unit_1 = xception_module(inplanes=256,
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
stride=2)
# Middle flow
self.middle_flow_block1_unit_1 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_2 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_3 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_4 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_5 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_6 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_7 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_8 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_9 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_10 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_11 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_12 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_13 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_14 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_15 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
self.middle_flow_block1_unit_16 = xception_module(inplanes=728,
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
stride=1)
# Exit flow
self.exit_flow_block1_unit_1 = xception_module(inplanes=728,
depth_list=[728, 1024, 1024],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
stride=1)
self.exit_flow_block2_unit_1 = xception_module(inplanes=1024,
depth_list=[1536, 1536, 2048],
skip_connection_type='none',
activation_fn_in_separable_conv=True,
stride=1,
dilation=2)
self._init_weight()
def forward(self, x):
# Entry flow
x = self.entry_flow_conv1_1(x)
x = self.entry_flow_bn1_1(x)
x = self.entry_flow_relu1_1(x)
x = self.entry_flow_conv1_2(x)
x = self.entry_flow_bn1_2(x)
x = self.entry_flow_relu1_2(x)
x = self.entry_flow_block1_unit_1(x)
x, low_level_feat = self.entry_flow_block2_unit_1(x)
x = self.entry_flow_block3_unit_1(x)
# Middle flow
x = self.middle_flow_block1_unit_1(x)
x = self.middle_flow_block1_unit_2(x)
x = self.middle_flow_block1_unit_3(x)
x = self.middle_flow_block1_unit_4(x)
x = self.middle_flow_block1_unit_5(x)
x = self.middle_flow_block1_unit_6(x)
x = self.middle_flow_block1_unit_7(x)
x = self.middle_flow_block1_unit_8(x)
x = self.middle_flow_block1_unit_9(x)
x = self.middle_flow_block1_unit_10(x)
x = self.middle_flow_block1_unit_11(x)
x = self.middle_flow_block1_unit_12(x)
x = self.middle_flow_block1_unit_13(x)
x = self.middle_flow_block1_unit_14(x)
x = self.middle_flow_block1_unit_15(x)
x = self.middle_flow_block1_unit_16(x)
# Exit flow
x = self.exit_flow_block1_unit_1(x)
x = self.exit_flow_block2_unit_1(x)
return x, low_level_feat
def _init_weight(self):
self.entry_flow_bn1_1.weight.data.fill_(1)
self.entry_flow_bn1_1.bias.data.zero_()
self.entry_flow_bn1_2.weight.data.fill_(1)
self.entry_flow_bn1_2.bias.data.zero_()
class xception_module(nn.Module):
def __init__(self,
inplanes,
depth_list,
skip_connection_type,
stride,
unit_rate_list=None,
dilation=1,
activation_fn_in_separable_conv=True,
low_level_features=False):
super(xception_module, self).__init__()
if len(depth_list) != 3:
raise ValueError('Expect three elements in depth_list.')
if unit_rate_list:
if len(unit_rate_list) != 3:
raise ValueError('Expect three elements in unit_rate_list.')
else:
unit_rate_list = [1, 1, 1]
residual = inplanes
self.separable_conv1 = SeparableConv2d_same(residual,
depth_list[0],
kernel_size=3,
stride=1,
dilation=dilation * unit_rate_list[0],
activation_fn_in_separable_conv=activation_fn_in_separable_conv)
residual = depth_list[0]
self.separable_conv2 = SeparableConv2d_same(residual,
depth_list[1],
kernel_size=3,
stride=1,
dilation=dilation * unit_rate_list[1],
activation_fn_in_separable_conv=activation_fn_in_separable_conv)
residual = depth_list[1]
self.separable_conv3 = SeparableConv2d_same(residual,
depth_list[2],
kernel_size=3,
stride=stride,
dilation=dilation * unit_rate_list[2],
activation_fn_in_separable_conv=activation_fn_in_separable_conv)
shortcut_list = []
if skip_connection_type == 'conv':
shortcut_list.append(nn.Conv2d(inplanes, depth_list[-1], kernel_size=1, stride=stride, bias=False))
shortcut_list.append(nn.SyncBatchNorm(depth_list[-1], eps=1e-03, momentum=0.0003))
self.shortcut = nn.Sequential(*shortcut_list)
self.skip_connection_type = skip_connection_type
self.low_level_features = low_level_features
self._init_weight()
def forward(self, x):
x1 = self.separable_conv1(x)
x2 = self.separable_conv2(x1)
x3 = self.separable_conv3(x2)
x4 = self.shortcut(x)
if self.skip_connection_type == 'conv':
y = x3 + x4
elif self.skip_connection_type == 'sum':
y = x3 + x
elif self.skip_connection_type == 'none':
y = x3
else:
raise ValueError('Unsupported skip connection type.')
if self.low_level_features:
return y, x2
else:
return y
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.normal_(m.weight, std=0.09)
elif isinstance(m, nn.SyncBatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
class resnet_utils_conv2d_same(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
super(resnet_utils_conv2d_same, self).__init__()
self.stride = stride
if stride == 1:
self.conv = nn.Conv2d(inplanes, planes, kernel_size, stride=stride, padding=1, dilation=dilation, bias=bias)
else:
self.conv = nn.Conv2d(inplanes, planes, kernel_size, stride=stride, padding=0, dilation=dilation, bias=bias)
self._init_weight()
def forward(self, x):
if self.stride != 1:
x = fixed_padding(x, self.conv.kernel_size[0], rate=self.conv.dilation[0])
x = self.conv(x)
return x
def _init_weight(self):
n = self.conv.kernel_size[0] * self.conv.kernel_size[1] * self.conv.out_channels
self.conv.weight.data.normal_(0, math.sqrt(2. / n))
class SeparableConv2d_same(nn.Module):
def __init__(self,
inplanes,
planes,
kernel_size=3,
stride=1,
dilation=1,
bias=False,
activation_fn_in_separable_conv=True):
super(SeparableConv2d_same, self).__init__()
self.relu = nn.ReLU(inplace=False)
self.activation_fn_in_separable_conv = activation_fn_in_separable_conv
self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation, groups=inplanes, bias=bias)
self.depthwise_bn = nn.SyncBatchNorm(inplanes, eps=1e-03, momentum=0.0003)
if activation_fn_in_separable_conv:
self.depthwise_relu = nn.ReLU(inplace=True)
self.pointwise = nn.Conv2d(inplanes,
planes,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=bias)
self.pointwise_bn = nn.SyncBatchNorm(planes, eps=1e-03, momentum=0.0003)
if activation_fn_in_separable_conv:
self.pointwise_relu = nn.ReLU(inplace=True)
def forward(self, x):
if not self.activation_fn_in_separable_conv:
x = self.relu(x)
x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
x = self.depthwise(x)
x = self.depthwise_bn(x)
x = self.pointwise(x)
x = self.pointwise_bn(x)
else:
x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
x = self.depthwise(x)
x = self.depthwise_bn(x)
x = self.depthwise_relu(x)
x = self.pointwise(x)
x = self.pointwise_bn(x)
x = self.pointwise_relu(x)
return x
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False, separable=True):
super(SeparableConv2d, self).__init__()
self.separable = separable
if self.separable:
self.depthwise = nn.Conv2d(inplanes,
inplanes,
kernel_size,
stride,
padding,
dilation,
groups=inplanes,
bias=bias)
self.depthwise_bn = nn.SyncBatchNorm(inplanes, eps=1e-05, momentum=0.0003)
self.depthwise_relu = nn.ReLU(inplace=True)
self.pointwise = nn.Conv2d(inplanes,
planes,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=bias)
self.pointwise_bn = nn.SyncBatchNorm(planes, eps=1e-05, momentum=0.0003)
self.pointwise_relu = nn.ReLU(inplace=True)
else:
self.conv = nn.Conv2d(inplanes, planes, kernel_size, stride, padding, dilation, bias=bias)
self.bn = nn.SyncBatchNorm(planes, eps=1e-05, momentum=0.0003)
self.relu = nn.ReLU(inplace=True)
self._init_weight()
def forward(self, x):
if self.separable:
x = self.depthwise(x)
x = self.depthwise_bn(x)
x = self.depthwise_relu(x)
x = self.pointwise(x)
x = self.pointwise_bn(x)
x = self.pointwise_relu(x)
else:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def _init_weight(self):
if self.separable:
torch.nn.init.normal_(self.depthwise.weight, std=0.33)
self.depthwise_bn.weight.data.fill_(1)
self.depthwise_bn.bias.data.zero_()
torch.nn.init.normal_(self.pointwise.weight, std=0.33)
self.pointwise_bn.weight.data.fill_(1)
self.pointwise_bn.bias.data.zero_()
else:
torch.nn.init.normal_(self.conv.weight, std=0.33)
self.bn.weight.data.fill_(1)
self.bn.bias.data.zero_()
def fixed_padding(inputs, kernel_size, rate):
kernel_size_effective = kernel_size + (kernel_size-1) * (rate-1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
if __name__ == "__main__":
model = deeplabv3plus(n_classes=19)
model.cuda().eval()
data = torch.randn(1, 3, 1024, 2048).cuda()
with torch.no_grad():
out = model(data)
print(out.shape)
```
#### File: lib/model/warpnet.py
```python
import torch
import torch.nn as nn
from lib.model.resample2d_package.resample2d import Resample2d
class warp(nn.Module):
def __init__(self):
super(warp, self).__init__()
self.resample = Resample2d()
def forward(self, feature, flow):
assert feature.shape[2:] == flow.shape[2:]
out = self.resample(feature, flow)
return out
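# Illustrative usage sketch (not part of the original file): warp a feature map with
# a dummy optical-flow field. Resample2d is a compiled CUDA extension, so this
# assumes a GPU build is available; the tensor sizes are arbitrary assumptions.
if __name__ == '__main__':
    net = warp().cuda()
    feature = torch.randn(1, 19, 128, 256).cuda()
    flow = torch.randn(1, 2, 128, 256).cuda()
    print(net(feature, flow).shape)  # -> torch.Size([1, 19, 128, 256])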
```
|
{
"source": "jfzhuang/IFR",
"score": 2
}
|
#### File: lib/dataset/image.py
```python
import numpy as np
label_colours_viper = [
(70, 130, 180),
(128, 64, 128),
(244, 35, 232),
(152, 251, 152),
(87, 182, 35),
(35, 142, 35),
(70, 70, 70),
(153, 153, 153),
(190, 153, 153),
(150, 20, 20),
(250, 170, 30),
(220, 220, 0),
(180, 180, 100),
(173, 153, 153),
(168, 153, 153),
(81, 0, 21),
(81, 0, 81),
(220, 20, 60),
(0, 0, 230),
(0, 0, 142),
(0, 80, 100),
(0, 60, 100),
(0, 0, 70),
(0, 0, 0),
]
label_colours = [
(128, 64, 128),
(244, 35, 231),
(69, 69, 69),
(102, 102, 156),
(190, 153, 153),
(153, 153, 153),
(250, 170, 29),
(219, 219, 0),
(106, 142, 35),
(152, 250, 152),
(69, 129, 180),
(219, 19, 60),
(255, 0, 0),
(0, 0, 142),
(0, 0, 69),
(0, 60, 100),
(0, 79, 100),
(0, 0, 230),
(119, 10, 32),
(0, 0, 0),
]
def decode_labels_viper(mask):
h, w = mask.shape
mask[mask == 255] = 23
color_table = np.array(label_colours_viper, dtype=np.float32)
out = np.take(color_table, mask, axis=0)
out = out.astype(np.uint8)
out = out[:, :, ::-1]
return out
def decode_labels(mask):
h, w = mask.shape
mask[mask == 255] = 19
color_table = np.array(label_colours, dtype=np.float32)
out = np.take(color_table, mask, axis=0)
out = out.astype(np.uint8)
out = out[:, :, ::-1]
return out
```
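The two decode functions above colorize an integer label map by indexing a per-class RGB table with `np.take` and flipping the result to BGR channel order. A minimal usage sketch, assuming the module is importable under the path in the file header and using a random prediction map in place of real model output:
```python
import cv2
import numpy as np
from lib.dataset.image import decode_labels
# hypothetical Cityscapes-style prediction with train IDs 0..18 (255 = ignore)
pred = np.random.randint(0, 19, size=(512, 1024)).astype(np.uint8)
color = decode_labels(pred)           # (512, 1024, 3) uint8, BGR channel order
cv2.imwrite("pred_color.png", color)  # cv2.imwrite expects BGR, so no conversion needed
```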
#### File: lib/model/accel.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import build_segmentor
from mmcv.utils import Config
from lib.model.pspnet import pspnet_res18, pspnet_res101
from lib.model.flownet import FlowNets
from lib.model.warpnet import warp
class Accel18(nn.Module):
def __init__(self, num_classes=19, weight_res18=None, weight_res101=None, weight_flownet=None):
super(Accel18, self).__init__()
self.net_ref = pspnet_res101()
self.net_update = pspnet_res18()
self.merge = nn.Conv2d(num_classes * 2, num_classes, kernel_size=1, stride=1, padding=0)
self.flownet = FlowNets()
self.warp = warp()
self.weight_init(weight_res18, weight_res101, weight_flownet)
self.criterion_semantic = nn.CrossEntropyLoss(ignore_index=255)
def weight_init(self, weight_res18, weight_res101, weight_flownet):
if weight_res18 is not None:
weight = torch.load(weight_res18, map_location='cpu')
weight = weight['state_dict']
self.net_update.load_state_dict(weight, True)
self.net_update.fix_backbone()
if weight_res101 is not None:
weight = torch.load(weight_res101, map_location='cpu')
weight = weight['state_dict']
self.net_ref.load_state_dict(weight, True)
self.net_ref.fix_backbone()
if weight_flownet is not None:
weight = torch.load(weight_flownet, map_location='cpu')
self.flownet.load_state_dict(weight, True)
nn.init.xavier_normal_(self.merge.weight)
self.merge.bias.data.fill_(0)
print('pretrained weight loaded')
def forward(self, im_seg_list, im_flow_list, gt=None):
n, c, t, h, w = im_seg_list.shape
pred = self.net_ref(im_seg_list[:, :, 0, :, :])
pred = F.interpolate(pred, scale_factor=2, mode='bilinear', align_corners=False)
for i in range(t - 1):
flow = self.flownet(torch.cat([im_flow_list[:, :, i + 1, :, :], im_flow_list[:, :, i, :, :]], dim=1))
pred = self.warp(pred, flow)
pred_update = self.net_update(im_seg_list[:, :, -1, :, :])
pred_update = F.interpolate(pred_update, scale_factor=2, mode='bilinear', align_corners=False)
pred_merge = self.merge(torch.cat([pred, pred_update], dim=1))
pred_merge = F.interpolate(pred_merge, scale_factor=4, mode='bilinear', align_corners=False)
if gt is not None:
loss = self.criterion_semantic(pred_merge, gt)
loss = loss.unsqueeze(0)
return loss
else:
return pred_merge
def evaluate(self, im_seg_list, im_flow_list):
out_list = []
t = im_seg_list.shape[2]
pred = self.net_ref(im_seg_list[:, :, 0, :, :])
pred = F.interpolate(pred, scale_factor=2, mode='bilinear', align_corners=False)
out = F.interpolate(pred, scale_factor=4, mode='bilinear', align_corners=False)
out = torch.argmax(out, dim=1)
out_list.append(out)
for i in range(t - 1):
flow = self.flownet(torch.cat([im_flow_list[:, :, i + 1, :, :], im_flow_list[:, :, i, :, :]], dim=1))
pred = self.warp(pred, flow)
pred_update = self.net_update(im_seg_list[:, :, -1, :, :])
pred_update = F.interpolate(pred_update, scale_factor=2, mode='bilinear', align_corners=False)
pred_merge = self.merge(torch.cat([pred, pred_update], dim=1))
pred_merge = F.interpolate(pred_merge, scale_factor=4, mode='bilinear', align_corners=False)
out = torch.argmax(pred_merge, dim=1)
out_list.append(out)
return out_list
def set_train(self):
self.net_ref.eval()
self.net_ref.model.decode_head.conv_seg.train()
self.net_update.eval()
self.net_update.model.decode_head.conv_seg.train()
self.flownet.train()
self.merge.train()
if __name__ == '__main__':
model = Accel18(weight_res18=None, weight_res101=None, weight_flownet=None)
model.cuda().eval()
im_seg_list = torch.rand([1, 3, 5, 512, 1024]).cuda()
im_flow_list = torch.rand([1, 3, 5, 512, 1024]).cuda()
with torch.no_grad():
out_list = model.evaluate(im_seg_list, im_flow_list)
print(len(out_list), out_list[0].shape)
```
#### File: pipelines/semi/loading.py
```python
import os.path as osp
import mmcv
import numpy as np
from mmseg.datasets.builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile_Semi(object):
def __init__(
self, to_float32=False, color_type='color', file_client_args=dict(backend='disk'), imdecode_backend='cv2'
):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('img_prefix') is not None:
filenames_v0_0 = osp.join(results['img_prefix'], results['img_info_v0_0']['filename'])
filenames_v0_1 = osp.join(results['img_prefix'], results['img_info_v0_1']['filename'])
filenames_v1_0 = osp.join(results['img_prefix'], results['img_info_v1_0']['filename'])
filenames_v1_1 = osp.join(results['img_prefix'], results['img_info_v1_1']['filename'])
else:
filenames_v0_0 = results['img_info_v0_0']['filename']
filenames_v0_1 = results['img_info_v0_1']['filename']
filenames_v1_0 = results['img_info_v1_0']['filename']
filenames_v1_1 = results['img_info_v1_1']['filename']
img_bytes = self.file_client.get(filenames_v0_0)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v0_0'] = img
img_bytes = self.file_client.get(filenames_v0_1)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v0_1'] = img
img_bytes = self.file_client.get(filenames_v1_0)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v1_0'] = img
img_bytes = self.file_client.get(filenames_v1_1)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v1_1'] = img
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False
)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
@PIPELINES.register_module()
class LoadAnnotations_Semi(object):
def __init__(self, reduce_zero_label=False, file_client_args=dict(backend='disk'), imdecode_backend='pillow'):
self.reduce_zero_label = reduce_zero_label
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('seg_prefix', None) is not None:
filename = osp.join(results['seg_prefix'], results['img_info_v0_0']['ann'])
else:
filename = results['img_info_v0_0']['ann']
img_bytes = self.file_client.get(filename)
gt = mmcv.imfrombytes(img_bytes, flag='unchanged', backend=self.imdecode_backend).squeeze().astype(np.uint8)
# modify if custom classes
if results.get('label_map', None) is not None:
for old_id, new_id in results['label_map'].items():
gt[gt == old_id] = new_id
# reduce zero_label
if self.reduce_zero_label:
# avoid using underflow conversion
gt[gt == 0] = 255
gt = gt - 1
gt[gt == 254] = 255
results['gt'] = gt
results['seg_fields'].append('gt')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
```
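Because both loaders register themselves with mmseg's `PIPELINES` registry, they are normally referenced by name inside a dataset pipeline config rather than instantiated directly. A minimal sketch of such a config (the trailing transform steps are assumptions, not part of this repository):
```python
# hypothetical mmseg-style training pipeline using the two semi-supervised loaders above
train_pipeline = [
    dict(type='LoadImageFromFile_Semi', to_float32=True),
    dict(type='LoadAnnotations_Semi', reduce_zero_label=False),
    # ...followed by the usual crop / flip / normalize / collect steps
]
```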
|
{
"source": "jfzhuang/ST_Memory",
"score": 2
}
|
#### File: MAR/python/test.py
```python
import os
import sys
import cv2
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.metrics import runningScore
from lib.datasets.CityscapesDataset import CityscapesDataset
from exp_new.cityscapes.psp50.MAR.model.refine import SegRefine
from mmseg.models import build_segmentor
from mmcv.utils import Config
def get_arguments():
parser = argparse.ArgumentParser(description="Test the DAVSS")
###### general setting ######
parser.add_argument("--data_path", type=str, help="path to the data")
parser.add_argument("--im_path", type=str, help="path to the images")
parser.add_argument("--model_weight", type=str, help="path to the trained model")
###### inference setting ######
parser.add_argument("--num_workers", type=int, help="num of cpus used")
return parser.parse_args()
def test():
args = get_arguments()
print(args)
net = SegRefine()
weight = torch.load(args.model_weight, map_location='cpu')
net.load_state_dict(weight, True)
net.cuda().eval()
test_data = CityscapesDataset(args.data_path, args.im_path, 'val')
test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.num_workers)
miou_cal = runningScore(n_classes=19)
with torch.no_grad():
for step, data in enumerate(test_data_loader):
print('{}/{}'.format(step, len(test_data_loader)))
im_list, gt = data
for i in range(len(im_list)):
im_list[i] = im_list[i].cuda()
gt = gt.squeeze().numpy()
pred = net(im_list)
out = torch.argmax(pred, dim=1)
out = out.squeeze().cpu().numpy()
miou_cal.update(gt, out)
# if step == 10:
# break
miou, iou = miou_cal.get_scores()
miou_cal.reset()
print('miou:{}'.format(miou))
for i in range(len(iou)):
print(iou[i])
if __name__ == '__main__':
test()
```
#### File: STF/model/refine.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from lib.net.transformer import STF, Transformer
from mmseg.models import build_segmentor
from mmcv.utils import Config
class SegRefine(nn.Module):
def __init__(self, num_classes=19):
super().__init__()
self.num_classes = num_classes
config_path = '/code/mmsegmentation/configs/pspnet_r50-d8.py'
cfg = Config.fromfile(config_path)
self.pspnet = build_segmentor(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
self.STF = STF(dim=512, depth=1, heads=2, mlp_dim=256, pe_dim=(128, 256), use_checkpoint=True)
self.semantic_loss = nn.CrossEntropyLoss(ignore_index=255)
self.weight_init()
def weight_init(self):
weight_path = '/data/jfzhuang/VSPW_480p/work_dirs_v2/psp50_v2/psp50_trained.pth'
weight = torch.load(weight_path, map_location='cpu')
new_weight = {}
for k, v in weight.items():
k = k.replace('pspnet.', '')
new_weight[k] = v
del new_weight['decode_head.conv_seg.weight']
del new_weight['decode_head.conv_seg.bias']
self.pspnet.load_state_dict(new_weight, False)
for p in self.STF.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, im_list, gt=None):
with torch.no_grad():
feat_0 = self.pspnet.backbone(im_list[0])
feat_0 = self.pspnet.decode_head(feat_0, return_feat=True)
feat_0 = feat_0.unsqueeze(2)
feat_1 = self.pspnet.backbone(im_list[1])
feat_1 = self.pspnet.decode_head(feat_1, return_feat=True)
feat_1 = feat_1.unsqueeze(2)
feat_2 = self.pspnet.backbone(im_list[2])
feat_2 = self.pspnet.decode_head(feat_2, return_feat=True)
feat_2 = feat_2.unsqueeze(2)
feat_query = self.STF(feat_0, feat_1, feat_2)
feat_query = self.pspnet.decode_head.cls_seg(feat_query)
feat_query = F.interpolate(feat_query, scale_factor=8, mode='bilinear')
if gt is not None:
loss = self.semantic_loss(feat_query, gt)
loss = loss.unsqueeze(0)
return loss
return feat_query
def forward_return_feat(self, im_list):
im_feat = []
with torch.no_grad():
feat_0 = self.pspnet.backbone(im_list[0])
feat_0 = self.pspnet.decode_head(feat_0, return_feat=True)
feat_0 = feat_0.unsqueeze(2)
feat_1 = self.pspnet.backbone(im_list[1])
feat_1 = self.pspnet.decode_head(feat_1, return_feat=True)
feat_1 = feat_1.unsqueeze(2)
feat_2 = self.pspnet.backbone(im_list[2])
feat_2 = self.pspnet.decode_head(feat_2, return_feat=True)
feat_2 = feat_2.unsqueeze(2)
feat_query = self.STF(feat_0, feat_1, feat_2)
logits = self.pspnet.decode_head.cls_seg(feat_query)
return feat_query, logits
if __name__ == '__main__':
model = SegRefine()
model.cuda().eval()
im_list = []
for i in range(3):
im = torch.zeros([1, 3, 1024, 2048]).cuda()
im_list.append(im)
# with torch.no_grad():
while True:
out = model(im_list)
print(out.shape)
```
#### File: STF/python/train.py
```python
import os
import ast
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from lib.metrics import runningScore
from lib.datasets.CityscapesDataset import CityscapesDataset
from exp_new.cityscapes.psp50.STF.model.refine import SegRefine
def get_arguments():
parser = argparse.ArgumentParser(description="Train Deeplabv3+")
###### general setting ######
parser.add_argument("--exp_name", type=str, help="exp name")
parser.add_argument("--root_data_path", type=str, help="root path to the dataset")
parser.add_argument("--root_im_path", type=str, help="root path to the image")
parser.add_argument("--crop_size", type=int, default=768, help="crop_size")
###### training setting ######
parser.add_argument("--resume", type=ast.literal_eval, default=False, help="resume or not")
parser.add_argument("--resume_epoch", type=int, help="from which epoch for resume")
parser.add_argument("--resume_load_path", type=str, help="resume model load path")
parser.add_argument("--train_load_path", type=str, help="train model load path")
parser.add_argument("--lr", type=float, help="learning rate")
parser.add_argument("--random_seed", type=int, help="random seed")
parser.add_argument("--train_power", type=float, help="power value for linear learning rate schedule")
parser.add_argument("--momentum", type=float, help="momentum")
parser.add_argument("--weight_decay", type=float, help="weight_decay")
parser.add_argument("--train_batch_size", type=int, help="train batch size")
parser.add_argument("--train_num_workers", type=int, default=8, help="num cpu use")
parser.add_argument("--num_epoch", type=int, default=100, help="num of epoch in training")
parser.add_argument("--snap_shot", type=int, default=1, help="save model every per snap_shot")
parser.add_argument("--model_save_path", type=str, help="model save path")
###### testing setting ######
parser.add_argument("--val_batch_size", type=int, default=1, help="batch_size for validation")
parser.add_argument("--val_num_workers", type=int, default=4, help="num of used cpus in validation")
###### tensorboard setting ######
parser.add_argument("--use_tensorboard", type=ast.literal_eval, default=True, help="use tensorboard or not")
parser.add_argument("--tblog_dir", type=str, help="log save path")
parser.add_argument("--tblog_interval", type=int, default=50, help="interval for tensorboard logging")
return parser.parse_args()
def make_dirs(args):
if args.use_tensorboard and not os.path.exists(args.tblog_dir):
os.makedirs(args.tblog_dir)
if not os.path.exists(args.model_save_path):
os.makedirs(args.model_save_path)
def train():
args = get_arguments()
print(args)
make_dirs(args)
random_seed = args.random_seed
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
print('random seed:{}'.format(random_seed))
tblogger = SummaryWriter(args.tblog_dir)
train_dataset = CityscapesDataset(args.root_data_path, args.root_im_path, 'train', resize_size=(768, 1536))
train_dataloader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
pin_memory=False,
num_workers=args.train_num_workers,
drop_last=True,
persistent_workers=True,
)
val_dataset = CityscapesDataset(args.root_data_path, args.root_im_path, 'val')
val_dataloader = DataLoader(
val_dataset,
batch_size=args.val_batch_size,
shuffle=False,
pin_memory=False,
num_workers=args.val_num_workers,
drop_last=False,
persistent_workers=True,
)
net = SegRefine()
if args.resume:
old_weight = torch.load(args.resume_load_path, map_location='cpu')
start_epoch = args.resume_epoch
new_weight = {}
for k, v in old_weight.items():
k = k.replace('module.', '')
new_weight[k] = v
net.load_state_dict(new_weight, strict=True)
else:
start_epoch = 0
print('Successful loading model!')
params = []
for p in net.STF.parameters():
params.append(p)
for p in net.pspnet.decode_head.conv_seg.parameters():
params.append(p)
optimizer = optim.AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
net.cuda()
net = torch.nn.DataParallel(net)
running_loss = 0.0
miou_cal = runningScore(n_classes=19)
current_miou = 0
itr = start_epoch * len(train_dataloader)
max_itr = args.num_epoch * len(train_dataloader)
criterion_semantic = nn.CrossEntropyLoss(ignore_index=255)
for epoch in range(start_epoch, args.num_epoch):
net.train()
net.module.pspnet.eval()
for step, data in enumerate(train_dataloader):
im_list, gt = data
for i in range(len(im_list)):
im_list[i] = im_list[i].cuda()
gt = gt.cuda()
optimizer.zero_grad()
# pred = net(im_list)
# loss_semantic = criterion_semantic(pred, gt)
loss_semantic = net(im_list, gt)
loss_semantic = loss_semantic.mean()
loss_semantic.backward()
torch.nn.utils.clip_grad_norm_(params, 0.1)
optimizer.step()
print('epoch:{}/{} batch:{}/{} itr:{} loss:{:02f}'.format(epoch, args.num_epoch, step,
len(train_dataloader), itr,
loss_semantic.item()))
if args.use_tensorboard and itr % args.tblog_interval == 0:
tblogger.add_scalar('data/loss', loss_semantic.item(), itr)
itr += 1
# if step == 20:
# break
if (epoch + 1) % args.snap_shot == 0:
net.eval()
for step, data in enumerate(val_dataloader):
print('{}/{}'.format(step, len(val_dataloader)))
im_list, gt = data
gt = gt.numpy()
for i in range(len(im_list)):
im_list[i] = im_list[i].cuda()
with torch.no_grad():
pred = net(im_list)
out = torch.argmax(pred, dim=1)
out = out.cpu().numpy()
miou_cal.update(gt, out)
# if step == 10:
# break
            miou, _ = miou_cal.get_scores()
miou_cal.reset()
save_path = os.path.join(args.model_save_path, 'epoch_{}.pth'.format(epoch))
torch.save(net.module.state_dict(), save_path)
print('miou:{}'.format(miou))
tblogger.add_scalar('data/mIoU', miou, epoch)
if miou > current_miou:
save_path = os.path.join(args.model_save_path, 'best.pth')
torch.save(net.module.state_dict(), save_path)
current_miou = miou
torch.cuda.empty_cache()
save_path = os.path.join(args.model_save_path, 'last.pth')
torch.save(net.module.state_dict(), save_path)
if args.use_tensorboard:
tblogger.close()
print('%s has been saved' % save_path)
if __name__ == '__main__':
train()
```
#### File: lib/datasets/transform.py
```python
import cv2
import numpy as np
import torch
import random
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size, is_continuous=False, fix=False):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
self.output_size = output_size
self.seg_interpolation = cv2.INTER_CUBIC if is_continuous else cv2.INTER_NEAREST
self.fix = fix
def __call__(self, sample):
image = sample['image']
h, w = image.shape[:2]
if self.output_size == (h, w):
return sample
if self.fix:
h_rate = self.output_size[0] / h
w_rate = self.output_size[1] / w
min_rate = h_rate if h_rate < w_rate else w_rate
new_h = h * min_rate
new_w = w * min_rate
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = cv2.resize(image, dsize=(new_w, new_h), interpolation=cv2.INTER_CUBIC)
top = (self.output_size[0] - new_h) // 2
bottom = self.output_size[0] - new_h - top
left = (self.output_size[1] - new_w) // 2
right = self.output_size[1] - new_w - left
if self.fix:
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
if 'segmentation' in sample.keys():
segmentation = sample['segmentation']
seg = cv2.resize(segmentation, dsize=(new_w, new_h), interpolation=self.seg_interpolation)
if self.fix:
seg = cv2.copyMakeBorder(seg, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
sample['segmentation'] = seg
sample['image'] = img
return sample
class Centerlize(object):
def __init__(self, output_size, is_continuous=False):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
self.seg_interpolation = cv2.INTER_CUBIC if is_continuous else cv2.INTER_NEAREST
def __call__(self, sample):
image = sample['image']
h, w = image.shape[:2]
if self.output_size == (h, w):
return sample
if isinstance(self.output_size, int):
new_h = self.output_size
new_w = self.output_size
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
top = (new_h-h) // 2
bottom = new_h - h - top
left = (new_w-w) // 2
right = new_w - w - left
img = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
if 'segmentation' in sample.keys():
segmentation = sample['segmentation']
seg = cv2.copyMakeBorder(segmentation, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
sample['segmentation'] = seg
sample['image'] = img
return sample
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, segmentation = sample['image'], sample['segmentation']
h, w = image.shape[:2]
new_h, new_w = self.output_size
new_h = h if new_h >= h else new_h
new_w = w if new_w >= w else new_w
top = np.random.randint(0, h - new_h + 1)
left = np.random.randint(0, w - new_w + 1)
image = image[top:top + new_h, left:left + new_w]
segmentation = segmentation[top:top + new_h, left:left + new_w]
sample['image'] = image
sample['segmentation'] = segmentation
return sample
class RandomScaleCrop(object):
"""Randomly scale image and crop"""
def __init__(self, size):
self.crop_size = size
def __call__(self, sample):
image, segmentation = sample['image'], sample['segmentation']
row, col, _ = image.shape
scale_list = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
scale = scale_list[random.randint(0, 6)]
resized_row, resized_col = int(row * scale), int(col * scale)
img = cv2.resize(image, (resized_col, resized_row), interpolation=cv2.INTER_LINEAR)
seg = cv2.resize(segmentation, (resized_col, resized_row), interpolation=cv2.INTER_NEAREST)
crop_row = self.crop_size[0]
crop_col = self.crop_size[1]
pad_row = max(crop_row - resized_row, 0)
pad_col = max(crop_col - resized_col, 0)
img = np.pad(img, ((0, pad_row), (0, pad_col), (0, 0)), 'constant', constant_values=0.0)
seg = np.pad(seg, ((0, pad_row), (0, pad_col)), 'constant', constant_values=255)
row, col, _ = img.shape
crop_x = random.randint(0, col - crop_col)
crop_y = random.randint(0, row - crop_row)
img = img[crop_y:crop_y + crop_row, crop_x:crop_x + crop_col, :]
seg = seg[crop_y:crop_y + crop_row, crop_x:crop_x + crop_col]
sample['image'] = img
sample['segmentation'] = seg
return sample
class RandomHSV(object):
"""Generate randomly the image in hsv space."""
def __init__(self, h_r, s_r, v_r):
self.h_r = h_r
self.s_r = s_r
self.v_r = v_r
def __call__(self, sample):
image = sample['image']
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
h = hsv[:, :, 0].astype(np.int32)
s = hsv[:, :, 1].astype(np.int32)
v = hsv[:, :, 2].astype(np.int32)
delta_h = np.random.randint(-self.h_r, self.h_r)
delta_s = np.random.randint(-self.s_r, self.s_r)
delta_v = np.random.randint(-self.v_r, self.v_r)
h = (h+delta_h) % 180
s = s + delta_s
s[s > 255] = 255
s[s < 0] = 0
v = v + delta_v
v[v > 255] = 255
v[v < 0] = 0
hsv = np.stack([h, s, v], axis=-1).astype(np.uint8)
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB).astype(np.uint8)
sample['image'] = image
return sample
class RandomFlip(object):
"""Randomly flip image"""
def __init__(self, threshold):
self.flip_t = threshold
def __call__(self, sample):
image, segmentation = sample['image'], sample['segmentation']
if np.random.rand() < self.flip_t:
image_flip = np.flip(image, axis=1)
segmentation_flip = np.flip(segmentation, axis=1)
sample['image'] = image_flip
sample['segmentation'] = segmentation_flip
return sample
class RandomRotation(object):
"""Randomly rotate image"""
def __init__(self, angle_r, is_continuous=False):
self.angle_r = angle_r
self.seg_interpolation = cv2.INTER_CUBIC if is_continuous else cv2.INTER_NEAREST
def __call__(self, sample):
image, segmentation = sample['image'], sample['segmentation']
row, col, _ = image.shape
rand_angle = np.random.randint(-self.angle_r, self.angle_r) if self.angle_r != 0 else 0
m = cv2.getRotationMatrix2D(center=(col / 2, row / 2), angle=rand_angle, scale=1)
new_image = cv2.warpAffine(image, m, (col, row), flags=cv2.INTER_CUBIC, borderValue=0)
new_segmentation = cv2.warpAffine(segmentation, m, (col, row), flags=self.seg_interpolation, borderValue=0)
sample['image'] = new_image
sample['segmentation'] = new_segmentation
return sample
class RandomScale(object):
"""Randomly scale image"""
def __init__(self, scale_r, is_continuous=False):
self.scale_r = scale_r
self.seg_interpolation = cv2.INTER_CUBIC if is_continuous else cv2.INTER_NEAREST
def __call__(self, sample):
image, segmentation = sample['image'], sample['segmentation']
row, col, _ = image.shape
rand_scale = np.random.rand() * (self.scale_r - 1 / self.scale_r) + 1 / self.scale_r
img = cv2.resize(image, None, fx=rand_scale, fy=rand_scale, interpolation=cv2.INTER_CUBIC)
seg = cv2.resize(segmentation, None, fx=rand_scale, fy=rand_scale, interpolation=self.seg_interpolation)
sample['image'] = img
sample['segmentation'] = seg
return sample
class Multiscale(object):
def __init__(self, rate_list):
self.rate_list = rate_list
def __call__(self, sample):
image = sample['image']
row, col, _ = image.shape
image_multiscale = []
for rate in self.rate_list:
rescaled_image = cv2.resize(image, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
sample['image_%f' % rate] = rescaled_image
return sample
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
key_list = sample.keys()
for key in key_list:
if 'image' in key:
image = sample[key]
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
sample[key] = torch.from_numpy(image.astype(np.float32) / 255.0)
#sample[key] = torch.from_numpy(image.astype(np.float32)/128.0-1.0)
elif 'segmentation' == key:
segmentation = sample['segmentation']
sample['segmentation'] = torch.from_numpy(segmentation.astype(np.int64))
elif 'segmentation_onehot' == key:
onehot = sample['segmentation_onehot'].transpose((2, 0, 1))
sample['segmentation_onehot'] = torch.from_numpy(onehot.astype(np.int64))
elif 'mask' == key:
mask = sample['mask']
sample['mask'] = torch.from_numpy(mask.astype(np.int64))
return sample
def onehot(label, num):
m = label
one_hot = np.eye(num)[m]
return one_hot
def Apollo_gt_convert(label):
class_table = [[17, 0], [33, 1], [34, 2], [35, 3], [36, 4], [37, 5], [38, 6], [39, 7], [40, 8], [49, 9], [50, 10],
[65, 11], [66, 12], [67, 13], [81, 14], [82, 15], [83, 16], [84, 17], [85, 255], [86, 255], [97, 18],
[98, 255], [99, 255], [100, 19], [113, 20], [0, 255]]
class_id = [
0, 1, 17, 33, 161, 34, 162, 35, 163, 36, 164, 37, 165, 38, 166, 39, 167, 40, 168, 49, 50, 65, 66, 67, 81, 82,
83, 84, 85, 86, 97, 98, 99, 100, 113, 255
]
train_id = [
255, 255, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 255,
255, 255, 21, 255
]
class_table = [[0, 255], [1, 255], [17, 0], [33, 1], [161, 1], [34, 2], [162, 2], [35, 3], [163, 3], [36, 4],
[164, 4], [37, 5], [165, 5], [38, 6], [166, 6], [39, 7], [167, 7], [40, 8], [168, 8], [49, 9],
[50, 10], [65, 11], [66, 12], [67, 13], [81, 14], [82, 15], [83, 16], [84, 17], [85, 18], [86, 19],
[97, 20], [98, 255], [99, 255], [100, 255], [113, 21]]
out = np.zeros([label.shape[0], label.shape[1]], dtype=np.uint8)
for i in range(len(class_table)):
mask = label == class_table[i][0]
out += mask.astype(np.uint8) * class_table[i][1]
return out
```
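All of the transforms above read and write a plain `sample` dict with `'image'` and `'segmentation'` keys, so they chain naturally with `torchvision.transforms.Compose`. A minimal sketch with random arrays standing in for a real Cityscapes frame (module path taken from the file header above):
```python
import numpy as np
from torchvision import transforms
from lib.datasets.transform import RandomScaleCrop, RandomFlip, ToTensor
pipeline = transforms.Compose([
    RandomScaleCrop((512, 1024)),   # random rescale, pad and crop to 512x1024
    RandomFlip(0.5),                # horizontal flip with probability 0.5
    ToTensor(),                     # HWC uint8 -> CHW float tensor in [0, 1]
])
sample = {
    'image': np.random.randint(0, 256, (1024, 2048, 3), dtype=np.uint8),
    'segmentation': np.random.randint(0, 19, (1024, 2048), dtype=np.uint8),
}
out = pipeline(sample)
print(out['image'].shape, out['segmentation'].shape)  # [3, 512, 1024], [512, 1024]
```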
|
{
"source": "jfzhuang/VIM-LPR",
"score": 2
}
|
#### File: jfzhuang/VIM-LPR/test.py
```python
import os
import cv2
import time
import numpy as np
import torch
import torchvision.transforms.functional as TF
from model.lpr import BiSeNet
import argparse
def isEqual(labelGT, labelP, num_char):
compare = [1 if int(labelGT[i]) == int(labelP[i]) else 0 for i in range(num_char)]
return sum(compare)
def transform(img):
img = cv2.resize(img, (160, 50))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.expand_dims(img, axis=2)
img = np.concatenate((img, img, img), axis=2)
img = TF.to_tensor(img)
img = torch.unsqueeze(img, dim=0)
return img
def main():
# basic parameters
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default=None, required=True, help='dataset to test')
parser.add_argument('--backbone', type=str, default=None, required=True, help='backbone, select from "resnet18, resnet34, resnet50, resnet101"')
parser.add_argument('--weightfile', type=str, default=None, required=True, help='path to weightfile')
args = parser.parse_args()
num_char = 7
num_class = 35
img_path = os.path.join(os.getcwd(), "dataset", args.dataset, 'image')
char_path = os.path.join(os.getcwd(), "dataset", args.dataset, 'char')
model = BiSeNet(num_class, num_char, args.backbone).cuda()
model.load_state_dict(torch.load(args.weightfile), True)
model.eval()
name_list = os.listdir(img_path)
count, correct = 0, 0
for i, name in enumerate(name_list):
count += 1
name = os.path.splitext(name)[0]
char = np.loadtxt(os.path.join(char_path, name + '.txt'), dtype=int)
img = cv2.imread(os.path.join(img_path, name + '.png'))
img = transform(img)
img = img.cuda()
with torch.no_grad():
string_pred = model(img)
string_pred = [x.data.cpu().numpy().tolist() for x in string_pred]
string_pred = [y[0].index(max(y[0])) for y in string_pred]
if isEqual(string_pred, char, num_char-1) == num_char-1:
correct += 1
print('precision:{}'.format(float(correct) / count))
def test_fps():
    model = BiSeNet(35, 7, 'resnet101')
model = model.cuda()
model.eval()
total = 0.0
num = 5000
data = torch.rand([1, 3, 50, 160]).cuda()
with torch.no_grad():
for i in range(100):
print(i)
_ = model(data)
for i in range(num):
print(i)
t1 = time.time()
_ = model(data)
t2 = time.time()
total += t2 - t1
print('num:{} total_time:{}s avg_time:{}s'.format(num, total, total / num))
if __name__ == '__main__':
main()
```
|
{
"source": "jfz/iep-apps",
"score": 2
}
|
#### File: src/scripts/lift-data.py
```python
import argparse
import gzip
import json
import pprint
from argparse import Namespace
from datetime import datetime
from typing import Dict, List
import boto3
import requests
import sys
from boto3.dynamodb.types import Binary
from botocore.exceptions import ClientError, ProfileNotFound
def parse_args() -> Namespace:
parser = argparse.ArgumentParser(description='Lift slotting data from Edda into DynamoDB')
parser.add_argument('--profile', type=str, required=True,
help='AWS credentials profile used to write to the Atlas Slotting DynamoDB table')
parser.add_argument('--region', type=str, nargs='+', required=True,
choices=['eu-west-1', 'us-east-1', 'us-west-1', 'us-west-2'],
help='List of AWS regions where data will be lifted from Edda into DynamoDB')
parser.add_argument('--edda_name', type=str, required=True,
help='Edda DNS name, with a region placeholder, where data will be read')
parser.add_argument('--slotting_table', type=str, required=True,
help='Atlas Slotting DynamoDB table name, where data will be written')
parser.add_argument('--app_name', type=str, nargs='+', required=True,
help='List of application names that will be lifted')
parser.add_argument('--dryrun', action='store_true', required=False, default=False,
help='Enable dryrun mode, to preview changes')
return parser.parse_args()
def get_edda_data(args: Namespace, region: str) -> List[Dict]:
url = f'http://{args.edda_name.format(region)}/api/v2/group/autoScalingGroups;_expand'
r = requests.get(url)
if not r.ok:
print(f'ERROR: Failed to load Edda data from {url}')
sys.exit(1)
else:
return [asg for asg in r.json() if asg['name'].split('-')[0] in args.app_name]
def get_ddb_table(args: Namespace, region: str):
try:
session = boto3.session.Session(profile_name=args.profile)
except ProfileNotFound:
print(f'ERROR: AWS profile {args.profile} does not exist')
sys.exit(1)
dynamodb = session.resource('dynamodb', region_name=region)
table = dynamodb.Table(args.slotting_table)
try:
table.table_status
except ClientError as e:
code = e.response['Error']['Code']
if code == 'ExpiredTokenException':
print(f'ERROR: Security token in AWS profile {args.profile} has expired')
elif code == 'ResourceNotFoundException':
print(f'ERROR: Table {args.slotting_table} does not exist in {region}')
else:
pprint.pprint(e.response)
sys.exit(1)
return table
def lift_data(args: Namespace, region: str):
asgs = get_edda_data(args, region)
table = get_ddb_table(args, region)
for asg in asgs:
item = {
'name': asg['name'],
'active': True,
'data': Binary(gzip.compress(bytes(json.dumps(asg), encoding='utf-8'))),
'timestamp': int(datetime.utcnow().timestamp() * 1000)
}
if args.dryrun:
print(f'DRYRUN: PUT {asg["name"]}')
else:
print(f'PUT {asg["name"]}')
table.put_item(Item=item)
def main():
args = parse_args()
print('==== config ====')
print(f'AWS Profile: {args.profile}')
print(f'Source Edda: {args.edda_name}')
print(f'Destination Table: {args.slotting_table}')
for region in args.region:
print(f'==== {region} ====')
lift_data(args, region)
if __name__ == "__main__":
main()
```
|
{
"source": "jg10545/cleanup",
"score": 3
}
|
#### File: cleanup/cleanup/load.py
```python
import pandas as pd
import sklearn.model_selection
import logging
def load_and_preprocess(filepath, target_col="price", split=0.1):
"""
Load a CSV file containing our data, preprocess it, and split into train
and test components.
Parameters
----------
filepath: string
path to CSV file
target_col: string, optional
name of column containing our dependent variable
split: float, optional
fraction of data to devote to testing
Returns
-------
numpy array
2D numpy array of training examples
numpy array
2D numpy array of test examples
numpy array
1D numpy array of training labels
numpy array
1D numpy array of test labels
"""
# <--- snip
df = pd.read_csv(filepath)
for c in ["Latitude", "Longitude", "lat", "lon", "latitude", "longitude", "Lat", "Lon"]:
if c in df.columns:
logging.info(f"removing column {c}")
df = df.drop(c, 1)
if target_col not in df.columns:
logging.error(f"target column {target_col} isn't even in this dataset. how are you so basic?")
Y = df[target_col].values
X = df[[c for c in df.columns if c != target_col]].values
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y,
test_size=split)
logging.debug("X_train:, %s"%str(X_train.shape))
logging.debug("X_test:, %s"%str(X_test.shape))
logging.debug("Y_train:, %s"%str(Y_train.shape))
logging.debug("Y_test:, %s"%str(Y_test.shape))
# <--- end snip
return X_train, X_test, Y_train, Y_test
```
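A minimal sketch of calling the loader above from user code; the CSV path is hypothetical and only assumes a numeric `price` column plus feature columns:
```python
import logging
from cleanup.load import load_and_preprocess
logging.basicConfig(level=logging.INFO)
X_train, X_test, Y_train, Y_test = load_and_preprocess("data/listings.csv",
                                                       target_col="price",
                                                       split=0.2)
print(X_train.shape, X_test.shape)
```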
#### File: cleanup/tests/test_plot.py
```python
import numpy as np
import matplotlib
import os
from cleanup.plot import build_embedding_figure
def test_build_embedding_figure_returns_fig():
N = 100
embeddings = np.random.normal(0, 1, (N,2))
residuals = np.random.normal(0, 2, (N,))
fig = build_embedding_figure(embeddings, residuals)
assert isinstance(fig, matplotlib.figure.Figure)
def test_build_embedding_figure_saves_fig(tmpdir):
N = 100
embeddings = np.random.normal(0, 1, (N,2))
residuals = np.random.normal(0, 2, (N,))
saveloc = os.path.join(tmpdir, "fig.jpg")
output = build_embedding_figure(embeddings, residuals, save=saveloc)
assert output is None
```
|
{
"source": "jg10545/decepticon",
"score": 3
}
|
#### File: decepticon/tests/test_util.py
```python
import numpy as np
import tensorflow as tf
from PIL import Image
tf.enable_eager_execution()
from decepticon._util import _load_to_array, _remove_objects
"""
BUILDING TEST COMPONENTS
define some shared fake model pieces
"""
# MASK GENERATOR
inpt = tf.keras.layers.Input((None, None, 3))
output = tf.keras.layers.Conv2D(1, 1, padding="same", activation="sigmoid")(inpt)
maskgen = tf.keras.Model(inpt, output)
# INPAINTER
inp_inpt = tf.keras.layers.Input((None, None, 4))
output = tf.keras.layers.Conv2D(3, 1, activation="sigmoid")(inp_inpt)
inpainter = tf.keras.Model(inp_inpt, output)
def test_load_to_array():
test_arr = np.ones((10, 10), dtype=np.uint8)
test_img = Image.fromarray(test_arr)
for test in [test_arr, test_img]:
output = _load_to_array(test)
assert isinstance(output, np.ndarray)
assert output.dtype == np.float32
assert (output >= 0).all()
assert (output <= 1).all()
def test_remove_objects(test_png_path):
img = Image.open(test_png_path)
reconstructed = _remove_objects(test_png_path, maskgen, inpainter)
assert isinstance(reconstructed, Image.Image)
assert img.size == reconstructed.size
```
#### File: decepticon/decepticon/_training_steps.py
```python
import tensorflow as tf
import tensorflow.keras.backend as K
from decepticon._losses import least_squares_gan_loss, compute_style_loss
from decepticon._losses import total_variation_loss, compute_gradient_penalty
@tf.function
def maskgen_training_step(opt, inpt_img, maskgen, classifier,
inpainter, maskdisc=None, cls_weight=1, exp_weight=0.1,
prior_weight=0.25, tv_weight=0, inpaint=True,
class_prob_min=1e-2):
"""
TensorFlow function to perform one training step on the mask generator.
NOT currently set up for multi-class training.
:opt: keras optimizer
    :inpt_img: batch of input images
:maskgen: mask generator model
:classifier: classifier model
:inpainter: inpainting model
:maskdisc: mask discriminator model, if using one
:cls_weight: weight for classification loss (in paper: 12)
:exp_weight: weight for exponential loss (in paper: 18)
:prior_weight: weight for mask discriminator loss (in paper: 3)
:tv_weight: weight total variation loss (not in paper)
:inpaint: if True, fill in the mask using the inpainter before computing
classification loss (the way they did it in the paper)
:class_prob_min: offset the class probability by this amount when computing
classification loss
Returns
:cls_loss: classification loss for the batch
:exp_loss: exponential loss for the batch
:prior_loss: loss from the mask discriminator for the batch
:tv_loss: total variation loss from the batch
:loss: total weighted loss for the batch
:mask: batch masks (for use in mask buffer)
"""
print("tracing maskgen_training_step")
input_shape = inpt_img.get_shape()
tv_norm = input_shape[1]*input_shape[2]
with tf.GradientTape() as tape:
# predict a mask from the original image and mask it
mask = maskgen(inpt_img)
inverse_mask = 1-mask
masked_inpt = inpt_img*inverse_mask
if inpaint:
# fill in with inpainter
inpainted = inpainter(tf.concat([masked_inpt, mask], 3))
y = masked_inpt + mask*inpainted
else:
y = masked_inpt
# run masked image through classifier
softmax_out = classifier(y)
# compute losses
cls_loss = tf.reduce_mean(-1*tf.math.log(softmax_out[:,0] + class_prob_min))
exp_loss = tf.reduce_mean(
tf.exp(tf.reduce_mean(mask, axis=[1,2,3])))
if (prior_weight > 0) & (maskdisc is not None):
prior_loss = -1*tf.reduce_sum(maskdisc(mask))
else:
prior_loss = 0
if tv_weight > 0:
tv_loss = total_variation_loss(mask)/tv_norm
else:
tv_loss = 0
loss = cls_weight*cls_loss + exp_weight*exp_loss + \
prior_weight*prior_loss + tv_weight*tv_loss
# compute gradients and update
variables = maskgen.trainable_variables
gradients = tape.gradient(loss, variables)
opt.apply_gradients(zip(gradients, variables))
return cls_loss, exp_loss, prior_loss, tv_loss, loss, mask
@tf.function
def mask_discriminator_training_step(maskdisc, mask, prior_sample, opt, gradient_penalty=0):
"""
TensorFlow function to perform one training step for the mask discriminator.
:maskdisc: mask discriminator model
:mask: batch of masks generated by mask generator
:prior_sample: sample from prior distribution over masks
:opt: keras optimizer
:gradient_penalty: weight for wasserstein-GAN gradient penalty
"""
print("tracing mask_discriminator_training_step")
with tf.GradientTape() as tape:
# compute losses with respect to actual masks and samples
# from the mask prior
gen_loss = tf.reduce_mean(maskdisc(mask))
prior_loss = tf.reduce_mean(maskdisc(prior_sample))
if gradient_penalty > 0:
gp = compute_gradient_penalty(mask, prior_sample, maskdisc)
else:
gp = 0
wgan_loss = gen_loss - prior_loss + gradient_penalty*gp
wgan_grads = tape.gradient(wgan_loss, maskdisc.trainable_variables)
opt.apply_gradients(zip(wgan_grads, maskdisc.trainable_variables))
return wgan_loss, gp
@tf.function
def inpainter_training_step(inpaint_opt, disc_opt, inpt_img, mask, inpainter,
disc, recon_weight=100,
disc_weight=2, style_weight=0,
tv_weight=0, style_model=None,
gradient_penalty=0):
"""
TensorFlow function to perform one training step on the inpainter as well
as the inpainter's discriminator.
NOT currently set up for multi-class training.
:inpaint_opt: keras optimizer for inpainter
:disc_opt: keras optimizer for discriminator
    :inpt_img: batch of input images
:mask: batch of masks from mask buffer
:inpainter: inpainting model
:disc: discriminator model
:recon_weight: reconstruction loss weight (equations 6, 9)
:disc_weight: discriminator loss weight (equations 7, 9)
:style_weight: weight for style loss (equations 6, 9)
:tv_weight: weight for total variation loss (equation 9)
:style_model: model to use for computing style representation
Returns
:recon_loss: reconstruction loss for the batch
:disc_loss: inpainter loss from the discriminator
:style_loss: neural style loss for the batch
:tv_loss: total variation loss for the batch
:loss: total weighted loss for the batch
:d_loss: discriminator loss
"""
print("tracing inpainter_training_step")
input_shape = inpt_img.get_shape()
tv_norm = input_shape[1]*input_shape[2]*input_shape[3]
with tf.GradientTape() as inp_tape, tf.GradientTape() as disc_tape:
# predict a mask from the original image and mask it
inverse_mask = 1 - mask
masked_inpt = inpt_img*inverse_mask
# fill in with inpainter
inpainted = inpainter(tf.concat([masked_inpt, mask], 3))
y = masked_inpt + mask*inpainted
# run masked image through discriminator
# note that outputs will be (batch_size, X, Y, 1)
sigmoid_out = disc(y)
# ----- INPAINTER LOSSES --------
# compute losses. style loss only computed if weight is nonzero
recon_loss = tf.reduce_mean(tf.abs(y - inpt_img))
# this is the discriminator's loss ON THE INPAINTER
disc_loss = tf.reduce_mean(-1*tf.math.log(1 - sigmoid_out + K.epsilon()))
if (style_weight > 0)&(style_model is not None):
style_loss = compute_style_loss(inpt_img, y, style_model)
else:
style_loss = 0
if tv_weight > 0:
tv_loss = total_variation_loss(y)/tv_norm
else:
tv_loss = 0
loss = recon_weight*recon_loss + disc_weight*disc_loss + \
style_weight*style_loss + tv_weight*tv_loss
# ----- DISCRIMINATOR LOSSES --------
if gradient_penalty > 0:
gp = compute_gradient_penalty(inpt_img, y, disc)
else:
gp = 0
        d_loss = tf.reduce_mean(least_squares_gan_loss(mask, sigmoid_out)) + \
                    gradient_penalty*gp
# compute gradients and update
inp_variables = inpainter.trainable_variables
inp_gradients = inp_tape.gradient(loss, inp_variables)
inpaint_opt.apply_gradients(zip(inp_gradients, inp_variables))
disc_variables = disc.trainable_variables
disc_gradients = disc_tape.gradient(d_loss, disc_variables)
disc_opt.apply_gradients(zip(disc_gradients, disc_variables))
return recon_loss, disc_loss, style_loss, tv_loss, loss, d_loss, gp
```
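A minimal sketch of driving `maskgen_training_step` for a single batch, using tiny stand-in Keras models with the input/output shapes the docstring describes (these toy models are illustrations, not the real decepticon networks):
```python
import tensorflow as tf
from decepticon._training_steps import maskgen_training_step
# toy mask generator: image -> single-channel mask in [0, 1]
inpt = tf.keras.layers.Input((None, None, 3))
maskgen = tf.keras.Model(inpt, tf.keras.layers.Conv2D(1, 1, activation="sigmoid")(inpt))
# toy classifier: image -> 2-class softmax
cls_inpt = tf.keras.layers.Input((None, None, 3))
pooled = tf.keras.layers.GlobalAveragePooling2D()(cls_inpt)
classifier = tf.keras.Model(cls_inpt, tf.keras.layers.Dense(2, activation="softmax")(pooled))
# toy inpainter: masked image concatenated with its mask -> RGB image
inp_inpt = tf.keras.layers.Input((None, None, 4))
inpainter = tf.keras.Model(inp_inpt, tf.keras.layers.Conv2D(3, 1, activation="sigmoid")(inp_inpt))
opt = tf.keras.optimizers.Adam(1e-4)
batch = tf.random.uniform((4, 64, 64, 3))
cls_loss, exp_loss, prior_loss, tv_loss, loss, mask = maskgen_training_step(
    opt, batch, maskgen, classifier, inpainter,
    maskdisc=None, cls_weight=12, exp_weight=18, prior_weight=0, tv_weight=0)
```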
|
{
"source": "jg10545/gomap",
"score": 3
}
|
#### File: jg10545/gomap/prune.py
```python
import os
import argparse
import gomap
def main(f, t):
# find a list of folders
#folders = [d.strip() for d in f.split(",")]
# get a list of all the image files in every folder
print("Finding all the images...")
imfiles = gomap.gather_images(f)
#for f in folders:
# imfiles += [f + x for x in os.listdir(f) if "jpg" in x.lower()]
# extract metadata
print("Extracting metadata...")
try:
df = gomap.exif_df_mp(imfiles)
except:
df = gomap.exif_df(imfiles)
print("%s images found"%len(df))
# find the images that are too close together
close = gomap.find_close_images(df, t)
print("Found %s images within %s meters of the previous one"%(len(close), t))
print("\n")
print("Do you want to delete these images? Seriously they'll be gone forever.")
print("\n")
inpt = input("(y/n) . ")
if inpt.lower() == "y":
print("Deleting...")
for f in close["file"].values:
os.system("rm %s"%f)
print("Done")
else:
print("Then I guess we're done here")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prune one or more directories of images")
parser.add_argument("folder", type=str, help="top-level directory to search through for images")
parser.add_argument("--thresh", type=float, help="pruning distance (in radii)", default=2)
args = parser.parse_args()
main(args.folder, args.thresh)
```
|
{
"source": "jg10545/patchwork",
"score": 3
}
|
#### File: patchwork/feature/_autoencoder.py
```python
import numpy as np
import tensorflow as tf
from patchwork.loaders import dataset
from patchwork.feature._generic import GenericExtractor
def _build_autoencoder(num_channels=3, conv_layers=[32, 48, 64, 128],
dropout=0.5):
"""
"""
# encoder
    inpt = tf.keras.layers.Input((None, None, num_channels))
net = inpt
for c in conv_layers:
net = tf.keras.layers.Conv2D(c, 3, activation="relu",
strides=(2,2), padding="same")(net)
encoder = tf.keras.Model(inpt, net)
# decoder
decoder_inpt = tf.keras.Input((None, None, conv_layers[-1]))
net = decoder_inpt
for c in conv_layers[::-1]:
net = tf.keras.layers.Conv2DTranspose(c, 3,
padding="same",
activation="relu",
strides=(2,2))(net)
    net = tf.keras.layers.Conv2D(num_channels, 3, padding="same",
                                 activation="sigmoid")(net)
decoder = tf.keras.Model(decoder_inpt, net)
# full_model
    inpt_full = tf.keras.layers.Input((256, 256, num_channels))
encoded = encoder(inpt_full)
if dropout > 0:
encoded = tf.keras.layers.SpatialDropout2D(0.5)(encoded)
decoded = decoder(encoded)
full_model = tf.keras.Model(inpt_full, decoded)
return encoder, full_model
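# Shape note (added for clarity): with the default conv_layers=[32, 48, 64, 128],
# a 256x256xC input is downsampled by 2**4 to a 16x16x128 encoding, and the four
# stride-2 transposed convolutions in the decoder map it back up to 256x256xC.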
def _build_training_step(model, opt):
variables = model.trainable_variables
@tf.function
def training_step(x):
with tf.GradientTape() as tape:
reconstructed = model(x, training=True)
loss = tf.reduce_mean(
tf.keras.losses.mae(x, reconstructed)
)
gradient = tape.gradient(loss, variables)
opt.apply_gradients(zip(gradient, variables))
return {"reconstruction_loss":loss}
return training_step
class AutoEncoderTrainer(GenericExtractor):
"""
Generic convolutional autoencoder
"""
modelname = "Autoencoder"
def __init__(self, logdir, trainingdata, testdata=None, fcn=None, full_model=None,
conv_layers=[32, 48, 64, 128], dropout=0.5,
augment=True,
lr=1e-3, lr_decay=100000,
imshape=(256,256), num_channels=3,
norm=255, batch_size=64, num_parallel_calls=None,
single_channel=False, notes="",
downstream_labels=None):
"""
:logdir: (string) path to log directory
:trainingdata: (list) list of paths to training images
:testdata: (list) filepaths of a batch of images to use for eval
:fcn: (keras Model) fully-convolutional network to train as feature extractor
:full_model: (keras model) full autoencoder
:augment: (dict) dictionary of augmentation parameters, True for defaults or
False to disable augmentation
:lr: (float) initial learning rate
:lr_decay: (int) steps for learning rate to decay by half (0 to disable)
:imshape: (tuple) image dimensions in H,W
:num_channels: (int) number of image channels
:norm: (int or float) normalization constant for images (for rescaling to
unit interval)
:batch_size: (int) batch size for training
:num_parallel_calls: (int) number of threads for loader mapping
:single_channel: if True, expect a single-channel input image and
stack it num_channels times.
:notes: (string) any notes on the experiment that you want saved in the
config.yml file
:downstream_labels: dictionary mapping image file paths to labels
"""
self.logdir = logdir
self.trainingdata = trainingdata
self._downstream_labels = downstream_labels
self._file_writer = tf.summary.create_file_writer(logdir, flush_millis=10000)
self._file_writer.set_as_default()
# build models if necessary
if fcn is None or full_model is None:
print("building new autoencoder")
fcn, full_model = _build_autoencoder(num_channels, conv_layers, dropout)
self.fcn = fcn
self._models = {"fcn":fcn, "full":full_model}
# create optimizer
self._optimizer = self._build_optimizer(lr, lr_decay)
# training dataset
self._train_ds, _ = dataset(trainingdata, imshape=imshape,norm=norm,
sobel=False, num_channels=num_channels,
augment=augment, single_channel=single_channel,
batch_size=batch_size, shuffle=True)
# build evaluation dataset
if testdata is not None:
self._test_ds, self._test_steps = dataset(testdata,
imshape=imshape,norm=norm,
sobel=False, num_channels=num_channels,
single_channel=single_channel,
batch_size=batch_size, shuffle=False,
trainer="autoencoder")
self._test = True
else:
self._test = False
# build training step function- this step makes sure that this object
# has its own @tf.function-decorated training function so if we have
# multiple deepcluster objects (for example, for hyperparameter tuning)
# they won't interfere with each other.
self._training_step = _build_training_step(full_model, self._optimizer)
self.step = 0
# parse and write out config YAML
self._parse_configs(augment=augment, conv_layers=conv_layers,
dropout=dropout, lr=lr,
lr_decay=lr_decay,
imshape=imshape, num_channels=num_channels,
norm=norm, batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
single_channel=single_channel, notes=notes)
def _run_training_epoch(self, **kwargs):
"""
"""
for x in self._train_ds:
lossdict = self._training_step(x)
self._record_scalars(**lossdict)
self.step += 1
def evaluate(self, avpool=True, query_fig=False):
if self._test:
test_recon_loss = 0
for x in self._test_ds:
reconstructed = self._models["full"](x)
test_recon_loss += np.mean(np.abs(x.numpy()-reconstructed.numpy()))
self._record_scalars(test_reconstruction_loss=test_recon_loss/self._test_steps)
test_ims = np.concatenate([x.numpy(), reconstructed.numpy()], 2)
self._record_images(test_images=test_ims)
if self._downstream_labels is not None:
# choose the hyperparameters to record
if not hasattr(self, "_hparams_config"):
from tensorboard.plugins.hparams import api as hp
hparams = {
hp.HParam("dropout", hp.RealInterval(0., 1.)):self.config["dropout"]
}
else:
hparams=None
self._linear_classification_test(hparams, avpool=avpool, query_fig=query_fig)
```
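A minimal sketch of running the trainer above on a directory of unlabeled images; the glob patterns are hypothetical, and the loop calls the per-epoch and evaluation methods defined in this file directly rather than whatever higher-level entry point `GenericExtractor` provides:
```python
import glob
from patchwork.feature._autoencoder import AutoEncoderTrainer
trainfiles = glob.glob("unlabeled_images/*.jpg")    # hypothetical image paths
testfiles = glob.glob("validation_images/*.jpg")
trainer = AutoEncoderTrainer("logs/autoencoder/", trainfiles, testdata=testfiles,
                             imshape=(256, 256), batch_size=32, lr=1e-3)
for epoch in range(10):
    trainer._run_training_epoch()
    trainer.evaluate()
```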
#### File: patchwork/feature/_clip.py
```python
import numpy as np
import tensorflow as tf
import logging
import os
import matplotlib.pyplot as plt
from PIL import Image
from scipy.spatial.distance import cdist
from patchwork._util import compute_l2_loss
from patchwork.feature._generic import GenericExtractor
from patchwork.feature._text_transformer import build_text_transformer
from patchwork.loaders import _image_file_dataset
from patchwork._augment import augment_function
from patchwork.feature._contrastive import _contrastive_loss, _build_negative_mask
try:
import sentencepiece as spm
except:
logging.debug("unable to import sentencepiece- CLIPTrainer won't work.")
def clip_dataset(imfiles, labels, encoder, maxlen=76, imshape=(256,256),
num_channels=3, num_parallel_calls=None, norm=255,
batch_size=256, augment=False, shuffle=True,
single_channel=False):
"""
Build a tf.data.Dataset object for training CLIP
:imfiles: list of paths to training images
:labels: list of strings containing a caption for each image
:encoder: sentencepiece object for mapping strings to byte pair encoded arrays
:maxlen: int; length of sequences. BPE arrays will be padded or truncated to this.
"""
ds = _image_file_dataset(imfiles, ys=labels, imshape=imshape,
augment=augment, shuffle=shuffle)
if augment:
aug_func = augment_function(imshape, {"rot90":True})
def _encode_text(y):
y = str(y)
y = encoder.encode(y, out_type=int, add_bos=True, add_eos=True)
N = len(y)
if N > maxlen:
y = y[:maxlen]
elif N < maxlen:
y += [0]*(maxlen-N)
return np.array(y)
def _augment_and_encode(x,y):
if augment:
x = aug_func(x)
y = tf.py_function(_encode_text, (y,), Tout=tf.int64)
return x,y
ds = ds.map(_augment_and_encode, num_parallel_calls=num_parallel_calls)
ds = ds.batch(batch_size)
ds = ds.prefetch(1)
return ds
def build_image_encoder(fcn, num_channels=3, output_dim=64):
"""
NOT the full version used in OpenAI's paper- just a linear
projection head after the global average pool, instead of
a multi-head attention mechanism
:fcn: fully convolutional network to build off of
:num_channels: int; number of input channels
:output_dim: int; output dimension of final dense layer
"""
inpt = tf.keras.layers.Input((None, None, 3))
x = fcn(inpt)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(output_dim)(x)
return tf.keras.Model(inpt, x)
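# Example (illustration only, mirroring how CLIPTrainer wires this up below):
#   fcn = tf.keras.applications.ResNet50(weights=None, include_top=False)
#   encoder = build_image_encoder(fcn, output_dim=64)   # maps (N, H, W, 3) -> (N, 64)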
def compute_nce_loss(img_embed, text_embed, temp=0.07, return_acc=False):
"""
Symmetrized NCE loss for paired image/text embeddings
"""
N = img_embed.shape[0]
img_norm = tf.nn.l2_normalize(img_embed, 1)
text_norm = tf.nn.l2_normalize(text_embed, 1)
# NOTE this is different from what's described in the paper- check
# pseudocode in figure 3
logits1 = tf.matmul(img_norm, text_norm, transpose_b=True)/temp
labels1 = tf.range(N)
loss1 = tf.reduce_mean(
tf.losses.sparse_categorical_crossentropy(labels1, logits1, from_logits=True))
logits2 = tf.matmul(text_norm, img_norm, transpose_b=True)/temp
labels2 = tf.range(N)
loss2 = tf.reduce_mean(
tf.losses.sparse_categorical_crossentropy(labels2, logits2, from_logits=True))
loss = 0.5*(loss1 + loss2)
if return_acc:
pred = tf.argmax(logits1, 1)
acc = tf.reduce_mean(tf.cast(tf.cast(pred, tf.int32) == tf.cast(labels1, tf.int32), tf.float32))
return loss, acc
return loss
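# Shape walk-through for the symmetrized loss above (added for clarity): with batch
# size N and projection dimension D,
#   img_embed, text_embed : (N, D), rows l2-normalized
#   logits1, logits2      : (N, N) cosine-similarity matrices scaled by 1/temp
#   labels1, labels2      : [0, 1, ..., N-1], so matching pairs lie on the diagonal
# and each direction is an N-way classification of which caption/image is the true match.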
def build_clip_training_step(img_model, text_model, optimizer, temp=0.07, weight_decay=0):
trainvars = img_model.trainable_variables + text_model.trainable_variables
def trainstep(img_batch, text_batch):
with tf.GradientTape() as tape:
img_embed = img_model(img_batch, training=True)
text_embed = text_model(text_batch, training=True)
nce_loss = compute_nce_loss(img_embed, text_embed, temp)
if weight_decay > 0:
l2_loss = compute_l2_loss(img_model) + compute_l2_loss(text_model)
else:
l2_loss = 0
loss = nce_loss + weight_decay*l2_loss
grads = tape.gradient(loss, trainvars)
optimizer.apply_gradients(zip(grads, trainvars))
lossdict = {"loss":loss, "l2_loss":l2_loss, "nce_loss":nce_loss}
return lossdict
return trainstep
def build_clip_test_step(img_model, text_model, optimizer, temp=0.07, weight_decay=0):
@tf.function
def teststep(img_batch, text_batch):
img_embed = img_model(img_batch, training=False)
text_embed = text_model(text_batch, training=False)
nce_loss, acc = compute_nce_loss(img_embed, text_embed, temp, True)
if weight_decay > 0:
l2_loss = compute_l2_loss(img_model) + compute_l2_loss(text_model)
else:
l2_loss = 0
loss = nce_loss + weight_decay*l2_loss
return loss, acc
return teststep
class CLIPTrainer(GenericExtractor):
"""
Class for training a CLIP model.
Based on "Learning transferable visual models from natural language
supervision" by Radford et al.
"""
modelname = "CLIP"
def __init__(self, logdir, tokenizer, trainingdata, traininglabels,
testdata=None, testlabels=None, fcn=None, augment=True,
maxlen=76, embed_dim=512, ff_dim=2048,
num_layers=12, num_heads=8,
temperature=0.07, output_dim=64,
weight_decay=0,
lr=0.01, lr_decay=0, decay_type="cosine",
opt_type="adam",
imshape=(256,256), num_channels=3,
norm=255, batch_size=64, num_parallel_calls=None,
single_channel=False, notes="",
downstream_labels=None,
strategy=None):
"""
:logdir: (string) path to log directory
:tokenizer: (string) path to sentencepiece model file
:trainingdata: (list) list of strings; paths to training images
:traininglabels: (list) list of strings; captions for training images
:testdata: (list) filepaths of a batch of images to use for eval
:testlabels: (list) list of strings; captions for test images
:fcn: (keras Model) fully-convolutional network to train as feature extractor
:augment: (dict) dictionary of augmentation parameters, True for defaults
:maxlen: int; length to pad or truncate encoded text queries to
:embed_dim: int; embedding dimension of tokens and transformers for language model
:ff_dim: int; dimension of internal feed-forward layers inside transformer blocks
:num_layers: int; number of transformer blocks in language model
:num_heads: int; number of heads in each transformer block in language model
:temperature: the Boltzmann temperature parameter- rescale the cosine similarities by this factor before computing softmax loss.
:output_dim: dimension of projection head's output space.
:weight_decay: coefficient for L2-norm loss. The original SimCLR paper used 1e-6.
:lr: (float) initial learning rate
:lr_decay: (int) number of steps for one decay period (0 to disable)
:decay_type: (string) how to decay the learning rate- "exponential" (smooth exponential decay), "staircase" (non-smooth exponential decay), or "cosine"
:opt_type: (string) optimizer type; "adam" or "momentum"
:imshape: (tuple) image dimensions in H,W
:num_channels: (int) number of image channels
:norm: (int or float) normalization constant for images (for rescaling to
unit interval)
:batch_size: (int) batch size for training
:num_parallel_calls: (int) number of threads for loader mapping
:single_channel: if True, expect a single-channel input image and
stack it num_channels times.
:notes: (string) any notes on the experiment that you want saved in the
config.yml file
:downstream_labels: dictionary mapping image file paths to labels
:strategy: if distributing across multiple GPUs, pass a tf.distribute
Strategy object here
"""
self.logdir = logdir
self.trainingdata = trainingdata
self._downstream_labels = downstream_labels
self._test_index_updated = False
if strategy is None:
strategy = tf.distribute.get_strategy()
self.strategy = strategy
self._testdata = testdata
self._testlabels = testlabels
self._file_writer = tf.summary.create_file_writer(logdir, flush_millis=10000)
self._file_writer.set_as_default()
# load tokenizer
self._tokenizer = spm.SentencePieceProcessor(tokenizer)
self._vocab_size = self._tokenizer.vocab_size()
# if no FCN is passed- build one
with self.scope():
if fcn is None:
fcn = tf.keras.applications.ResNet50(weights=None, include_top=False)
self.fcn = fcn
# Create a Keras model that wraps the base encoder and
# the projection head
full = build_image_encoder(fcn, num_channels=num_channels,
output_dim=output_dim)
text = build_text_transformer(self._vocab_size, maxlen,
embed_dim=embed_dim, num_layers=num_layers,
num_heads=num_heads, ff_dim=ff_dim,
final_projection=output_dim)
self._models = {"fcn":fcn,
"full":full,
"text":text}
# build training dataset
ds = clip_dataset(trainingdata, traininglabels, self._tokenizer,
maxlen=maxlen, imshape=imshape,
num_channels=num_channels,
num_parallel_calls=num_parallel_calls,
norm=norm, batch_size=batch_size, shuffle=True)
self._ds = self._distribute_dataset(ds)
# create optimizer
self._optimizer = self._build_optimizer(lr, lr_decay, opt_type=opt_type,
decay_type=decay_type)
# build training step
self._training_step = build_clip_training_step(full, text,
self._optimizer, temp=temperature,
weight_decay=weight_decay)
if testdata is not None:
self._test_ds = clip_dataset(testdata, testlabels, self._tokenizer,
maxlen=maxlen, imshape=imshape,
num_channels=num_channels,
num_parallel_calls=num_parallel_calls,
norm=norm, batch_size=batch_size, shuffle=False)
@tf.function
def loss_step(x,y):
img_embed = self._models["full"](x, training=False)
text_embed = self._models["text"](y, training=False)
img_norm = tf.nn.l2_normalize(img_embed, 1)
text_norm = tf.nn.l2_normalize(text_embed, 1)
return _contrastive_loss(img_norm, text_norm, temperature)
self._loss_step = loss_step
self._test = True
else:
self._test = False
self.step = 0
# parse and write out config YAML
metrics= ["linear_classification_accuracy",
"test_acc"]
self._parse_configs(metrics=metrics,
tokenizer=tokenizer, maxlen=maxlen,
augment=augment, temperature=temperature,
output_dim=output_dim, weight_decay=weight_decay,
num_layers=num_layers,
num_heads=num_heads,
lr=lr, lr_decay=lr_decay,
imshape=imshape, num_channels=num_channels,
norm=norm, batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
single_channel=single_channel, notes=notes,
trainer="clip", strategy=str(strategy),
decay_type=decay_type, opt_type=opt_type)
def _run_training_epoch(self, **kwargs):
"""
"""
self._test_index_updated = False
for x, y in self._ds:
lossdict = self._training_step(x, y)
self._record_scalars(**lossdict)
self._record_scalars(learning_rate=self._get_current_learning_rate())
self.step += 1
def evaluate(self, avpool=True, query_fig=False):
if self._test:
test_acc = []
test_loss = []
for x, y in self._test_ds:
l, a = self._loss_step(x,y)
test_acc.append(a.numpy())
test_loss.append(l.numpy())
self._record_scalars(test_acc=np.mean(test_acc),
test_loss=np.mean(test_loss))
if self._downstream_labels is not None:
self._linear_classification_test(avpool=avpool, query_fig=query_fig)
def save(self):
"""
Write model(s) to disk
        Note: tried to use SavedModel format for this and got a memory leak
        (possibly https://github.com/tensorflow/tensorflow/issues/32234), but the
        code below currently saves in TF SavedModel format; see load_weights()
        for the matching loader.
"""
for m in self._models:
path = os.path.join(self.logdir, m)
self._models[m].save(path, overwrite=True, save_format="tf")
def load_weights(self, logdir):
"""
Update model weights from a previously trained model
Different from generic load_weights because we're using TF
savedmodel format instead of HDF5
"""
for k in self._models:
savedloc = os.path.join(logdir, k, "variables", "variables")
self._models[k].load_weights(savedloc)
self._test_index_updated = False
def _update_test_index(self):
"""
Precompute embeddings on the test set so we can run sample
queries against it.
"""
if not self._test_index_updated:
logging.info("updating embedding index on test data")
self._test_index = self._models["full"].predict(self._test_ds)
self._test_index_updated = True
def query_test_set(self, querystring, plot=True):
"""
"""
maxlen = self.config["maxlen"]
# make sure test index is up-to-date
self._update_test_index()
# run byte-pair encoding of query text
query_array = np.array(self._tokenizer.encode(querystring,
out_type=int, add_bos=True,
add_eos=True))
# pad or clip query as necessary
L = len(query_array)
if L > maxlen:
query_array = query_array[:maxlen]
elif L < maxlen:
query_array = np.concatenate([query_array,
np.zeros(maxlen-L, dtype=np.int64)])
query_array = query_array.reshape(1,-1)
# now map that query to a semantic vector in the shared
# embedding space
query_vector = self._models["text"](query_array, training=False)
# find distance to all the image embeddings from the test set
dists = cdist(self._test_index, query_vector.numpy(), metric="cosine").ravel()
ordering = dists.argsort()
if plot:
plt.suptitle("Query: '%s'"%querystring, fontsize=14)
for i in range(9):
plt.subplot(3,3,i+1)
img = Image.open(self._testdata[ordering[i]])
plt.imshow(img)
plt.axis(False)
plt.title(self._testlabels[ordering[i]].replace(".",".\n").replace(",",",\n"))
else:
return dists
```
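A quick way to sanity-check the symmetric NCE loss defined at the top of this file is to run it on random paired embeddings; with unrelated vectors the in-batch retrieval accuracy should sit near 1/N. This is a minimal sketch, assuming TensorFlow 2.x is installed and `compute_nce_loss` from above is already in scope (the import path is not shown here).
```python
# Minimal sketch; assumes compute_nce_loss (defined above) is in scope.
import numpy as np
import tensorflow as tf

N, d = 8, 64
rng = np.random.default_rng(0)
img_embed = tf.constant(rng.normal(size=(N, d)).astype("float32"))
text_embed = tf.constant(rng.normal(size=(N, d)).astype("float32"))

# loss only
loss = compute_nce_loss(img_embed, text_embed, temp=0.07)
# loss plus top-1 retrieval accuracy within the batch
loss, acc = compute_nce_loss(img_embed, text_embed, temp=0.07, return_acc=True)
print(float(loss), float(acc))  # unrelated embeddings -> accuracy near 1/N
```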
#### File: patchwork/feature/_detcon_utils.py
```python
import numpy as np
import tensorflow as tf
import warnings
import skimage.segmentation
from patchwork._augment import SINGLE_AUG_FUNC
SEG_AUG_FUNCTIONS = ["flip_left_right", "flip_up_down", "rot90", "shear", "zoom_scale", "center_zoom_scale"]
def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):
"""
Wrapper for computing segments for an image. Inputs an image tensor and
returns a stack of binary segmentation masks.
:img: (H,W,C) image tensor
:mean_scale: average scale parameter for Felzenszwalb's algorithm. Actual
value will be sampled from (0.5*mean_scale, 1.5*mean_scale), and min_size
will be set to the scale.
:num_samples: number of segments to compute. if more segments are found than
num_samples, they'll be uniformly subsampled without replacement; if fewer
are found they'll be sampled with replacement.
    :return_enough_segments: whether to also return a Boolean indicating whether
        enough unique segments were found to sample without replacement
    """
# randomly choose the segmentation scale
scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)
# run heuristic segmentation
segments = skimage.segmentation.felzenszwalb(img, scale=scale,
min_size=int(scale))
# sample a set of segmentations to use; bias toward larger ones
max_segment = segments.max()
indices = np.arange(max_segment+1)
seg_count = np.array([(segments == i).sum()+1 for i in indices])
p = seg_count/seg_count.sum()
# try this for error correction?
if num_samples <= max_segment:
sampled_indices = np.random.choice(indices, p=p, size=num_samples,
replace=False)
else:
warnings.warn("not enough unique segments; sampling WITH replacement")
sampled_indices = np.random.choice(indices, size=num_samples, replace=True)
# build normalized segment occupancy masks for each segment we choose
seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],
-1).astype(np.float32)
if return_enough_segments:
enough_segs = num_samples <= max_segment
return seg_tensor, enough_segs
return seg_tensor
def _get_grid_segments(imshape, num_samples=16):
gridsize = int(np.sqrt(num_samples))
h,w = imshape
seg = np.zeros((h,w, gridsize**2), dtype=np.int64)
index = 0
dy = int(h/gridsize)
dx = int(w/gridsize)
for i in range(gridsize):
for j in range(gridsize):
seg[i*dy:(i+1)*dy, j*dx:(j+1)*dx,index] = 1
index += 1
return seg
def _segment_aug(img, seg, aug, outputsize=None):
"""
"""
num_channels = img.shape[-1]
imshape = (img.shape[0], img.shape[1])
x = tf.concat([img, tf.cast(seg, tf.float32)], -1)
for f in SEG_AUG_FUNCTIONS:
if f in aug:
x = SINGLE_AUG_FUNC[f](x, aug[f], imshape=imshape)
img_aug = x[:,:,:num_channels]
seg_aug = x[:,:,num_channels:]
if outputsize is not None:
seg_aug = tf.image.resize(seg_aug, outputsize, method="area")
# normalize segments
norm = tf.expand_dims(tf.expand_dims(tf.reduce_sum(seg_aug, [0,1]),0),0)
seg_aug /= (norm+1e-8)
return img_aug, seg_aug
def _filter_out_bad_segments(img1, seg1, img2, seg2):
"""
It's possible for shearing or scaling augmentation to sample
one segment completely out of the image- use this function
to filter out those cases
"""
minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))
if minval < 0.5:
warnings.warn("filtering bad segment")
return False
else:
return True
def _prepare_embeddings(h, m):
"""
Combine FCN outputs with segmentation masks to build a batch of
mask-pooled hidden vectors. Represents the calculation of h_{m}
in the first equation in Henaff et al's paper
:h: batch of embeddings; (N,w,h,d)
:m: batch of NORMALIZED segmentation tensors; (N,w,h,num_samples)
Returns a tensor of shape (N*num_samples, d)
"""
d = h.shape[-1]
h = tf.expand_dims(h, 4)
m = tf.expand_dims(m, 3)
hm = tf.reduce_mean(h*m, [1,2])
return tf.reshape(hm, [-1, d])
def _prepare_mask(m1, m2):
"""
:m1, m2: masks of segmentation tensors; (N,w,h,num_samples)
Returns a mask of shape (N*num_samples, 1)
"""
m1_sum = tf.reduce_sum(m1, [1,2])
m2_sum = tf.reduce_sum(m2, [1,2])
return tf.reshape(m1_sum*m2_sum, [-1,1])
```
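The two pooling helpers at the end of this file are easiest to understand through their shapes. A minimal sketch, assuming TensorFlow 2.x and that `_prepare_embeddings`/`_prepare_mask` from above are in scope:
```python
# Shape check only; assumes _prepare_embeddings and _prepare_mask (above) are in scope.
import tensorflow as tf

N, H, W, d, num_samples = 2, 8, 8, 16, 4
feats = tf.random.normal((N, H, W, d))                       # FCN feature map
masks = tf.random.uniform((N, H, W, num_samples))
masks /= tf.reduce_sum(masks, axis=[1, 2], keepdims=True)    # normalize each segment

pooled = _prepare_embeddings(feats, masks)
weights = _prepare_mask(masks, masks)
print(pooled.shape)   # (N * num_samples, d) -> (8, 16)
print(weights.shape)  # (N * num_samples, 1) -> (8, 1)
```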
#### File: patchwork/feature/_hcl.py
```python
import numpy as np
import tensorflow as tf
from patchwork.feature._generic import GenericExtractor
from patchwork._util import compute_l2_loss, _compute_alignment_and_uniformity
from patchwork.feature._moco import _build_augment_pair_dataset
from patchwork.feature._simclr import _build_embedding_model
from patchwork.feature._contrastive import _build_negative_mask
from patchwork.feature._contrastive import _simclr_softmax_prob
from patchwork.feature._contrastive import _hcl_softmax_prob
def _build_negative_indices(batch_size):
"""
compute indices of negative sampled from matrix of pairwise dot products
of embeddings. use with tf.gather_nd()
DEPRECATED: because the number of operations scales with batch size,
tracing functions that use this take forever on large batches. use
_build_negative_mask instead.
"""
indices = []
for i in range(2*batch_size):
row = []
for j in range(2*batch_size):
if (i != j) and (abs(i-j) != batch_size):
row.append((i,j))
indices.append(row)
return indices
def _build_trainstep(model, optimizer, strategy, temp=1, tau_plus=0, beta=0, weight_decay=0):
"""
Build a distributed training step for SimCLR or HCL.
Set tau_plus and beta to 0 for SimCLR parameters.
:model: Keras projection model
:optimizer: Keras optimizer
:strategy: tf.distribute.Strategy object
:temp: temperature parameter
:tau_plus: HCL class probability parameter
:beta: HCL concentration parameter
:weightdecay: L2 loss coefficient. 0 to disable
Returns a distributed training function
"""
def _step(x,y):
with tf.GradientTape() as tape:
loss = 0
# get replica context- we'll use this to aggregate embeddings
# across different GPUs
context = tf.distribute.get_replica_context()
# run images through model and normalize embeddings
z1 = tf.nn.l2_normalize(model(x, training=True), 1)
z2 = tf.nn.l2_normalize(model(y, training=True), 1)
# aggregate projections across replicas. z1 and z2 should
# now correspond to the global batch size (gbs, d)
z1 = context.all_gather(z1, 0)
z2 = context.all_gather(z2, 0)
with tape.stop_recording():
gbs = z1.shape[0]
mask = _build_negative_mask(gbs)
# SimCLR case
if (tau_plus == 0)&(beta == 0):
softmax_prob, nce_batch_acc = _simclr_softmax_prob(z1, z2, temp, mask)
# HCL case
elif (tau_plus > 0)&(beta > 0):
softmax_prob, nce_batch_acc = _hcl_softmax_prob(z1, z2, temp,
beta, tau_plus, mask)
else:
assert False, "both tau_plus and beta must be nonzero to run HCL"
softmax_loss = tf.reduce_mean(-1*tf.math.log(softmax_prob))
loss += softmax_loss
if weight_decay > 0:
l2_loss = compute_l2_loss(model)
loss += weight_decay*l2_loss
else:
l2_loss = 0
grad = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grad, model.trainable_variables))
return {"loss":loss, "nt_xent_loss":softmax_loss,
"l2_loss":l2_loss,
"nce_batch_accuracy":nce_batch_acc}
@tf.function
def trainstep(x,y):
per_example_losses = strategy.run(_step, args=(x,y))
lossdict = {k:strategy.reduce(
tf.distribute.ReduceOp.MEAN,
per_example_losses[k], axis=None)
for k in per_example_losses}
return lossdict
return trainstep
class HCLTrainer(GenericExtractor):
"""
Class for training an HCL model. SimCLR is a special case of this model,
for tau_plus = beta = 0.
Based on "Contrastive Learning with Hard Negative Examples" by <NAME>.
"""
modelname = "HCL"
def __init__(self, logdir, trainingdata, testdata=None, fcn=None,
augment=True, temperature=1., beta=0, tau_plus=0,
num_hidden=128, output_dim=64,
batchnorm=True, weight_decay=0,
lr=0.01, lr_decay=0, decay_type="exponential",
opt_type="adam",
imshape=(256,256), num_channels=3,
norm=255, batch_size=64, num_parallel_calls=None,
single_channel=False, notes="",
downstream_labels=None, stratify=None, strategy=None):
"""
:logdir: (string) path to log directory
:trainingdata: (list) list of paths to training images
:testdata: (list) filepaths of a batch of images to use for eval
:fcn: (keras Model) fully-convolutional network to train as feature extractor
:augment: (dict) dictionary of augmentation parameters, True for defaults
:temperature: the Boltzmann temperature parameter- rescale the cosine similarities by this factor before computing softmax loss.
:num_hidden: number of hidden neurons in the network's projection head
:output_dim: dimension of projection head's output space. Figure 8 in Chen et al's paper shows that their results did not depend strongly on this value.
:batchnorm: whether to include batch normalization in the projection head.
:weight_decay: coefficient for L2-norm loss. The original SimCLR paper used 1e-6.
:lr: (float) initial learning rate
:lr_decay: (int) number of steps for one decay period (0 to disable)
:decay_type: (string) how to decay the learning rate- "exponential" (smooth exponential decay), "staircase" (non-smooth exponential decay), or "cosine"
:opt_type: (string) optimizer type; "adam" or "momentum"
:imshape: (tuple) image dimensions in H,W
:num_channels: (int) number of image channels
:norm: (int or float) normalization constant for images (for rescaling to
unit interval)
:batch_size: (int) batch size for training
:num_parallel_calls: (int) number of threads for loader mapping
:single_channel: if True, expect a single-channel input image and
stack it num_channels times.
:notes: (string) any notes on the experiment that you want saved in the
config.yml file
:downstream_labels: dictionary mapping image file paths to labels
:stratify: pass a list of image labels here to stratify by batch
during training
:strategy: if distributing across multiple GPUs, pass a tf.distribute
Strategy object here
"""
assert augment is not False, "this method needs an augmentation scheme"
self.logdir = logdir
self.trainingdata = trainingdata
self._downstream_labels = downstream_labels
if strategy is None:
strategy = tf.distribute.get_strategy()
self.strategy = strategy
self._file_writer = tf.summary.create_file_writer(logdir, flush_millis=10000)
self._file_writer.set_as_default()
# if no FCN is passed- build one
with self.scope():
if fcn is None:
fcn = tf.keras.applications.ResNet50V2(weights=None, include_top=False)
self.fcn = fcn
# Create a Keras model that wraps the base encoder and
# the projection head
embed_model = _build_embedding_model(fcn, imshape, num_channels,
num_hidden, output_dim, batchnorm)
self._models = {"fcn":fcn,
"full":embed_model}
# build training dataset
ds = _build_augment_pair_dataset(trainingdata,
imshape=imshape, batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
norm=norm, num_channels=num_channels,
augment=augment,
single_channel=single_channel)
self._ds = self._distribute_dataset(ds)
# create optimizer
self._optimizer = self._build_optimizer(lr, lr_decay, opt_type=opt_type,
decay_type=decay_type)
# build training step
self._training_step = _build_trainstep(embed_model, self._optimizer,
self.strategy, temp=temperature,
tau_plus=tau_plus, beta=beta,
weight_decay=weight_decay)
if testdata is not None:
self._test_ds = _build_augment_pair_dataset(testdata,
imshape=imshape, batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
norm=norm, num_channels=num_channels,
augment=augment,
single_channel=single_channel)
self._test = True
else:
self._test = False
self.step = 0
# parse and write out config YAML
self._parse_configs(augment=augment, temperature=temperature,
beta=beta, tau_plus=tau_plus,
num_hidden=num_hidden, output_dim=output_dim,
weight_decay=weight_decay, batchnorm=batchnorm,
lr=lr, lr_decay=lr_decay,
imshape=imshape, num_channels=num_channels,
norm=norm, batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
single_channel=single_channel, notes=notes,
trainer="hcl", strategy=str(strategy),
decay_type=decay_type, opt_type=opt_type)
def _run_training_epoch(self, **kwargs):
"""
"""
for x, y in self._ds:
lossdict = self._training_step(x,y)
self._record_scalars(**lossdict)
self._record_scalars(learning_rate=self._get_current_learning_rate())
self.step += 1
def evaluate(self, avpool=True, query_fig=False):
if self._test:
# if the user passed out-of-sample data to test- compute
# alignment and uniformity measures
alignment, uniformity = _compute_alignment_and_uniformity(
self._test_ds, self._models["full"])
self._record_scalars(alignment=alignment,
uniformity=uniformity, metric=True)
if self._downstream_labels is not None:
self._linear_classification_test(avpool=avpool, query_fig=query_fig)
```
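For a concrete picture of which similarities are treated as negatives, the deprecated `_build_negative_indices` helper above can be run on a tiny batch; `_build_negative_mask` encodes the same pattern as a boolean matrix. A minimal sketch, assuming the function above is in scope:
```python
# Assumes _build_negative_indices (defined above) is in scope.
indices = _build_negative_indices(batch_size=2)
for row in indices:
    print(row)
# [(0, 1), (0, 3)]
# [(1, 0), (1, 2)]
# [(2, 1), (2, 3)]
# [(3, 0), (3, 2)]
# Each of the 2*batch_size rows drops the diagonal (i == j) and the augmented
# partner (|i - j| == batch_size), leaving 2*batch_size - 2 negatives per example.
```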
#### File: patchwork/feature/_text_transformer.py
```python
import tensorflow as tf
class TransformerBlock(tf.keras.layers.Layer):
"""
Transfomer Block as a keras layer.
from here: https://keras.io/examples/nlp/text_classification_with_transformer/
"""
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = tf.keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.ffn = tf.keras.Sequential(
[tf.keras.layers.Dense(ff_dim, activation="relu"), tf.keras.layers.Dense(embed_dim),]
)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, inputs, training=True):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
class TokenAndPositionEmbedding(tf.keras.layers.Layer):
"""
keras layer for token + positional embeddings.
from here: https://keras.io/examples/nlp/text_classification_with_transformer/
Note that this uses learned positional embeddings instead of sinusoids. From Attention
is All You Need: "We also experimented with using learned positional embeddings [9]
instead, and found that the two versions produced nearly identical results"
"""
def __init__(self, maxlen, vocab_size, embed_dim):
super(TokenAndPositionEmbedding, self).__init__()
self.token_emb = tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = tf.keras.layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
def build_text_transformer(vocab_size, maxlen, embed_dim=512, num_layers=12, num_heads=8,
ff_dim=2048, final_projection=False):
"""
Assemble TransformerBlock and TokenAndPositionEmbedding layers into a text transformer.
:vocab_size: int; number of (BPE) tokens in the vocabulary
:maxlen: int; length of text sequences (BPE tokens, not raw string lengths). preprocessing should
pad/clip to this value
:embed_dim: int; embedding dimension for tokens and transformers
:num_layers: int; number of transformer blocks
:num_heads: int; number of heads in each transformer block
:ff_dim: int; dimension of internal feed-forward layers inside transformer blocks (2048 was value from
Attention is All You Need)
:final_projection: if an integer; pool and project final transformer block to this dimension
"""
inpt = tf.keras.layers.Input((maxlen,))
x = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)(inpt)
for n in range(num_layers):
x = TransformerBlock(embed_dim, num_heads, ff_dim)(x)
if final_projection:
x = tf.keras.layers.GlobalAvgPool1D(data_format='channels_last')(x)
x = tf.keras.layers.Dense(final_projection)(x)
return tf.keras.Model(inpt, x)
```
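A minimal sketch of using `build_text_transformer`, assuming TensorFlow 2.x; the hyperparameters are small placeholder values chosen only to make the example cheap to run:
```python
# Assumes TensorFlow 2.x; hyperparameters are small placeholders for a quick check.
import numpy as np

vocab_size, maxlen = 1000, 32
model = build_text_transformer(vocab_size, maxlen,
                               embed_dim=64, num_layers=2, num_heads=4,
                               ff_dim=128, final_projection=16)
tokens = np.random.randint(0, vocab_size, size=(4, maxlen))
out = model(tokens, training=False)
print(out.shape)  # (4, 16): one pooled, projected vector per token sequence
```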
#### File: patchwork/tests/test_distill.py
```python
import numpy as np
import tensorflow as tf
from patchwork._distill import _build_student_model, distill
def test_student_model_with_premade_model():
inpt = tf.keras.layers.Input((None, None, 3))
net = tf.keras.layers.GlobalMaxPool2D()(inpt)
net = tf.keras.layers.Dense(5, activation="sigmoid")(net)
student0 = tf.keras.Model(inpt, net)
student1 = _build_student_model(student0, 5)
assert isinstance(student1, tf.keras.Model)
assert len(student0.layers) == len(student1.layers)
assert student0.output_shape == student1.output_shape
def test_student_model_without_premade_model():
student = _build_student_model("VGG16", 5, imshape=(32,32))
assert isinstance(student, tf.keras.Model)
assert student.output_shape[-1] == 5
def test_student_model_with_wide_resnet():
student = _build_student_model("WRN_16_1", 5, imshape=(32,32))
assert isinstance(student, tf.keras.Model)
assert student.output_shape[-1] == 5
def test_distill(test_png_path):
inpt = tf.keras.layers.Input((None, None, 3))
net = tf.keras.layers.GlobalMaxPool2D()(inpt)
net = tf.keras.layers.Dense(5, activation="sigmoid")(net)
student0 = tf.keras.Model(inpt, net)
filepaths = [test_png_path, test_png_path]
ys = 0.5*np.ones((2,5), dtype=np.float32)
student1, trainloss = distill(filepaths, ys, student0, epochs=1,
imshape=(32,32), batch_size=1,
augment=False)
assert isinstance(student1, tf.keras.Model)
assert len(student0.layers) == len(student1.layers)
assert student0.output_shape == student1.output_shape
assert isinstance(trainloss, dict)
assert isinstance(trainloss["train_loss"][0], np.float32)
```
#### File: patchwork/tests/test_feature_simclr.py
```python
import numpy as np
import tensorflow as tf
from patchwork.feature._simclr import _build_simclr_dataset
from patchwork.feature._simclr import _build_embedding_model
from patchwork.feature._simclr import _build_simclr_training_step
# build a tiny FCN for testing
inpt = tf.keras.layers.Input((None, None, 3))
net = tf.keras.layers.Conv2D(5,1)(inpt)
net = tf.keras.layers.MaxPool2D(10,10)(net)
fcn = tf.keras.Model(inpt, net)
def test_simclr_dataset(test_png_path):
filepaths = 10*[test_png_path]
batch_size = 5
ds = _build_simclr_dataset(filepaths, imshape=(32,32),
num_channels=3, norm=255,
augment=True, single_channel=False,
batch_size=batch_size)
assert isinstance(ds, tf.data.Dataset)
for x,y in ds:
break
# since SimCLR makes augmented pairs, the batch size
# is doubled
assert x.shape[0] == 2*batch_size
assert (y.numpy() == np.array([1,-1,1,-1,1,-1,1,-1,1,-1])).all()
def test_stratified_simclr_dataset(test_png_path, test_jpg_path):
filepaths = 10*[test_png_path, test_jpg_path]
labels = 10*["png", "jpg"]
batch_size = 5
ds = _build_simclr_dataset(filepaths, imshape=(32,32),
num_channels=3, norm=255,
augment={}, single_channel=False,
batch_size=batch_size, stratify=labels)
assert isinstance(ds, tf.data.Dataset)
for x,y in ds:
x = x.numpy()
break
# since SimCLR makes augmented pairs, the batch size
# is doubled
assert x.shape[0] == 2*batch_size
# since we're using a stratified dataset- each element of a particular
# batch should be from one stratification category (which in this
# case is an identical image)
assert (x[0] == x[2]).all()
def test_simclr_dataset_with_custom_dataset():
rawdata = np.zeros((10,32,32,3)).astype(np.float32)
ds = tf.data.Dataset.from_tensor_slices(rawdata)
batch_size = 5
ds = _build_simclr_dataset(ds, imshape=(32,32),
num_channels=3, norm=255,
augment=True, single_channel=False,
batch_size=batch_size)
assert isinstance(ds, tf.data.Dataset)
for x,y in ds:
break
# since SimCLR makes augmented pairs, the batch size
# is doubled
assert x.shape[0] == 2*batch_size
assert (y.numpy() == np.array([1,-1,1,-1,1,-1,1,-1,1,-1])).all()
def test_simclr_dataset_with_custom_pair_dataset():
rawdata = np.zeros((10,32,32,3)).astype(np.float32)
ds = tf.data.Dataset.from_tensor_slices((rawdata, rawdata))
batch_size = 5
ds = _build_simclr_dataset(ds, imshape=(32,32),
num_channels=3, norm=255,
augment=True, single_channel=False,
batch_size=batch_size)
assert isinstance(ds, tf.data.Dataset)
for x,y in ds:
break
# since SimCLR makes augmented pairs, the batch size
# is doubled
assert x.shape[0] == 2*batch_size
assert (y.numpy() == np.array([1,-1,1,-1,1,-1,1,-1,1,-1])).all()
def test_build_embedding_model():
model = _build_embedding_model(fcn, (32,32), 3, 17, 11)
assert isinstance(model, tf.keras.Model)
assert model.output_shape[-1] == 11
assert len(model.layers) == 7 # was 8 before taking out extra batchnorm
def test_build_simclr_training_step():
model = _build_embedding_model(fcn, (32,32), 3, 5, 7)
opt = tf.keras.optimizers.SGD()
step = _build_simclr_training_step(model, opt, 0.1)
x = tf.zeros((4,32,32,3), dtype=tf.float32)
#y = np.array([1,-1,1,-1]).astype(np.int32)
y = x
lossdict = step(x,y)
assert isinstance(lossdict["loss"].numpy(), np.float32)
# should include loss and average cosine similarity
assert len(lossdict) == 4 #2
def test_build_simclr_training_step_with_weight_decay():
model = _build_embedding_model(fcn, (32,32), 3, 5, 7)
opt = tf.keras.optimizers.SGD()
step = _build_simclr_training_step(model, opt, 0.1,
weight_decay=1e-6)
x = tf.zeros((4,32,32,3), dtype=tf.float32)
#y = np.array([1,-1,1,-1]).astype(np.int32)
y = x
lossdict = step(x,y)
# should include total loss, crossent loss, average cosine
# similarity and L2 norm squared
assert len(lossdict) == 4
```
#### File: patchwork/viz/_detcon.py
```python
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from patchwork.feature._detcon_utils import _get_segments, _get_grid_segments
from patchwork.feature._detcon_utils import _segment_aug, _filter_out_bad_segments
from patchwork.feature._detcon_utils import SEG_AUG_FUNCTIONS
from patchwork.loaders import _image_file_dataset
from patchwork._augment import augment_function
def detcon_input_pipeline(imfiles, augment, mean_scale=1000, num_samples=16,
outputsize=None, imshape=(256,256), **kwargs):
"""
"""
# build a dataset to load images one at a time
ds = _image_file_dataset(imfiles, shuffle=False, imshape=imshape,
**kwargs).prefetch(1)
# place to store some examples
imgs = []
segs = []
img1s = []
img2s = []
seg1s = []
seg2s = []
N = len(imfiles)
not_enough_segs_count = 0
augment_removed_segmentation = 0
aug2 = {k:augment[k] for k in augment if k not in SEG_AUG_FUNCTIONS}
_aug = augment_function(imshape, aug2)
progressbar = tqdm(total=N)
# for each image
for x in ds:
# get the segments
if mean_scale > 0:
seg, enough_segs = _get_segments(x, mean_scale=mean_scale,
num_samples=num_samples,
return_enough_segments=True)
# count how many times we had to sample with replacement
if not enough_segs:
not_enough_segs_count += 1
else:
seg = _get_grid_segments(imshape, num_samples)
# now augment image and segmentation together, twice
img1, seg1 = _segment_aug(x, seg, augment, outputsize=outputsize)
img2, seg2 = _segment_aug(x, seg, augment, outputsize=outputsize)
# check to see if any segments were pushed out of the image by augmentation
segmentation_ok = _filter_out_bad_segments(img1, seg1, img2, seg2)
if not segmentation_ok:
augment_removed_segmentation += 1
# finally, augment images separately
img1 = _aug(img1).numpy()
img2 = _aug(img2).numpy()
seg1 = seg1.numpy()
seg2 = seg2.numpy()
if len(img1s) < 16:
imgs.append(x)
segs.append(seg)
img1s.append(img1)
img2s.append(img2)
seg1s.append(seg1)
seg2s.append(seg2)
progressbar.update()
img = np.stack(imgs, 0)
seg = np.stack(segs, 0)
img1 = np.stack(img1s, 0)
img2 = np.stack(img2s, 0)
seg1 = np.stack(seg1s, 0)
seg2 = np.stack(seg2s, 0)
progressbar.close()
print(f"Had to sample with replacement for {not_enough_segs_count} of {N} images")
print(f"At least one missing segment in {augment_removed_segmentation} of {N} images due to augmentation")
def _segshow(s):
plt.imshow(s, alpha=0.4, extent=[0,imshape[0],imshape[1],0],
cmap="tab20", vmin=0, vmax=num_samples-1)
for j in range(5):
plt.subplot(5,4,4*j+1)
plt.imshow(img[j])
plt.axis(False)
if j == 0: plt.title("original")
plt.subplot(5,4, 4*j+2)
plt.imshow(img[j])
plt.axis(False)
_segshow(seg[j].argmax(-1))
if j == 0: plt.title("segments")
plt.subplot(5,4, 4*j+3)
plt.imshow(img1[j])
plt.axis(False)
_segshow(seg1[j].argmax(-1))
if j == 0: plt.title("augmentation 1")
plt.subplot(5,4, 4*j+4)
plt.imshow(img2[j])
plt.axis(False)
_segshow(seg2[j].argmax(-1))
if j == 0: plt.title("augmentation 2");
```
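A hedged sketch of calling `detcon_input_pipeline`; the file paths and augmentation values below are hypothetical placeholders (valid augmentation keys and values depend on `patchwork._augment.SINGLE_AUG_FUNC`), so adjust them to your own data before running:
```python
# Hypothetical call; file paths and augmentation values are placeholders.
imfiles = ["images/example_%02d.jpg" % i for i in range(8)]   # hypothetical paths
augment = {"flip_left_right": True, "zoom_scale": 0.1}        # placeholder values
detcon_input_pipeline(imfiles, augment,
                      mean_scale=1000, num_samples=16,
                      imshape=(256, 256))
# Plots five rows: original image, its Felzenszwalb segments, and the two
# jointly-augmented image/segment pairs, then prints how often segmentation
# sampled with replacement or lost a segment to augmentation.
```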
|
{
"source": "jg1141/DRLND-Project1",
"score": 3
}
|
#### File: jg1141/DRLND-Project1/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, hidden_layers=[64, 64], dropout_percentage=0.3):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
hidden_layers (list of int): Width of hidden layers
dropout_percentage (float): Percentage of nodes to drop out
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.input_size = state_size
self.output_size = action_size
self.hidden_layers = hidden_layers
self.dropout_percentage = dropout_percentage
        self.layers = nn.ModuleList([nn.Linear(self.input_size, self.hidden_layers[0])])
        # pair up consecutive hidden widths so any number of hidden layers is supported
        self.layer_sizes = list(zip(self.hidden_layers[:-1], self.hidden_layers[1:]))
        self.layers.extend(nn.Linear(size_1, size_2) for size_1, size_2 in self.layer_sizes)
self.output = nn.Linear(self.hidden_layers[-1], self.output_size)
self.dropout = nn.Dropout(self.dropout_percentage)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = state
for layer in self.layers:
x = F.relu(layer(x))
x = self.dropout(x)
return self.output(x)
```
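A minimal sketch of a forward pass through `QNetwork`, assuming PyTorch is installed; the state/action sizes below are placeholders (37 and 4 match the Unity Banana task this project appears to target, but that is an assumption):
```python
# Assumes PyTorch; 37/4 are assumed sizes for the Unity Banana environment.
import torch

state_size, action_size = 37, 4
net = QNetwork(state_size, action_size, seed=0)
states = torch.randn(8, state_size)   # batch of 8 random states
q_values = net(states)
print(q_values.shape)                 # torch.Size([8, 4]) -- one Q-value per action
```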
|
{
"source": "jg1141/jg1141.github.io",
"score": 3
}
|
#### File: jg1141/jg1141.github.io/cogglejson2mp3.py
```python
import sys
import json
from subprocess import call
import urllib
import os
class Slides():
""" Class for processing audio of slides extracted from Coggle JSON file"""
def stem(self, file_name):
return ".".join(file_name.split(".")[0:len(file_name.split("."))-1])
def write_audio_say(self, file_name, text_to_speak, language, on_slide, on_block):
"""Use Mac say command to create .m4a files and ffmpeg to convert them to .mp3"""
cmd = ["say", '\"' + text_to_speak + '\"', "-v", language, "-o", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a", "--file-format=m4af"]
call(cmd)
output_file = "audio/" + self.stem(file_name) +"_output.json/s" + str(on_slide) + "_" + str(on_block) + ".mp3"
cmd = ["ffmpeg", "-y", "-i", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a", output_file]
call(cmd)
cmd = ["rm", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a"]
call(cmd)
return output_file
def write_audio_google(self, file_name, text_to_speak, language, on_slide, on_block):
"""Use Google Text-to-Speech to create .mp3 files"""
print "To .mp3: " + text_to_speak
q = urllib.urlencode({"q":text_to_speak.encode('utf-8')})
cmd = ["wget", "-q", "-U", "Mozilla",
'http://www.translate.google.com/translate_tts?ie=UTF-8&tl=' + language + '&' + q,
"-O", "audio/" + self.stem(file_name) +"_output.json/s" + str(on_slide) + "_" + str(on_block) + ".mp3"]
call(cmd)
return "s" + str(on_slide) + "_" + str(on_block) + ".mp3"
def write_audio(self, file_name, text_to_speak, language, on_slide, on_block, tts_engine):
"""Call appropriate write_audio method"""
if tts_engine == 'say':
return self.write_audio_say(file_name, text_to_speak, language, on_slide, on_block)
if tts_engine == 'google':
return self.write_audio_google(file_name, text_to_speak, language, on_slide, on_block)
def read_and_parse(self, file_name_or_url):
"""Makes calls to output an .mp3 file for each block and one for the full slide"""
d = {}
if file_name_or_url.startswith("http"):
# make URL to fetch from Coggle api
h,t = os.path.split(file_name_or_url)
print "\nUse the URL below to fetch the JSON for your Coggle."
print "https://coggle.it/api/1/diagrams/{}/nodes".format(t)
print "\nSave the result to a file and run "
print "python cogglejson2mp3.py <file name>\n"
return
else:
file_name = file_name_or_url
with open(file_name) as json_data:
d = json.load(json_data)
coggle_nodes = d[0]["children"]
default_language = "Alex" # alternative is "en-ca" for example
default_tts_engine ="say" # alternative is "google"
default_block_type = "words" # alternative is "mixed" words and phonemes
# set defaults and reset to (overriding) defaults, if any
for node in coggle_nodes:
if node["offset"]["x"] < 0:
if node["text"].startswith("language:"):
default_language = node["text"][9:].strip()
if node["text"].startswith("tts_engine:"):
default_tts_engine = node["text"][11:].strip()
if node["text"].startswith("block-type:"):
default_block_type = node["text"][11:].strip()
# extract and sort on y the text from slides with positive x
slides = []
for node in coggle_nodes:
if node["offset"]["x"] > 0:
# filter any improper slides
text = node["text"]
                if text.startswith("![uploaded_image](") and (text.find("\n") > 0):
image = text[18:text.find(")")]
words = text[text.find("\n")+1:]
slides.append((node["offset"]["y"],image,words))
slides.sort(key=lambda tup: tup[0])
output = {"slides": []}
for on_slide, slide in enumerate(slides):
output["slides"].append({"slide" : []})
text_to_speak = ""
# split blocks on [ ] or space
if slide[2].find("[") >= 0:
blocks = []
text_to_parse = slide[2]
left_bracket = text_to_parse.find("[")
while left_bracket >= 0:
# add any words to left of [
words = text_to_parse[:left_bracket].split()
for word in words:
blocks.append(word.strip())
right_bracket = text_to_parse.find("]", left_bracket)
if right_bracket > 0:
blocks.append(text_to_parse[left_bracket+1:right_bracket].strip())
text_to_parse = text_to_parse[right_bracket+1:]
left_bracket = text_to_parse.find("[")
else:
blocks.append(text_to_parse[left_bracket+1:].strip())
text_to_parse = ""
left_bracket = -1
remaining_blocks = text_to_parse.strip().split()
for block in remaining_blocks:
blocks.append(block.strip())
else:
blocks = slide[2].split()
blocks = [block.strip() for block in blocks]
for on_block, block in enumerate(blocks):
# pass defaults through to blocks
language = default_language
tts_engine = default_tts_engine
block_type = default_block_type
text_to_speak += block + " "
output["slides"][on_slide]['slide'].append({})
output["slides"][on_slide]['slide'][on_block]['img'] = slide[1]
output["slides"][on_slide]['slide'][on_block]['text'] = block
output["slides"][on_slide]['slide'][on_block]['audio'] = \
self.write_audio(file_name, block, language, on_slide, on_block + 1, tts_engine)
output["slides"][on_slide]['audio'] = self.write_audio(file_name, text_to_speak, language, on_slide, 0, tts_engine)
with open(self.stem(file_name) + '_output.json', 'w') as outfile:
json.dump(output, outfile)
def main(argv):
if len(argv) < 2:
print "Usage: python cogglejson2mp3.py <json file>"
print " or: python cogglejson2mp3.py <Coggle URL>"
print " .mp3 files output to <json file> or <Coggle code> subfolder of audio folder"
else:
slides = Slides()
# Make sure audio folder exists
if not os.path.exists("./audio"):
os.makedirs("./audio")
if not os.path.exists("./audio/" + slides.stem(argv[1]) + "_output.json"):
os.makedirs("./audio/" + slides.stem(argv[1]) + "_output.json")
# As a convenience, make sure corresponding images folder also exists
if not os.path.exists("./images"):
os.makedirs("./images")
if not os.path.exists("./images/" + slides.stem(argv[1]) + "_output.json"):
os.makedirs("./images/" + slides.stem(argv[1]) + "_output.json")
slides.read_and_parse(argv[1])
if __name__ == "__main__":
main(sys.argv)
```
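For reference, the `<stem>_output.json` file written by `read_and_parse` has the structure sketched below; the image path, captions, and input file name ("lesson.json") are placeholders, and the audio paths shown assume the Mac `say` engine (the Google branch stores bare file names instead):
```python
# Structure of <stem>_output.json as written by read_and_parse(); the image path,
# captions, and input file name ("lesson.json") are placeholders.
output_example = {
    "slides": [
        {
            "slide": [
                {"img": "images/cat.png", "text": "The",
                 "audio": "audio/lesson_output.json/s0_1.mp3"},
                {"img": "images/cat.png", "text": "cat",
                 "audio": "audio/lesson_output.json/s0_2.mp3"},
            ],
            # block 0 holds the whole-slide narration
            "audio": "audio/lesson_output.json/s0_0.mp3",
        }
    ]
}
```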
|
{
"source": "jg1141/nzpug20210721",
"score": 2
}
|
#### File: jg1141/nzpug20210721/setup.py
```python
VERSION = "20210720 2217 "
import datetime
import humanize
import numpy as np
import os
import pandas as pd
import plotly.express as px
import pyperclip
import re
import sidetable
import snowflake.connector
import time
from snowflake.connector.pandas_tools import write_pandas
from dotenv import load_dotenv
_ = load_dotenv()
# Get non-null counts
pd.options.display.max_info_rows = 16907850
# Connection string
conn = snowflake.connector.connect(
user=os.getenv('user'),
    password=os.getenv('password'),
account=os.getenv('account'),
warehouse=os.getenv('warehouse'),
database=os.getenv('database'),
schema=os.getenv('schema')
)
# Execute a statement that will generate a result set.
cur = conn.cursor()
def compare_sets(list1, list2):
"""Make a count of the intersections of two sets, A and B"""
set1 = set(list1)
set2 = set(list2)
set2_intersection_set1 = set2.intersection(set1)
result = {'IN A':[len(set1), len(set2_intersection_set1), round(len(set1)/len(set1)*100,1), round(len(set2_intersection_set1)/len(set2)*100,1)]}
result['IN B'] = [len(set2_intersection_set1), len(set2), round(len(set2_intersection_set1)/len(set1)*100,1), round(len(set2)/len(set2)*100,1)]
result['NOT IN A'] = [0, len(set2 - set1), 0, round(len(set2 - set1)/len(set2)*100,1)]
result['NOT IN B'] = [len(set1 - set2), 0, round(len(set1 - set2)/len(set1)*100,1), 0]
df = pd.DataFrame.from_dict(result, orient='index', columns=['A', 'B', '% of A', '% of B'])
return df
def d(vars):
"""List of variables starting with string "df" in reverse order. Usage: d(dir())
@vars list of variables output by dir() command
"""
list_of_dfs = [item for item in vars if (item.find('df') == 0 and item.find('_') == -1 and item != 'dfs')]
list_of_dfs.sort(key=lambda x:int(re.sub("[^0-9]", "", x.replace('df',''))) if len(x) > 2 else 0, reverse=True)
return list_of_dfs
def e(start_time):
"""Return human readable time delta
@start_time time to compare to current time
"""
print(f'Time now: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}')
print(f"Time since start: {humanize.naturaldelta(time.monotonic() - start_time)}")
def execute(sql):
"""Execute a SQL command"""
start_time = time.monotonic()
_ = cur.execute(sql)
end_time = time.monotonic()
elapsed = end_time - start_time
print(f"Elapsed time {elapsed:.2f}")
return
def find_col_with(df, char_to_find):
"""Return column index of first column containing char_to_find
@char_to_find character to search for in column name
"""
first_column_with_char_to_find = [col for col in df.columns if col.find(char_to_find) > -1][0]
return list(df.columns).index(first_column_with_char_to_find)
def find_max_order(df, start_col=1):
"""Find the max value in each column and use it to put columns in rank order
@start_col Index of starting column (typically 1 as first column -- column 0 -- is a date or label)
"""
return list(df[df.columns[start_col:]].max().sort_values(ascending=False).keys())
def find_percentage_total(df, start_col=1):
"""Find total and percent of total for columns of Pandas dataframe
@start_col Index of starting column (typically 1 as first column -- column 0 -- is a date or label)
"""
# Get values for col1,col2 and col3
total = pd.Series(data=np.zeros(len(df)))
col_count = len(df.columns)
for i in range(start_col, col_count):
total += df.iloc[:,i]
df.insert(len(df.columns), 'total', total)
for i in range(start_col, col_count + 1):
pct_of_total = round((df.iloc[:,i]/total)*100, 2)
# Create Pandas DF with new column of pct_of_total
df.insert(len(df.columns),f"{df.columns[i]} %", pct_of_total)
# Pull original dataframe to show total and %
return df
def query(sql):
"""Run a SQL query and fetch result into Pandas DataFrame"""
start_time = time.monotonic()
_ = cur.execute(sql)
df = cur.fetch_pandas_all()
end_time = time.monotonic()
elapsed = end_time - start_time
print(f"Elapsed time {elapsed:.2f}")
return df
def t(title_string):
"""Add "as at {today}" to title. Usage: t(title_sting)
@title_string text to preceed the "as at" part
"""
today = datetime.datetime.today().strftime('%d %b %Y')
title = f"{title_string} as at {today}"
print(title)
pyperclip.copy(title)
print("(now on clipboard)")
return title
start_time = time.monotonic()
print(f"Setup Complete v {VERSION}")
```
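A minimal sketch of the `compare_sets` helper on two toy ID lists; it assumes the functions above are in scope (note that importing this module also opens a Snowflake connection, so in practice it runs inside the configured notebook):
```python
# Toy example of compare_sets; A and B share two members.
list_a = [1, 2, 3, 4, 5]
list_b = [4, 5, 6]
compare_sets(list_a, list_b)
#           A  B  % of A  % of B
# IN A      5  2   100.0    66.7
# IN B      2  3    40.0   100.0
# NOT IN A  0  1     0.0    33.3
# NOT IN B  3  0    60.0     0.0
```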
|
{
"source": "jg1141/words_and_pictures",
"score": 3
}
|
#### File: jg1141/words_and_pictures/json2mp3.py
```python
import sys
import json
from subprocess import call
import urllib
import os
class Slides():
""" Class for processing audio of slides extracted from JSON file"""
def stem(self, file_name):
return ".".join(file_name.split(".")[0:len(file_name.split("."))-1])
def write_audio_say(self, file_name, text_to_speak, language, on_slide, on_block):
"""Use Mac say command to create .m4a files and ffmpeg to convert them to .mp3"""
cmd = ["say", '\"' + text_to_speak + '\"', "-v", language, "-o", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a", "--file-format=m4af"]
call(cmd)
output_file = "audio/" + self.stem(file_name) +"_output.json/s" + str(on_slide) + "_" + str(on_block) + ".mp3"
cmd = ["ffmpeg", "-y", "-i", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a", output_file]
call(cmd)
cmd = ["rm", "s" +
str(on_slide) + "_" + str(on_block) + ".m4a"]
call(cmd)
return output_file
def write_audio_google(self, file_name, text_to_speak, language, on_slide, on_block):
"""Use Google Text-to-Speech to create .mp3 files"""
print "To .mp3: " + text_to_speak
q = urllib.urlencode({"q":text_to_speak.encode('utf-8')})
cmd = ["wget", "-q", "-U", "Mozilla",
'http://www.translate.google.com/translate_tts?ie=UTF-8&tl=' + language + '&' + q,
"-O", "audio/" + self.stem(file_name) +"_output.json/s" + str(on_slide) + "_" + str(on_block) + ".mp3"]
call(cmd)
return "s" + str(on_slide) + "_" + str(on_block) + ".mp3"
def write_audio(self, file_name, text_to_speak, language, on_slide, on_block, tts_engine):
"""Call appropriate write_audio method"""
if tts_engine == 'say':
return self.write_audio_say(file_name, text_to_speak, language, on_slide, on_block)
if tts_engine == 'google':
return self.write_audio_google(file_name, text_to_speak, language, on_slide, on_block)
def read_and_parse(self, file_name):
"""Makes calls to output an .mp3 file for each block and one for the full slide"""
d = {}
with open(file_name) as json_data:
d = json.load(json_data)
# set defaults and reset to (overriding) defaults, if any
default_tts_engine ="say" # alternative is "google"
default_language = "Alex" # alternative is "en-ca" for example
default_block_type = "words" # alternative is "mixed" words and phonemes
if "language-default" in d:
default_language = d["language-default"]
if "tts-engine" in d:
default_tts_engine = d["tts-engine"]
if "block-type" in d:
default_block_type = d["block-type"]
for on_slide, slide in enumerate(d["slides"]):
text_to_speak = ""
for on_block, block in enumerate(slide['slide']):
# pass defaults through to blocks
language = default_language
tts_engine = default_tts_engine
block_type = default_block_type
if "language" in block:
language = block["language"]
if "tts-engine" in block:
tts_engine = block["tts-engine"]
if "block-type" in block:
block_type = block["block-type"]
if "text" in block:
text_to_speak += block['text'] + " "
if "phoneme" in block:
tts_input = "[[inpt PHON]] " + block["phoneme"]
d["slides"][on_slide]['slide'][on_block]['audio'] = \
self.write_audio(file_name, tts_input, language, on_slide, on_block + 1, tts_engine)
else:
d["slides"][on_slide]['slide'][on_block]['audio'] = \
self.write_audio(file_name, block['text'], language, on_slide, on_block + 1, tts_engine)
elif "phoneme" in block:
tts_input = "[[inpt PHON]] " + block["phoneme"]
d["slides"][on_slide]['slide'][on_block]['text'] = block["phoneme"]
d["slides"][on_slide]['slide'][on_block]['audio'] = \
self.write_audio(file_name, tts_input, language, on_slide, on_block + 1, tts_engine)
text_to_speak = tts_input + " "
if "word" in slide:
d["slides"][on_slide]['audio'] = self.write_audio(file_name, slide["word"], language, on_slide, 0, tts_engine)
else:
d["slides"][on_slide]['audio'] = self.write_audio(file_name, text_to_speak, language, on_slide, 0, tts_engine)
with open(self.stem(file_name) + '_output.json', 'w') as outfile:
json.dump(d, outfile)
def main(argv):
if len(argv) < 2:
print "Usage: python json2mp3.py <json file>"
print " .mp3 files output to <json file> subfolder of audio folder"
else:
slides = Slides()
# Make sure audio folder exists
if not os.path.exists("./audio"):
os.makedirs("./audio")
if not os.path.exists("./audio/" + slides.stem(argv[1]) + "_output.json"):
os.makedirs("./audio/" + slides.stem(argv[1]) + "_output.json")
slides.read_and_parse(argv[1])
if __name__ == "__main__":
main(sys.argv)
```
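A hypothetical minimal input file for this script, inferred from how `read_and_parse` walks the JSON; the voice name and phoneme strings are placeholders (the `say` phoneme branch expects Apple's phoneme alphabet):
```python
# Hypothetical minimal input for json2mp3.py; voice and phoneme values are placeholders.
example = {
    "language-default": "Alex",
    "tts-engine": "say",
    "slides": [
        {
            "word": "cat",                       # spoken for the whole slide
            "slide": [
                {"text": "c", "phoneme": "k"},   # per-block text and phoneme
                {"text": "a", "phoneme": "AE"},
                {"text": "t", "phoneme": "t"},
            ],
        }
    ],
}
```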
|
{
"source": "Jg1255/azure-sdk-for-python",
"score": 2
}
|
#### File: identity/_credentials/azure_powershell.py
```python
import base64
import logging
import platform
import subprocess
import sys
from typing import TYPE_CHECKING
import six
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from .azure_cli import get_safe_working_dir
from .. import CredentialUnavailableError
from .._internal import _scopes_to_resource, resolve_tenant
from .._internal.decorators import log_get_token
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports
from typing import Any, List, Tuple
_LOGGER = logging.getLogger(__name__)
AZ_ACCOUNT_NOT_INSTALLED = "Az.Account module >= 2.2.0 is not installed"
BLOCKED_BY_EXECUTION_POLICY = "Execution policy prevented invoking Azure PowerShell"
NO_AZ_ACCOUNT_MODULE = "NO_AZ_ACCOUNT_MODULE"
POWERSHELL_NOT_INSTALLED = "PowerShell is not installed"
RUN_CONNECT_AZ_ACCOUNT = 'Please run "Connect-AzAccount" to set up account'
SCRIPT = """$ErrorActionPreference = 'Stop'
[version]$minimumVersion = '2.2.0'
$m = Import-Module Az.Accounts -MinimumVersion $minimumVersion -PassThru -ErrorAction SilentlyContinue
if (! $m) {{
Write-Output {}
exit
}}
$token = Get-AzAccessToken -ResourceUrl '{}'{}
Write-Output "`nazsdk%$($token.Token)%$($token.ExpiresOn.ToUnixTimeSeconds())`n"
"""
class AzurePowerShellCredential(object):
"""Authenticates by requesting a token from Azure PowerShell.
This requires previously logging in to Azure via "Connect-AzAccount", and will use the currently logged in identity.
:keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
the identity logged in to Azure PowerShell is registered in. When False, which is the default, the credential
will acquire tokens only from the tenant of Azure PowerShell's active subscription.
"""
def __init__(self, **kwargs):
# type: (**Any) -> None
self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False)
@log_get_token("AzurePowerShellCredential")
def get_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
This method is called automatically by Azure SDK clients. Applications calling this method directly must
also handle token caching because this credential doesn't cache the tokens it acquires.
:param str scopes: desired scope for the access token. This credential allows only one scope per request.
:keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
is False, specifying a tenant with this argument may raise an exception.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.identity.CredentialUnavailableError: the credential was unable to invoke Azure PowerShell, or
no account is authenticated
:raises ~azure.core.exceptions.ClientAuthenticationError: the credential invoked Azure PowerShell but didn't
receive an access token
"""
tenant_id = resolve_tenant("", self._allow_multitenant, **kwargs)
command_line = get_command_line(scopes, tenant_id)
output = run_command_line(command_line)
token = parse_token(output)
return token
def run_command_line(command_line):
# type: (List[str]) -> str
stdout = stderr = ""
proc = None
kwargs = {}
if platform.python_version() >= "3.3":
kwargs["timeout"] = 10
try:
proc = start_process(command_line)
stdout, stderr = proc.communicate(**kwargs)
if sys.platform.startswith("win") and "' is not recognized" in stderr:
# pwsh.exe isn't on the path; try powershell.exe
command_line[-1] = command_line[-1].replace("pwsh", "powershell", 1)
proc = start_process(command_line)
stdout, stderr = proc.communicate(**kwargs)
except Exception as ex: # pylint:disable=broad-except
# failed to execute "cmd" or "/bin/sh", or timed out; PowerShell and Az.Account may or may not be installed
# (handling Exception here because subprocess.SubprocessError and .TimeoutExpired were added in 3.3)
if proc and not proc.returncode:
proc.kill()
error = CredentialUnavailableError(message="Failed to invoke PowerShell")
six.raise_from(error, ex)
raise_for_error(proc.returncode, stdout, stderr)
return stdout
def start_process(args):
# type: (List[str]) -> subprocess.Popen
working_directory = get_safe_working_dir()
proc = subprocess.Popen(
args,
cwd=working_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
return proc
def parse_token(output):
# type: (str) -> AccessToken
for line in output.split():
if line.startswith("azsdk%"):
_, token, expires_on = line.split("%")
return AccessToken(token, int(expires_on))
raise ClientAuthenticationError(message='Unexpected output from Get-AzAccessToken: "{}"'.format(output))
def get_command_line(scopes, tenant_id):
# type: (Tuple, str) -> List[str]
if tenant_id:
tenant_argument = " -TenantId " + tenant_id
else:
tenant_argument = ""
resource = _scopes_to_resource(*scopes)
script = SCRIPT.format(NO_AZ_ACCOUNT_MODULE, resource, tenant_argument)
encoded_script = base64.b64encode(script.encode("utf-16-le")).decode()
command = "pwsh -NonInteractive -EncodedCommand " + encoded_script
if sys.platform.startswith("win"):
return ["cmd", "/c", command]
return ["/bin/sh", "-c", command]
def raise_for_error(return_code, stdout, stderr):
# type: (int, str, str) -> None
if return_code == 0:
if NO_AZ_ACCOUNT_MODULE in stdout:
raise CredentialUnavailableError(AZ_ACCOUNT_NOT_INSTALLED)
return
if return_code == 127 or "' is not recognized" in stderr:
raise CredentialUnavailableError(message=POWERSHELL_NOT_INSTALLED)
if "Run Connect-AzAccount to login" in stderr:
raise CredentialUnavailableError(message=RUN_CONNECT_AZ_ACCOUNT)
if "AuthorizationManager check failed" in stderr:
raise CredentialUnavailableError(message=BLOCKED_BY_EXECUTION_POLICY)
if stderr:
# stderr is too noisy to include with an exception but may be useful for debugging
_LOGGER.debug('%s received an error from Azure PowerShell: "%s"', AzurePowerShellCredential.__name__, stderr)
raise CredentialUnavailableError(message="Failed to invoke PowerShell")
```
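A minimal usage sketch for this credential; it requires a prior `Connect-AzAccount` login and Az.Accounts >= 2.2.0 on the machine:
```python
from azure.identity import AzurePowerShellCredential

credential = AzurePowerShellCredential()
token = credential.get_token("https://management.azure.com/.default")
print(token.expires_on)  # POSIX timestamp parsed from Get-AzAccessToken output
```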
#### File: identity/_credentials/imds.py
```python
import os
from typing import TYPE_CHECKING
import six
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError
from azure.core.pipeline.transport import HttpRequest
from .. import CredentialUnavailableError
from .._constants import EnvironmentVariables
from .._internal.get_token_mixin import GetTokenMixin
from .._internal.managed_identity_client import ManagedIdentityClient
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import AccessToken
IMDS_URL = "http://169.254.169.254/metadata/identity/oauth2/token"
PIPELINE_SETTINGS = {
"connection_timeout": 2,
"retry_backoff_factor": 2,
"retry_backoff_max": 60,
"retry_on_status_codes": [404, 429] + list(range(500, 600)),
"retry_status": 5,
"retry_total": 5,
}
def get_request(scope, identity_config):
request = HttpRequest("GET", os.environ.get(EnvironmentVariables.AZURE_POD_IDENTITY_TOKEN_URL, IMDS_URL))
request.format_parameters(dict({"api-version": "2018-02-01", "resource": scope}, **identity_config))
return request
class ImdsCredential(GetTokenMixin):
def __init__(self, **kwargs):
# type: (**Any) -> None
super(ImdsCredential, self).__init__()
self._client = ManagedIdentityClient(get_request, **dict(PIPELINE_SETTINGS, **kwargs))
if EnvironmentVariables.AZURE_POD_IDENTITY_TOKEN_URL in os.environ:
self._endpoint_available = True # type: Optional[bool]
else:
self._endpoint_available = None
self._error_message = None # type: Optional[str]
self._user_assigned_identity = "client_id" in kwargs or "identity_config" in kwargs
def _acquire_token_silently(self, *scopes, **kwargs):
# type: (*str, **Any) -> Optional[AccessToken]
return self._client.get_cached_token(*scopes)
def _request_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
# type: (*str, **Any) -> AccessToken
if self._endpoint_available is None:
# Lacking another way to determine whether the IMDS endpoint is listening,
# we send a request it would immediately reject (because it lacks the Metadata header),
# setting a short timeout.
try:
self._client.request_token(*scopes, connection_timeout=0.3, retry_total=0)
self._endpoint_available = True
except HttpResponseError:
# IMDS responded
self._endpoint_available = True
except Exception as ex: # pylint:disable=broad-except
# if anything else was raised, assume the endpoint is unavailable
self._endpoint_available = False
self._error_message = (
"ManagedIdentityCredential authentication unavailable, no response from the IMDS endpoint."
)
six.raise_from(CredentialUnavailableError(self._error_message), ex)
if not self._endpoint_available:
raise CredentialUnavailableError(self._error_message)
try:
token = self._client.request_token(*scopes, headers={"Metadata": "true"})
except HttpResponseError as ex:
# 400 in response to a token request indicates managed identity is disabled,
# or the identity with the specified client_id is not available
if ex.status_code == 400:
self._endpoint_available = False
self._error_message = "ManagedIdentityCredential authentication unavailable. "
if self._user_assigned_identity:
self._error_message += "The requested identity has not been assigned to this resource."
else:
self._error_message += "No identity has been assigned to this resource."
six.raise_from(CredentialUnavailableError(message=self._error_message), ex)
# any other error is unexpected
six.raise_from(ClientAuthenticationError(message=ex.message, response=ex.response), ex)
return token
```
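A minimal sketch that inspects the IMDS request built by `get_request` above, assuming azure-core is installed; the client ID is a placeholder:
```python
# Assumes azure-core is installed and get_request (defined above) is in scope.
request = get_request(
    "https://management.azure.com/.default",
    {"client_id": "00000000-0000-0000-0000-000000000000"},  # placeholder identity
)
print(request.url)
# http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=...&client_id=...
```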
#### File: identity/_internal/aad_client_base.py
```python
import abc
import base64
import json
import time
from uuid import uuid4
import six
from msal import TokenCache
from azure.core.pipeline.policies import ContentDecodePolicy
from azure.core.pipeline.transport import HttpRequest
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from . import get_default_authority, normalize_authority
from .._internal import resolve_tenant
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
try:
ABC = abc.ABC
except AttributeError: # Python 2.7, abc exists, but not ABC
ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()}) # type: ignore
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Iterable, List, Optional, Union
from azure.core.pipeline import AsyncPipeline, Pipeline, PipelineResponse
from azure.core.pipeline.policies import AsyncHTTPPolicy, HTTPPolicy, SansIOHTTPPolicy
from azure.core.pipeline.transport import AsyncHttpTransport, HttpTransport
from .._internal import AadClientCertificate
PipelineType = Union[AsyncPipeline, Pipeline]
PolicyType = Union[AsyncHTTPPolicy, HTTPPolicy, SansIOHTTPPolicy]
TransportType = Union[AsyncHttpTransport, HttpTransport]
class AadClientBase(ABC):
_POST = ["POST"]
def __init__(
self, tenant_id, client_id, authority=None, cache=None, allow_multitenant_authentication=False, **kwargs
):
# type: (str, str, Optional[str], Optional[TokenCache], bool, **Any) -> None
self._authority = normalize_authority(authority) if authority else get_default_authority()
self._tenant_id = tenant_id
self._allow_multitenant = allow_multitenant_authentication
self._cache = cache or TokenCache()
self._client_id = client_id
self._pipeline = self._build_pipeline(**kwargs)
def get_cached_access_token(self, scopes, **kwargs):
# type: (Iterable[str], **Any) -> Optional[AccessToken]
tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
tokens = self._cache.find(
TokenCache.CredentialType.ACCESS_TOKEN,
target=list(scopes),
query={"client_id": self._client_id, "realm": tenant},
)
for token in tokens:
expires_on = int(token["expires_on"])
if expires_on > int(time.time()):
return AccessToken(token["secret"], expires_on)
return None
def get_cached_refresh_tokens(self, scopes):
# type: (Iterable[str]) -> List[dict]
"""Assumes all cached refresh tokens belong to the same user"""
return self._cache.find(TokenCache.CredentialType.REFRESH_TOKEN, target=list(scopes))
@abc.abstractmethod
def obtain_token_by_authorization_code(self, scopes, code, redirect_uri, client_secret=None, **kwargs):
pass
@abc.abstractmethod
def obtain_token_by_client_certificate(self, scopes, certificate, **kwargs):
pass
@abc.abstractmethod
def obtain_token_by_client_secret(self, scopes, secret, **kwargs):
pass
@abc.abstractmethod
def obtain_token_by_refresh_token(self, scopes, refresh_token, **kwargs):
pass
@abc.abstractmethod
def _build_pipeline(self, config=None, policies=None, transport=None, **kwargs):
pass
def _process_response(self, response, request_time):
# type: (PipelineResponse, int) -> AccessToken
content = ContentDecodePolicy.deserialize_from_http_generics(response.http_response)
if response.http_request.body.get("grant_type") == "refresh_token":
if content.get("error") == "invalid_grant":
# the request's refresh token is invalid -> evict it from the cache
cache_entries = self._cache.find(
TokenCache.CredentialType.REFRESH_TOKEN,
query={"secret": response.http_request.body["refresh_token"]},
)
for invalid_token in cache_entries:
self._cache.remove_rt(invalid_token)
if "refresh_token" in content:
# AAD returned a new refresh token -> update the cache entry
cache_entries = self._cache.find(
TokenCache.CredentialType.REFRESH_TOKEN,
query={"secret": response.http_request.body["refresh_token"]},
)
# If the old token is in multiple cache entries, the cache is in a state we don't
# expect or know how to reason about, so we update nothing.
if len(cache_entries) == 1:
self._cache.update_rt(cache_entries[0], content["refresh_token"])
del content["refresh_token"] # prevent caching a redundant entry
_raise_for_error(response, content)
if "expires_on" in content:
expires_on = int(content["expires_on"])
elif "expires_in" in content:
expires_on = request_time + int(content["expires_in"])
else:
_scrub_secrets(content)
raise ClientAuthenticationError(
message="Unexpected response from Azure Active Directory: {}".format(content)
)
token = AccessToken(content["access_token"], expires_on)
# caching is the final step because 'add' mutates 'content'
self._cache.add(
event={
"client_id": self._client_id,
"response": content,
"scope": response.http_request.body["scope"].split(),
"token_endpoint": response.http_request.url,
},
now=request_time,
)
return token
def _get_auth_code_request(self, scopes, code, redirect_uri, client_secret=None, **kwargs):
# type: (Iterable[str], str, str, Optional[str], **Any) -> HttpRequest
data = {
"client_id": self._client_id,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": redirect_uri,
"scope": " ".join(scopes),
}
if client_secret:
data["client_secret"] = client_secret
request = self._post(data, **kwargs)
return request
def _get_client_certificate_request(self, scopes, certificate, **kwargs):
# type: (Iterable[str], AadClientCertificate, **Any) -> HttpRequest
audience = self._get_token_url(**kwargs)
assertion = self._get_jwt_assertion(certificate, audience)
data = {
"client_assertion": assertion,
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_id": self._client_id,
"grant_type": "client_credentials",
"scope": " ".join(scopes),
}
request = self._post(data, **kwargs)
return request
def _get_client_secret_request(self, scopes, secret, **kwargs):
# type: (Iterable[str], str, **Any) -> HttpRequest
data = {
"client_id": self._client_id,
"client_secret": secret,
"grant_type": "client_credentials",
"scope": " ".join(scopes),
}
request = self._post(data, **kwargs)
return request
def _get_jwt_assertion(self, certificate, audience):
# type: (AadClientCertificate, str) -> str
now = int(time.time())
header = six.ensure_binary(
json.dumps({"typ": "JWT", "alg": "RS256", "x5t": certificate.thumbprint}), encoding="utf-8"
)
payload = six.ensure_binary(
json.dumps(
{
"jti": str(uuid4()),
"aud": audience,
"iss": self._client_id,
"sub": self._client_id,
"nbf": now,
"exp": now + (60 * 30),
}
),
encoding="utf-8",
)
jws = base64.urlsafe_b64encode(header) + b"." + base64.urlsafe_b64encode(payload)
signature = certificate.sign(jws)
jwt_bytes = jws + b"." + base64.urlsafe_b64encode(signature)
return jwt_bytes.decode("utf-8")
def _get_refresh_token_request(self, scopes, refresh_token, **kwargs):
# type: (Iterable[str], str, **Any) -> HttpRequest
data = {
"grant_type": "refresh_token",
"refresh_token": refresh_token,
"scope": " ".join(scopes),
"client_id": self._client_id,
"client_info": 1, # request AAD include home_account_id in its response
}
request = self._post(data, **kwargs)
return request
def _get_token_url(self, **kwargs):
# type: (**Any) -> str
tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
return "/".join((self._authority, tenant, "oauth2/v2.0/token"))
def _post(self, data, **kwargs):
# type: (dict, **Any) -> HttpRequest
url = self._get_token_url(**kwargs)
return HttpRequest("POST", url, data=data, headers={"Content-Type": "application/x-www-form-urlencoded"})
def _scrub_secrets(response):
# type: (dict) -> None
for secret in ("access_token", "refresh_token"):
if secret in response:
response[secret] = "***"
def _raise_for_error(response, content):
# type: (PipelineResponse, dict) -> None
if "error" not in content:
return
_scrub_secrets(content)
if "error_description" in content:
message = "Azure Active Directory error '({}) {}'".format(content["error"], content["error_description"])
else:
message = "Azure Active Directory error '{}'".format(content)
raise ClientAuthenticationError(message=message, response=response.http_response)
```
|
{
"source": "jg1uaa/LPCNet.mozilla",
"score": 2
}
|
#### File: LPCNet.mozilla/training_tf2/pade.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, GRU, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D, Multiply, Add, Bidirectional, MaxPooling1D, Activation
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD
def my_loss1(y_true, y_pred):
return 1*K.mean(K.square(y_true-y_pred)) + 1*K.max(K.square(y_true-y_pred), axis=1)
def my_loss2(y_true, y_pred):
return .1*K.mean(K.square(y_true-y_pred)) + 1*K.max(K.square(y_true-y_pred), axis=1)
def my_loss3(y_true, y_pred):
return .01*K.mean(K.square(y_true-y_pred)) + 1*K.max(K.square(y_true-y_pred), axis=1)
# Using these initializers to seed the approximation
# with a reasonable starting point
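# A minimal sketch of the seeded form (assumed from the coefficients below): the rational
# approximation tanh(x) ~= x*(945 + 105*x**2 + x**4) / (945 + 420*x**2 + 15*x**4), clipped
# to [-1, 1]; the two Dense layers then refine these numerator/denominator coefficients
# over the [1, x**2, x**4] basis built in x2in.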
def num_init(shape, dtype=None):
rr = tf.constant([[945], [105], [1]], dtype=dtype)
#rr = tf.constant([[946.56757], [98.01368], [0.66841]], dtype=dtype)
print(rr)
return rr
def den_init(shape, dtype=None):
rr = tf.constant([[945], [420], [15]], dtype=dtype)
#rr = tf.constant([[946.604], [413.342], [12.465]], dtype=dtype)
print(rr)
return rr
x = np.arange(-10, 10, .01)
N = len(x)
x = np.reshape(x, (1, -1, 1))
x2 = x*x
x2in = np.concatenate([x2*0 + 1, x2, x2*x2], axis=2)
yout = np.tanh(x)
model_x = Input(shape=(None, 1,))
model_x2 = Input(shape=(None, 3,))
num = Dense(1, name='num', use_bias=False, kernel_initializer=num_init)
den = Dense(1, name='den', use_bias=False, kernel_initializer=den_init)
def ratio(x):
return tf.minimum(1., tf.maximum(-1., x[0]*x[1]/x[2]))
out_layer = Lambda(ratio)
output = out_layer([model_x, num(model_x2), den(model_x2)])
model = Model([model_x, model_x2], output)
model.summary()
model.compile(Adam(0.05, beta_1=0.9, beta_2=0.9, decay=2e-5), loss='mean_squared_error')
model.fit([x, x2in], yout, batch_size=1, epochs=500000, validation_split=0.0)
model.compile(Adam(0.001, beta_2=0.9, decay=1e-4), loss=my_loss1)
model.fit([x, x2in], yout, batch_size=1, epochs=50000, validation_split=0.0)
model.compile(Adam(0.0001, beta_2=0.9, decay=1e-4), loss=my_loss2)
model.fit([x, x2in], yout, batch_size=1, epochs=50000, validation_split=0.0)
model.compile(Adam(0.00001, beta_2=0.9, decay=1e-4), loss=my_loss3)
model.fit([x, x2in], yout, batch_size=1, epochs=50000, validation_split=0.0)
model.save_weights('tanh.h5')
```
|
{
"source": "jg20019/simple-ssg",
"score": 3
}
|
#### File: jg20019/simple-ssg/post.py
```python
from typing import Dict
from datetime import date
class Post:
def __init__(self, filename, headers: Dict, body: str):
self.title = headers['title']
self.author = headers['author']
self.date = date.fromisoformat(headers['date'])
self.filename = filename
self.body = body
def __str__(self) -> str:
return f'{self.title} by {self.author}'
```
|
{
"source": "jg2023/Lab-Group-22-1ALent-Computing",
"score": 3
}
|
#### File: jg2023/Lab-Group-22-1ALent-Computing/Task1F.py
```python
from floodsystem import geo
from floodsystem import stationdata
from floodsystem import station
def run():
stations = stationdata.build_station_list()
List = station.inconsistent_typical_range_stations(stations)
print(List)
ListOfNames = []
for Station in List:
ListOfNames.append(Station.name)
print(ListOfNames)
if __name__ == '__main__':
run()
```
#### File: jg2023/Lab-Group-22-1ALent-Computing/test_geo.py
```python
from floodsystem.geo import rivers_with_station
from floodsystem.geo import rivers_by_station_number
from floodsystem.geo import stations_by_river
from floodsystem.stationdata import build_station_list
from floodsystem.station import MonitoringStation
def test_rivers_with_station():
stations = build_station_list()
rivers = rivers_with_station(stations)
Cam = None
for river in rivers:
if river == "River Cam":
Cam = river
break
assert Cam
assert len(rivers) <= len(stations)
def test_stations_by_river():
stations = build_station_list()
RiverDictionary = stations_by_river(stations)
rivers = rivers_with_station(stations)
assert len(rivers) == len(RiverDictionary)
assert type(RiverDictionary) == dict
for river in RiverDictionary:
assert type(river) == str
assert type(RiverDictionary[river]) == list
for i in RiverDictionary[river]:
assert type(i) == MonitoringStation
def test_rivers_by_station_number():
stations = build_station_list()
rivers = rivers_with_station(stations)
for i in range(len(stations)):
riverList = rivers_by_station_number(stations,i)
if i == 0:
assert len(riverList) == len(rivers)
else:
if len(riverList) != i:
for j in range(1,len(riverList)-i):
assert riverList[len(riverList)-j] == riverList[len(riverList)-j-1]
else:
assert len(riverList) == i
```
|
{
"source": "JG228250/Team-9-tinkering_graphics",
"score": 3
}
|
#### File: Team-9-tinkering_graphics/Contract 1/Contract#1.py
```python
import pygame, sys
from pygame.locals import *
pygame.init()
# Set up window.
window_main = pygame.display.set_mode((600, 500), 0, 32)
pygame.display.set_caption('Contract#1')
# Load tile png.
def load_tile():
return pygame.image.load('Grass.png').convert()
# decreased blue and green to seem slightly darker and more red/brown like autumn.
def autumn(surface = pygame.Surface((1, 1))):
pixel = pygame.Color(0, 0, 0)
for x in range(surface.get_width()):
for y in range(surface.get_height()):
pixel = surface.get_at((x, y))
surface.set_at((x, y), pygame.Color(pixel.r, int(pixel.g * 0.9), int(pixel.b * 0.8)))
# increased blue and decreased red and green so it is like puddles form.
def wet(surface = pygame.Surface((1, 1))):
pixel = pygame.Color(0, 0, 0)
for x in range(surface.get_width()):
for y in range(surface.get_height()):
pixel = surface.get_at((x, y))
surface.set_at((x, y), pygame.Color(int(pixel.r * 0.9), int(pixel.g * 0.9), min(255, int(pixel.b * 1.1)))) # clamp blue so values above 255 don't raise an error
# Increased red and decreased blue to make it look more yellow and dry.
def dried(surface=pygame.Surface((1, 1))):
pixel = pygame.Color(0, 0, 0)
for x in range(surface.get_width()):
for y in range(surface.get_height()):
pixel = surface.get_at((x, y))
surface.set_at((x, y), pygame.Color(min(255, int(pixel.r * 1.1)), pixel.g, int(pixel.b * 0.9))) # clamp red so values above 255 don't raise an error
# Instruction / UI setup.
font = pygame.font.Font('freesansbold.ttf', 18)
message1 = font.render('Press 1 for autumn colouration.', True, (0, 0, 0))
message2 = font.render('Press 2 for wet colouration.', True, (0, 0, 0))
message3 = font.render('Press 3 for dried colouration.', True, (0, 0, 0))
message4 = font.render('Press 4 to reset tile.', True, (0, 0, 0))
message1_rect = message1.get_rect()
message2_rect = message2.get_rect()
message3_rect = message3.get_rect()
message4_rect = message4.get_rect()
# Positions for messages.
message1_rect.center = (300, 200)
message2_rect.center = (300, 240)
message3_rect.center = (300, 280)
message4_rect.center = (300, 320)
# FPS clock stops values of colour from changing too quickly.
FPS = 15
FPSClock = pygame.time.Clock()
grass_tile = load_tile()
running = True
# Main loop.
while running:
# Quit checker.
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Checks if a key has been pressed and calls the relevant function.
# 4 reloads tile to reset colour changes.
keys = pygame.key.get_pressed()
if keys[K_1]:
autumn(grass_tile)
if keys[K_2]:
wet(grass_tile)
if keys[K_3]:
dried(grass_tile)
if keys[K_4]:
grass_tile = load_tile()
# Clear screen and draw tile.
window_main.fill((200, 200, 200))
pygame.Surface.blit(window_main, grass_tile, (280, 50))
# Display help messages.
window_main.blit(message1, message1_rect)
window_main.blit(message2, message2_rect)
window_main.blit(message3, message3_rect)
window_main.blit(message4, message4_rect)
# FPS tick and display update.
FPSClock.tick(FPS)
pygame.display.update()
pygame.quit()
```
|
{
"source": "jg40305/OnlineRealtimeActionRecofnitionBaseOnOpenPose",
"score": 2
}
|
#### File: OnlineRealtimeActionRecofnitionBaseOnOpenPose/Pose/pose_visualizer.py
```python
import cv2 as cv
import numpy as np
import tensorflow as tf
from .coco_format import CocoPart, CocoColors, CocoPairsRender
from .pose_estimator import estimate
class TfPoseVisualizer:
# the thickness of showing skeleton
Thickness_ratio = 2
def __init__(self, graph_path, target_size=(368, 368)):
self.target_size = target_size
# load graph
with tf.gfile.GFile(graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
self.graph = tf.get_default_graph()
tf.import_graph_def(graph_def, name='TfPoseEstimator')
self.persistent_sess = tf.Session(graph=self.graph)
self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
self.heatMat = self.pafMat = None
@staticmethod
def draw_pose_rgb(npimg, humans, imgcopy=False):
if imgcopy:
npimg = np.copy(npimg)
image_h, image_w = npimg.shape[:2]
joints, bboxes, xcenter = [], [], []
# for record and get dataset
record_joints_norm = []
for human in humans:
xs, ys, centers = [], [], {}
# Draw all of this person's joint points onto the image
for i in range(CocoPart.Background.value):
if i not in human.body_parts.keys():
# Pad missing joint data with zeros
record_joints_norm += [0.0, 0.0]
continue
body_part = human.body_parts[i]
center_x = body_part.x * image_w + 0.5
center_y = body_part.y * image_h + 0.5
center = (int(center_x), int(center_y))
centers[i] = center
record_joints_norm += [round(center_x/1280, 2), round(center_y/720, 2)]
xs.append(center[0])
ys.append(center[1])
# Draw the joint point
cv.circle(npimg, center, 3, CocoColors[i], thickness=TfPoseVisualizer.Thickness_ratio * 2,
lineType=8, shift=0)
# Connect the joints belonging to the same person, body part by body part
for pair_order, pair in enumerate(CocoPairsRender):
if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():
continue
cv.line(npimg, centers[pair[0]], centers[pair[1]], CocoColors[pair_order],
thickness=TfPoseVisualizer.Thickness_ratio, lineType=8, shift=0)
# Generate an ROI region from each person's joint information
tl_x = min(xs)
tl_y = min(ys)
width = max(xs) - min(xs)
height = max(ys) - min(ys)
bboxes.append([tl_x, tl_y, width, height])
# Record all joints for each frame
joints.append(centers)
# Record COCO keypoint 1 as xcenter
if 1 in centers:
xcenter.append(centers[1][0])
return npimg, joints, bboxes, xcenter, record_joints_norm
@staticmethod
def draw_pose_only(npimg, humans):
picture_path = "Pose/aaa.jpg"
img = cv.imread(picture_path)
image_h, image_w = img.shape[:2]
back_ground = np.ones((image_h, image_w), dtype=np.uint8)
back_ground = cv.cvtColor(back_ground, cv.COLOR_GRAY2BGR)
back_ground = img # use img as back_ground directly
result = TfPoseVisualizer.draw_pose_rgb(back_ground, humans)
return result
def inference(self, npimg):
if npimg is None:
raise Exception('The frame does not exist.')
rois = []
infos = []
# _get_scaled_img
if npimg.shape[:2] != (self.target_size[1], self.target_size[0]):
# resize
npimg = cv.resize(npimg, self.target_size)
rois.extend([npimg])
infos.extend([(0.0, 0.0, 1.0, 1.0)])
output = self.persistent_sess.run(self.tensor_output, feed_dict={self.tensor_image: rois})
heat_mats = output[:, :, :, :19]
paf_mats = output[:, :, :, 19:]
output_h, output_w = output.shape[1:3]
max_ratio_w = max_ratio_h = 10000.0
for info in infos:
max_ratio_w = min(max_ratio_w, info[2])
max_ratio_h = min(max_ratio_h, info[3])
mat_w, mat_h = int(output_w / max_ratio_w), int(output_h / max_ratio_h)
resized_heat_mat = np.zeros((mat_h, mat_w, 19), dtype=np.float32)
resized_paf_mat = np.zeros((mat_h, mat_w, 38), dtype=np.float32)
resized_cnt_mat = np.zeros((mat_h, mat_w, 1), dtype=np.float32)
resized_cnt_mat += 1e-12
for heatMat, pafMat, info in zip(heat_mats, paf_mats, infos):
w, h = int(info[2] * mat_w), int(info[3] * mat_h)
heatMat = cv.resize(heatMat, (w, h))
pafMat = cv.resize(pafMat, (w, h))
x, y = int(info[0] * mat_w), int(info[1] * mat_h)
# add up
resized_heat_mat[max(0, y):y + h, max(0, x):x + w, :] = np.maximum(
resized_heat_mat[max(0, y):y + h, max(0, x):x + w, :], heatMat[max(0, -y):, max(0, -x):, :])
resized_paf_mat[max(0, y):y + h, max(0, x):x + w, :] += pafMat[max(0, -y):, max(0, -x):, :]
resized_cnt_mat[max(0, y):y + h, max(0, x):x + w, :] += 1
self.heatMat = resized_heat_mat
self.pafMat = resized_paf_mat / (np.log(resized_cnt_mat) + 1)
humans = estimate(self.heatMat, self.pafMat)
return humans
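# Usage sketch (assumptions: a frozen OpenPose graph file, named 'graph_opt.pb' here only for
# illustration, and an OpenCV BGR frame):
#   estimator = TfPoseVisualizer('graph_opt.pb', target_size=(432, 368))
#   humans = estimator.inference(frame)
#   frame, joints, bboxes, xcenter, joints_norm = TfPoseVisualizer.draw_pose_rgb(frame, humans)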
```
|
{
"source": "jg719/test_documentation",
"score": 2
}
|
#### File: docs/source/MCMC_analysis.py
```python
import time
import string
import math
import random
import csv
from functools import reduce
from openpyxl import load_workbook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import itertools
import selenium
from selenium import webdriver
from selenium.common.exceptions import ElementClickInterceptedException
from webdriver_manager.chrome import ChromeDriverManager
from scipy.optimize import curve_fit
from scipy.stats import norm
from scipy import optimize
from scipy.stats import multivariate_normal
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
driver = webdriver.Chrome(ChromeDriverManager().install()) # set browser
driver.get('http://tool.globalcalculator.org/') # open website
id_box = driver.find_element_by_id('lets-start') # bypass "Start" screen
id_box.click()
dfs = pd.read_excel("./Output_map.xlsx") # file mapping output lever names to xpaths
dfs_3 = pd.read_excel("./Input_map.xlsx") # file mapping input names to xpaths
lever_names = list(dfs_3.iloc[:, 0]) # lever names (assumed: first column of the input map); used by the MCMC helpers below
for i in range(len(dfs)): # generate html lever addresses and put them in the dataframe
dfs.iloc[i, 2] = '/html/body/table[1]/tbody/tr/td/table/tbody/tr[2]/td[1]/div[13]/div/table/tbody/tr[' + str(dfs.iloc[i, 1]).strip("%") + ']/td[5]/div/font'
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D']
def multi_sampler_2D(observations, all_levers_current, all_thresholds, samples=4, mu_init=[3000, 0.5], plot=False, mu_prior_mu=[3100, 1], mu_prior_sd=[[200, 0],[0, 0.3]], imprimir = False):
"""
Implementation of a variant of Markov Chain Monte-Carlo (MCMC). Given some prior
information and a set of observations, this function performs MCMC. It calculates the posterior
distribution of temperature and cost values and the lever values used in doing so.
**Args**:
- observations (list of lists (N x 2)): Contains temperature and cost values.
- all_levers_current (list): Current values of input levers.
- all_thresholds (list of lists (48 x 2)): Each entry contains an upper and lower bound for each lever.
- samples (int): Number of MCMC steps.
- mu_init (list): Initial guess of temperature and cost values.
- plot (boolean): Flag used for plotting.
- mu_prior_mu (list): Mean temperature and cost values of prior distribution (assumed Gaussian).
- mu_prior_sd (list of lists (2 x 2)): Diagonal matrix containing the standard deviation of the 2D prior.
- imprimir (boolean): Flag used for printing useful information.
**Returns**:
- posterior (list of lists (N x 2)): Contains trace of all temperature and cost values.
- accepted (list): Contains all the lever values corresponding the proposal accepted by MCMC.
- rate (list): Contains the probability of each temperature and cost pair proposal.
- accepted_values (list of lists (M x 2)): Contains accepted temperature and cost values.
"""
# Initialisations
mu_current = mu_init # Set the current temperature and cost value
posterior = [mu_current] # First value of the trace
accepted = []; accepted_values = []; rate = []
address = str(driver.current_url) # Get current URL (website must be TIAM-UCL's 2DS pathway)
# Perform an MCMC step
for i in range(samples):
all_levers_temp = all_levers_current.copy() # Store current lever combination in a temp variable
# Moves the calculator's levers using the sampler. Reads their corresponding temperature and cost values (proposal).
all_levers_current, mu_proposal = generate_mu_proposal_2D(all_levers_current, all_thresholds, address = address)
# Compute likelihood ratio of proposed temperature and cost values
likelihood_ratio = np.prod(multivariate_normal(mu_proposal, [[1000000, 0], [0, 100]]).pdf(observations) / multivariate_normal(mu_current, [[1000000, 0], [0, 100]]).pdf(observations))
# Compute the prior probability ratio of the proposed temperature and cost values
prior_ratio = multivariate_normal(mu_prior_mu, mu_prior_sd).pdf(mu_proposal) / multivariate_normal(mu_prior_mu, mu_prior_sd).pdf(mu_current)
# Probability of accepting the proposal
p_accept = likelihood_ratio*prior_ratio
rate.append(p_accept)
# Printing routine
if imprimir == True:
print("Iteration: ", i, "Current: ", mu_current, " Proposal: ", mu_proposal)
print("Likelihood ratio: ", likelihood_ratio, "Prior ratio: ", prior_ratio, "Acceptance probability: ", p_accept, "\n")
# Decide whether to accept or reject the temperature and cost values proposal
accept = np.random.rand() < p_accept
# Temperature and cost values accepted
if accept:
address = str(driver.current_url) # Change URL address to current lever values (otherwise it remains the same)
mu_current = mu_proposal # Update current temperature and cost values
accepted = accepted[:].copy() + [all_levers_current.copy()] # Save accepted combination of lever values
accepted_values.append(mu_current.copy()) # Save accepted temperature and cost values
# Temperature and cost values rejected
else:
all_levers_current = all_levers_temp.copy() # Return lever values to last accepted iteration
# Update trace of temperature and cost values
posterior.append(mu_current.copy())
return posterior, accepted, rate, accepted_values
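# Usage sketch (assumptions: the Selenium driver above is on the TIAM-UCL 2DS pathway; the
# observation, start and threshold values here are placeholders):
#   observations = [[3000, 0.0], [2950, 0.1]]        # hypothetical CO2/GDP observations
#   start = [2.0] * len(lever_names)                 # start every lever at position 2
#   bounds = [[1.8, 3.2]] * len(lever_names)         # hypothetical per-lever thresholds
#   posterior, accepted_inputs, rate, accepted_values = multi_sampler_2D(
#       observations, start, bounds, samples=100)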
def move_lever(lever, value, costs = False, address = str(driver.current_url)):
"""
Sets a lever to a given value. Reads corresponding temperature and, if selected, cost values.
**Args**:
- lever (list of strings): Contains the names of the levers to be moved.
- value (list of floats): Contains the values of the levers to be moved - automatically matched to the lever names.
- costs (optional, boolean): Flag to decide whether to read cost values or not.
- address (optional, string): URL address corresponding to the given lever combination.
**Returns**:
- output: The CO2 value (int), or a list [CO2, GDP change] when costs is True.
"""
# Update URL address with input lever names and values, one at a time
for i in range(len(lever)):
address = new_URL(lever[i], value[i], address = address)
# Open website corresponding to the input values
driver.get(address)
########################################## IMPORTANT ####################################################
# All of the lines below are in charge of webscraping the temperature and, if selected, the cost values.
# The Global Calculator is a hard to webscrape website (sometimes, it results in bugs or uncoherent
# temperature and cost values). The code below ensures that, no matter what, the values will be read.
# To do so it performs different actions based on the current state of the website and the output values.
#########################################################################################################
time.sleep(0.2)
id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
id_box.click()
time.sleep(1)
# Read temperature values
try:
output = int(read_CO2()[:4]) # Read output CO2
except: # Problem reading output CO2? The code below sorts it
time.sleep(1)
open_lever_menus() # Open lever menus
move_lever([lever[0]],[1.3], costs = False) # Move lever to an arbitrary value
driver.get(address) # Open website back
time.sleep(0.2)
id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
id_box.click()
output = int(read_CO2()[:4]) # Read output CO2
# Read cost values
if costs == True:
driver.find_element_by_xpath('//*[@id="mn-6"]').click() # Move to compare tab
time.sleep(0.1)
userid_element = driver.find_element_by_xpath('//*[@id="container_costs_vs_counterfactual"]/div/div[11]') # Read GDP
cost_output = userid_element.text
try:
cost_output = float(cost_output[:4].rstrip("%")) # Convert GDP from string to float
except: # Problem converting GDP? The code below sorts it
cost_output = float(cost_output[:3].rstrip("%"))
# Reload the page and bypass start
driver.refresh() # Refresh
time.sleep(1)
id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
id_box.click()
userid_element = driver.find_element_by_xpath('//*[@id="container_costs_vs_counterfactual"]/div/div[12]') # Read text below GDP value
cost_flag = userid_element.text
# Find sign of GDP (less expensive => increase; more expensive => decrease)
if cost_flag == 'less expensive':
cost_output = -cost_output # Reverse sign
# Go back to the overview section
try:
driver.find_element_by_xpath('//*[@id="mn-1"]').click()
except: # Problem going back to the overview section? The code below sorts it
time.sleep(0.2)
id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
id_box.click()
output = [output, cost_output] # Output temperature and cost values
return output
def open_lever_menus():
"""Opens all the lever menus of the Global Calculator"""
for i in range(1, 16): # Iterate through menus
try: # Tries to open the menu
driver.find_element_by_xpath('//*[@id="ml-open-close-link-' + str(i) + '"]' ).click() # Open menu
time.sleep(0.3) # Server can't respond quicker than this
except ElementClickInterceptedException: # If opening menus too fast, then slow down
time.sleep(1)
driver.find_element_by_xpath('//*[@id="ml-open-close-link-' + str(i) + '"]' ).click()
return
def find_lever_ref(name, box = 1):
"""Given a lever name and box position, return its XPath"""
ref = str(dfs[dfs.iloc[:, 1].str.match(name)].iloc[0, 3]) # Get lever xpath
ref = ref[:-2] + str(2 + box) + ref[-2 + 1:] # Adjust address for given box
return ref
def read_lever(name):
"""Given a lever name, return its ID"""
pos = str(dfs[dfs.iloc[:, 1].str.match(name)].iloc[0, 2]) # Find lever ID
return 'fb-l-' + pos
def read_CO2():
"""For the current lever combination, return the CO2 level (GtCO2)"""
userid_element = driver.find_element_by_xpath('//*[@id="container_dashboard_co2_budget"]') # Find element that contains CO2 value
time.sleep(0.05)
co2 = userid_element.text.splitlines()[-6] # Get CO2 value from the container
return co2
def read_outputs():
"""Reads all outputs and returns them as a list"""
out_vals = []
for i in range(len(dfs)):
userid_element = driver.find_element_by_xpath(dfs.iloc[i, 2])
out_vals.append(float(userid_element.text.rstrip("%")))
return out_vals
def map_to_letter(value):
"""Takes a float value in the range [1, 4.0] and returns its corresponding URL character"""
if value != 2 and value != 3 and value != 4: # Special cases
if value < 4:
pos = int((value - 1.0)*10)
try:
back = letters[pos]
except: # Oops, the value is out of bounds
print("Not enough letters, fetching position: ", pos, " corresponding to value: ", value)
else: # Special case: Value = 4
back = letters[-1]
else:
back = int(value)
return back
def random_URL():
"""Generates and return a random URL (address) and its corresponding lever values (input_levers)"""
address = []; input_levers = []
string = "" # URL address to be stored here
for i in range(49): # Generate a random value for each lever, map it to a letter and save it
rand_float = random.randint(18, 32)/10 # Define bounds for random number generator (currently set to [1.8, 3.2])
input_levers.append(rand_float); address.append(map_to_letter(rand_float)) # Store them
address[43:47] = [1, 1, 1, 1] # CCS values are fixed at 1 for the moment
input_levers[43:47] = [1, 1, 1, 1] # CCS values are fixed at 1 for the moment
for i in address: # Construct string containing the current lever combination
string = string + str(i)
address = "http://tool.globalcalculator.org/globcalc.html?levers=" + string + "2211111111/technology/en" # Construct URL address
return address, input_levers
def training_sample():
"""Generates a random training sample. It returns the input (input_levers) and output (random_output) values"""
address, input_levers = random_URL() # Generate random URL address
driver.get(address) # Open that URL address
time.sleep(1)
id_box = driver.find_element_by_id('lets-start') # Bypass "Start now" menu
id_box.click()
time.sleep(0.2)
compare_box = driver.find_element_by_xpath('//*[@id="mp-nav-compare"]') # Move to the "Compare" section
compare_box.click()
random_output = read_outputs() # Read output (read_outputs takes no arguments and uses the module-level dfs)
return input_levers, random_output
def log_training_sample():
"""Generate training sample and save it to a CSV file"""
Input, Output = training_sample() # Generate random training sample
with open(r'Training_set.csv', 'a', newline='') as f: # Append as Excel row
writer = csv.writer(f)
writer.writerow(Input + Output)
return
def find_lever_URL_position(name):
"""Given a lever name, return its position in the URL"""
return str(dfs_3[dfs_3.iloc[:, 0].str.match(name)].iloc[0, 1]) # Get lever position to insert in the URL
def new_URL(name, value, address = "http://tool.globalcalculator.org/globcalc.html?levers=l2wz222CBpp3pC3f2Dw3DC3plzgj1tA13pp2p223ri11111p22211111111/dashboard/en"):
"""
Generate a new URL address by changing a lever value.
**Args**:
- Name (string): Target lever name
- Value (float): Target value for lever
- Address (string): URL where lever will be changed. Set to TIAM-UCL 2DS pathway by default.
**Returns**:
URL after changes are applied.
"""
value = map_to_letter(value) # Map value to letter
index = int(find_lever_URL_position(name)) # Find URL position of given lever
URL = address[ : 53 + index] + str(value) + address[54 + index :] # Insert given value in its corresponding URL position
return URL
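# Example: new_URL('Calories consumed', 2) swaps that lever's character in the default
# TIAM-UCL 2DS address and returns the modified URL.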
def find_lever_sensitivities():
"""
Analysis of climate impact sensitivity to changes in the inputs.
Takes the default pathway (TIAM UCL 2DS) and changes each lever value at a time (between 1.0 and 4.0),
reading its corresponding output.
"""
all_sensitivities = np.zeros((30, len(dfs_3.iloc[:, 0]))) # Store lever sensitivities here
col = 0 # Counter used for indexing
for lever in dfs_3.iloc[:, 0]: # Iterate through levers, uncomment for testing: # print("Putting lever: ", lever, " in column: ", col)
sensitivity = []
for i in np.linspace(1, 3.9, 30): # Move lever one increment at a time
sensitivity.append(move_lever([lever], [round(i, 2)])) # Move lever and store CO2 value # print(sensitivity)
all_sensitivities[:, col] = sensitivity # Append
col += 1
set_to_benchmark() # Reset levers to benchmark pathway
### Plotting routine ###
x_lever = np.linspace(1, 3.9, 30) # X axis
mean = 3000 # Mean threshold
upper = mean + mean*0.05 # Upper threshold
lower = mean - mean*0.05 # Lower threshold
plt.figure(figsize = (20, 10))
for i in range(48):
plt.plot(x_lever, all_sensitivities[:, i])
plt.title("Temperature values and thresholds")
plt.xlabel("Lever position")
plt.ylabel("GtCO2 per capita")
plt.axhline(y=3000, color='b', linestyle='-') # Plot thresholds
plt.axhline(y=lower, color='g', linestyle='--')
plt.axhline(y=upper, color='g', linestyle='--')
plt.ylim([2250, 3750])
plt.figure(figsize = (20, 10))
thresholds = np.zeros((48, 2))
lever_number = 0
for i in all_sensitivities.T: # Calculate lever values corresponding to thresholds
temp = []
pos = []
count = 0
for j in i:
if j<upper and j>lower:
temp.append(j)
pos.append(round(x_lever[count], 2))
count += 1
thresholds[lever_number, :] = [pos[temp.index(max(temp))], pos[temp.index(min(temp))]]
plt.plot(pos, temp)
plt.title("Temperature values within thresholds")
plt.xlabel("Lever position")
plt.ylabel("GtCO2 per capita")
lever_number+=1
plt.figure(figsize = (20, 20))
count = 0
for i in thresholds:
plt.plot(np.linspace(i[0], i[1], 10), np.linspace(count, count, 10))
count += 1
plt.yticks(np.arange(48), list(dfs_3.iloc[:, 0].to_numpy()), fontsize = 20)
plt.title("Lever ranges that meet temperature thresholds")
plt.xlabel("Value range")
plt.ylabel("Lever")
### End of plotting routine ###
return thresholds
def lever_step(lever_value, thresholds):
"""Naive modification of the Metropolis Hastings algorithm - moves a lever randomly up or down by 0.1. Return the new lever value"""
move = 0.0
prob = random.randint(0, 100)/100 # Generate random number
if prob < 0.5: move = -0.1 # Move lever down
else: move = 0.1 # Move lever up
# If the lever value is out of bounds, reverse direction of step
if (lever_value + move < thresholds[0]) or (lever_value + move > thresholds[1]):
move = -move
return round(lever_value + move, 3)
def cost_sensitivity():
"""
Analysis of GDP sensitivity to changes in the inputs.
Sets all levers to 2 and moves each lever to 3 at a time,
reading its corresponding output.
"""
for lever in dfs_3.iloc[:, 0]: # Set all levers to 2
move_lever([lever], [2])
costs_sensitivity = []
for lever in dfs_3.iloc[:, 0]: # Move each lever to 3 at a time
print("Moving lever: ", lever)
costs_temp = move_lever([lever], [3], costs = True)[1]
costs_sensitivity.append(costs_temp)
print("Marginal cost: ", costs_temp)
print("Returning lever back to normal... \n")
move_lever([lever], [2], costs = False) # Put the lever back to 2
reference = move_lever(['Calories consumed'], [2], costs = True)[1] # Read the benchmark cost
data = {'Lever': list(dfs_3.iloc[:, 0].to_numpy()), # Dictionary containing costs and lever names
'Marginal cost': costs_sensitivity
}
costs_df = pd.DataFrame(data, columns = ['Lever', 'Marginal cost']) # Put cost values into dataframe
costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = False) # Sort costs
costs_df.iloc[0, 1] = -0.08 # Truncate first value (very high, reverses direction of GDP, leading to bug)
costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = False)
costs_df.iloc[-1, 1] = 0.46
costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = True)
costs_df['Marginal cost'] = costs_df['Marginal cost'] - reference # Calculate cost change wrt benchmark
### Plotting routine ###
plt.figure(figsize = (20, 10))
plt.xticks(rotation=45, horizontalalignment='right')
plt.bar(costs_df.iloc[:, 0], costs_df.iloc[:, 1])
plt.ylabel("$\Delta$GDP decrease")
plt.title("∆GDP decrease with respect to TIAM-UCL 2DS benchmark pathway – Moving each lever from 2 to 3")
### End of plotting routine ###
return
def set_to_benchmark():
"""Set Global Calculator to TIMA-UCL 2DS's benchmark pathway"""
driver.get('http://tool.globalcalculator.org/globcalc.html?levers=l2wz222CBpp3pC3f2Dw3DC3plzgj1tA13pp2p223ri11111p22211111111/dashboard/en')
id_box = driver.find_element_by_id('lets-start') # Bypass "Start now" screen
id_box.click()
return
def random_lever_value(lever_name):
"""Moves a given lever (lever_name) to a random position between 1 and 3.9"""
rand_val = random.randint(10, 39)/10 # Generate random value between 1 and 3.9
return move_lever([lever_name], [round(rand_val, 2)], costs = True) # Move lever and return CO2 and GDP values
def new_lever_combination():
"""Returns an array containing a random value for each lever"""
random_lever_values = []
for i in range(len(lever_names)):
random_lever_values.append(random.randint(10, 39)/10) # Generate random lever value
return random_lever_values
def generate_mu_proposal_2D(all_levers_current, all_thresholds, address = str(driver.current_url)):
"""Used in MCMC. Takes arrays containing all current values and thresholds and generates a new mu proposal"""
for i in range(len(lever_names)): # Take discrete MH step for each lever
all_levers_current[i] = lever_step(all_levers_current[i], all_thresholds[i])
# Pass list with all lever names and current values. Read temperature and costs.
output = move_lever(lever_names, all_levers_current, costs = True, address = address)
return all_levers_current, output
def save_all():
"""Save all accepted lever combinations, temperature and cost values, trace and probability values to a .XLSX file"""
df1 = pd.DataFrame(np.array(posterior[:-1])); # Dataframe with posterior
df1['2'] = rate # Append rate to it
writer = pd.ExcelWriter('MCMC_output_1.xlsx', engine='openpyxl') # Open Excel file
writer.book = load_workbook('MCMC_output_1.xlsx') # Load current workbook
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets) # Load all sheets
reader = pd.read_excel(r'MCMC_output_1.xlsx') # Read current file
df1.to_excel(writer,index=False,header=False,startrow=len(reader)+1) # Write out the new sheet
writer.close() # Close Excel file
df2 = pd.DataFrame(np.array(accepted_inputs)); # Dataframe with accepted lever combinations
df2['48'] = np.array(accepted_values)[:, 0]; df2['49'] = np.array(accepted_values)[:, 1]; # Append accepted temperature and cost values
writer = pd.ExcelWriter('MCMC_output_2.xlsx', engine='openpyxl') # Open Excel file
writer.book = load_workbook('MCMC_output_2.xlsx') # Load current workbook
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets) # Load all sheets
reader = pd.read_excel(r'MCMC_output_2.xlsx') # Read current file
df2.to_excel(writer,index=False,header=False,startrow=len(reader)+1) # Write out the new sheet
writer.close() # Close Excel file
return
def set_lever(target, lever_name):
"""Set a given lever (lever_name) to a value (target) by clicking on it - Using a minimum number of clicks."""
n_clicks = 0 # Set to 0 by default (<=> do nothing)
current = driver.find_element_by_id(read_lever(lever_name)) # Get lever id
current = float(current.get_attribute('textContent')) # Read current lever value
# Two possibilities: same box, or different box
jump = math.trunc(target) - math.trunc(current)
diff = target - current
# If the lever is already set
if target == current:
# print("Current value = Target value")
box_number = math.trunc(current)
# Same box -> 2 possibilities: up or down (down can hit boundary or not)
elif jump == 0:
#print("Same box case")
# Up
# Non boundary
box_number = math.trunc(current) + 1 # Current box
if diff > 0:
#print("Lever up")
#print("Non boundary")
n_clicks = int(((current - math.trunc(current)) + (math.trunc(target) + 1 - target))*10)
# Down
elif diff < 0:
#print("Lever down")
# Non boundary
if target%math.trunc(target) != 0:
#print("Non boundary")
n_clicks = int(round(abs(diff*10)))
# Boundary: click previous level, then current
else:
#print("Boundary")
n_clicks = 0 # Clicking done here (do not click later on)
# Watch out for boundary case: box number 1
if math.trunc(current) == 1:
#print("Special case = 1")
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = 1))[0]
userid_element.click()
else:
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number - 1))[0]
userid_element.click()
# Different box -> 2 possibilities: up or down (each can be boundary or non boundary)
elif jump != 0:
#print ("Different box case")
box_number = math.trunc(current) + 1 # Current box (default)
# Up
if diff > 0:
#print("Lever up")
# Boundary
if target%math.trunc(target) == 0:
if jump == 1:
#print("Special case - Different box, boundary closest box")
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number+1))[0]
userid_element.click()
box_number = target
n_clicks = 1
else:
#print("Boundary")
box_number = target
n_clicks = 1
# Non boundary
else:
#print("Non boundary")
box_number = math.trunc(target) + 1
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
userid_element.click()
n_clicks = int(round((math.trunc(target) + 1 - target)*10))
# Down
elif diff < 0:
#print("Lever down")
# Boundary
if target%math.trunc(target) == 0:
#print("Boundary")
box_number = target
n_clicks = 1
# Non boundary
else:
#print("Non boundary")
box_number = math.trunc(target) + 1
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
userid_element.click()
n_clicks = int(round((math.trunc(target) + 1 - target)*10))
userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
#print("Number of clicks: ", n_clicks)
for i in range(n_clicks):
userid_element.click()
time.sleep(0.25)
print("CO2 emissions: ", read_CO2(), " \t Meets 2C target?", int(read_CO2()[:4]) < 3010)
driver.find_element_by_xpath('/html/body/table[1]/tbody/tr/td/table/tbody/tr[1]/td/table/tbody/tr[1]/td[1]').click()
# move mouse away to avoid collisions
return
```
|
{
"source": "jg-725/IS219-FlaskAppProject",
"score": 3
}
|
#### File: app/cli/__init__.py
```python
import os
import click
from flask.cli import with_appcontext
from app.db import db
@click.command(name='create-db')
@with_appcontext
def create_database():
# get the directory containing this module
root = os.path.dirname(os.path.abspath(__file__))
# build the path to the project's database directory
dbdir = os.path.join(root, '../../database')
# make a directory if it doesn't exist
if not os.path.exists(dbdir):
os.mkdir(dbdir)
db.create_all()
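# Usage sketch (assumption: the command is registered on the Flask app, e.g. app.cli.add_command(create_database)):
#   $ flask create-db
# creates the database directory if needed and calls db.create_all().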
```
#### File: app/logging_config/__init__.py
```python
import logging
import os
from logging.config import dictConfig
import flask
from flask import request, current_app
#from app.logging_config.log_formatters import RequestFormatter
from app import config
log_con = flask.Blueprint('log_con', __name__)
@log_con.after_app_request
def after_request_logging(response):
if request.path == '/favicon.ico':
return response
elif request.path.startswith('/static'):
return response
elif request.path.startswith('/bootstrap'):
return response
return response
@log_con.before_app_first_request
def setup_logs():
# set the name of the apps log folder to logs
logdir = config.Config.LOG_DIR
# make a directory if it doesn't exist
if not os.path.exists(logdir):
os.mkdir(logdir)
logging.config.dictConfig(LOGGING_CONFIG)
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout', # Default is stderr
},
'file.handler': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'handler.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
'file.handler.myapp': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'myapp.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
'file.handler.request': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'request.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
'file.handler.errors': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'errors.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
'file.handler.sqlalchemy': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'sqlalchemy.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
'file.handler.werkzeug': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': os.path.join(config.Config.LOG_DIR,'werkzeug.log'),
'maxBytes': 10000000,
'backupCount': 5,
},
},
'loggers': {
'': { # root logger
'handlers': ['default','file.handler'],
'level': 'DEBUG',
'propagate': True
},
'__main__': { # if __name__ == '__main__'
'handlers': ['default','file.handler'],
'level': 'DEBUG',
'propagate': True
},
'werkzeug': { # if __name__ == '__main__'
'handlers': ['file.handler.werkzeug'],
'level': 'DEBUG',
'propagate': False
},
'sqlalchemy.engine': { # if __name__ == '__main__'
'handlers': ['file.handler.sqlalchemy'],
'level': 'INFO',
'propagate': False
},
'myApp': { # if __name__ == '__main__'
'handlers': ['file.handler.myapp'],
'level': 'DEBUG',
'propagate': False
},
'myerrors': { # if __name__ == '__main__'
'handlers': ['file.handler.errors'],
'level': 'DEBUG',
'propagate': False
},
}
}
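# Usage sketch: once setup_logs() has run, application code writes to the configured loggers, e.g.
#   logging.getLogger('myApp').info('application event')     # -> LOG_DIR/myapp.log
#   logging.getLogger('myerrors').error('something failed')  # -> LOG_DIR/errors.log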
```
#### File: IS219-FlaskAppProject/tests/test_denying_access.py
```python
def test_deny_access(bad_user):
assert bad_user.email == '<EMAIL>'
assert bad_user.password == '<PASSWORD>'
response = bad_user.get("/")
assert response.status_code == 302
```
#### File: IS219-FlaskAppProject/tests/test_successful_login.py
```python
from flaskr import flaskr
def login(client, email, password):
return client.post('/login', data=dict(
email=email,
password=password
), follow_redirects=True)
def test_successful_login(client):
response = login(client, flaskr.app.config['USERNAME'], flaskr.app.config['PASSWORD'])
# Check that the login succeeded.
assert b'Congrats, You were logged in!' in response.data
```
#### File: IS219-FlaskAppProject/tests/test_successful_registration.py
```python
def test_successful_register(successful_registration):
assert successful_registration.email == '<EMAIL>'
assert successful_registration.password == 'Password'
assert successful_registration.confirm == 'Password'
response = successful_registration.get("/dashboard")
assert response.status_code == 302
assert b'Congrats, registration success' in response.data
```
|
{
"source": "jg8481/leon",
"score": 2
}
|
#### File: Workshop-Examples/Shared-Resources/TimeTrackingListener.py
```python
class TimeTrackingListener(object):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LISTENER_API_VERSION = 2
def __init__(self):
self.ROBOT_LIBRARY_LISTENER = self
def _end_test(self, name, attrs):
print ('%s => status: %s, elapsed time: %s ms' % (name, attrs['status'], attrs['elapsedtime']))
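# Usage sketch (assumption): import this file as a library in a Robot Framework suite, e.g.
#   Library    TimeTrackingListener.py
# Because ROBOT_LIBRARY_LISTENER is set to self, the listener reports each test's status and
# elapsed time for that suite.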
```
#### File: packages/robotframework-test-assistant/robotframework-test-assistant.py
```python
import utils
import os
import os.path
import sys
import subprocess
import re
import time
filepath = os.path.dirname(os.path.realpath(__file__))
small_time_delay = 5 ##--> Use this to set up your small time delay. This time delay is in seconds.
medium_time_delay = 20 ##--> Use this to set up your medium time delay. This time delay is in seconds.
large_time_delay = 600 ##--> Use this to set up your large time delay. This time delay is in seconds.
def Clean_Up_Results(string, entities):
"""Leon will clean up the results folder"""
subprocess.call(filepath + '/robotframework-runner.sh Clean-Up-Results', shell=True)
return utils.output('end', 'clean_up_results_ran', utils.translate('clean_up_results_ran'))
def Check_One(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Check-One', shell=True)
return utils.output('end', 'single_check_ran', utils.translate('single_check_ran'))
def Check_Two(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Check-Two', shell=True)
return utils.output('end', 'single_check_ran', utils.translate('single_check_ran'))
def Check_Three(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Check-Three', shell=True)
return utils.output('end', 'single_check_ran', utils.translate('single_check_ran'))
def Check_Four(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Check-Four', shell=True)
return utils.output('end', 'single_check_ran', utils.translate('single_check_ran'))
def Set_Up_Runner_One(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Runner-One', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Check_One(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-One', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Check_Two(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Two', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Check_Three(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Three', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Check_Four(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Four', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Custom_Runner_One(string, entities):
"""Leon will start a custom Robot Framework automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Custom-Runner-One', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Display_Runner_One(string, entities):
"""Leon will display the results of the Robot Framework automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Display-Runner-One', shell=True)
return utils.output('end', 'display_results', utils.translate('display_results'))
def Group_One(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Group-One', shell=True)
return utils.output('end', 'parallel_checks_ran', utils.translate('parallel_checks_ran'))
def Group_Two(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Group-Two', shell=True)
return utils.output('end', 'parallel_checks_ran', utils.translate('parallel_checks_ran'))
def Group_Three(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Group-Three', shell=True)
return utils.output('end', 'parallel_checks_ran', utils.translate('parallel_checks_ran'))
def Group_Four(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Group-Four', shell=True)
return utils.output('end', 'parallel_checks_ran', utils.translate('parallel_checks_ran'))
def Set_Up_Runner_Two(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Runner-Two', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Group_One(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-One', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Group_Two(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Two', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Group_Three(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Three', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Group_Four(string, entities):
"""Leon will set up a custom automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Four', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Custom_Runner_Two(string, entities):
"""Leon will start a custom Robot Framework automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Custom-Runner-Two', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Display_Runner_Two(string, entities):
"""Leon will display the results of the Robot Framework automated check run"""
subprocess.call(filepath + '/robotframework-runner.sh Display-Runner-Two', shell=True)
return utils.output('end', 'display_results', utils.translate('display_results'))
def Slack_Notification_Send_All(string, entities):
"""Leon will send the console log results of the Robot Framework automated check runs to Slack"""
subprocess.call(filepath + '/robotframework-runner.sh Slack-Notification-Send-All', shell=True)
return utils.output('end', 'notify_the_team', utils.translate('notify_the_team'))
def Build_Docker_Containers(string, entities):
"""Leon will build Docker Containers for running Robot Framework scripts"""
subprocess.call(filepath + '/robotframework-runner.sh Build-Docker-Containers', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Clean_Up_Docker_Containers(string, entities):
"""Leon will stop and remove Docker Containers"""
subprocess.call(filepath + '/robotframework-runner.sh Clean-Up-Docker-Containers', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Robot_Framework_Docker_API_Checks(string, entities):
"""Leon will run Robot Framework scripts from within Docker Containers that run locally"""
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Docker-API-Checks', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Robot_Framework_Docker_Random_Order_API_Checks(string, entities):
"""Leon will run Robot Framework scripts from within Docker Containers that run locally"""
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Docker-Random-Order-API-Checks', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Robot_Framework_Docker_MBT_Graphwalker_Checks(string, entities):
"""Leon will run Robot Framework scripts from within Docker Containers that run locally"""
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Docker-MBT-Graphwalker-Checks', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Display_Current_MBT_Graphwalker_Path(string, entities):
"""Leon will display the results of the current Graphwalker Path generated by the Robot Framework Docker Container"""
subprocess.call(filepath + '/robotframework-runner.sh Display-Current-MBT-Graphwalker-Results', shell=True)
return utils.output('end', 'display_results', utils.translate('display_results'))
def Run_Same_Robot_Framework_Docker_MBT_Graphwalker_Checks_Again(string, entities):
"""Leon will run Robot Framework scripts from within Docker Containers that run locally"""
subprocess.call(filepath + '/robotframework-runner.sh Run-Same-Robot-Framework-Docker-MBT-Graphwalker-Checks-Again', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Robot_Framework_Selenium_Desktop_Web_Checks(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Selenium-Desktop-Web-Checks', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
"""Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
subprocess.call(filepath + '/robotframework-runner.sh Start-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Start_Remote_Selenium_Process_Webhook_Container(string, entities):
"""Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
subprocess.call(filepath + '/robotframework-runner.sh Start-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
"""Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
subprocess.call(filepath + '/robotframework-runner.sh Trigger-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Trigger_Remote_Selenium_Process_Webhook_Container(string, entities):
"""Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
subprocess.call(filepath + '/robotframework-runner.sh Trigger-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
"""Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
subprocess.call(filepath + '/robotframework-runner.sh Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run', shell=True)
return utils.output('end', 'multiple_checks_ran', utils.translate('multiple_checks_ran'))
def Custom_Tasks_And_Suites_Runner(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Custom-Tasks-And-Suites-Runner', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('multiple_checks_ran'))
def Set_Up_Custom_Tasks_And_Suites_Runner(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Custom-Tasks-And-Suites-Runner', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Slack_Notification_Send_All(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Slack-Notification-Send-All', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Build_Docker_Containers(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Build-Docker-Containers', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Clean_Up_Docker_Containers(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Clean-Up-Docker-Containers', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Start-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Start_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Start-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Trigger_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Set_Generate_Bug_Risk_Prediction_Scores_For_A_GitHub_Repo(string, entities):
"""Leon will set up a custom automated tasks and suites run"""
subprocess.call(filepath + '/robotframework-runner.sh Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo', shell=True)
return utils.output('end', 'finished_setting_up', utils.translate('finished_setting_up'))
def Display_Custom_Tasks_And_Suites_Runner(string, entities):
"""Leon will display the results of the Robot Framework automated RPA tasks run"""
subprocess.call(filepath + '/robotframework-runner.sh Display-Custom-Tasks-And-Suites-Runner', shell=True)
return utils.output('end', 'display_results', utils.translate('display_results'))
def Generic_Customizable_Time_Delayed_Runner_One(string, entities):
"""Leon will set up a time delayed generic task runner"""
##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
time.sleep(small_time_delay)
##--> Suggestion: Feel free to set the following subprocess.call to any of the previously defined commands in this robotframework-test-assistant.py leon-ai module. The following is just an example triggering a single time delayed check.
subprocess.call(filepath + '/robotframework-runner.sh Check-One', shell=True)
return utils.output('end', 'generic_time_delayed_task_ran', utils.translate('generic_time_delayed_task_ran'))
def Generic_Customizable_Time_Delayed_Runner_Two(string, entities):
"""Leon will set up a time delayed generic task runner"""
##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
time.sleep(small_time_delay)
##--> Suggestion: Feel free to set the following subprocess.call to any of the previously defined commands in this robotframework-test-assistant.py leon-ai module. The following example builds off of a previously created Custom_Runner_Two .csv file.
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Runner-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Three', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Two', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Three', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Runner-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Display-Runner-One', shell=True)
return utils.output('end', 'generic_time_delayed_task_ran', utils.translate('generic_time_delayed_task_ran'))
def Generic_Customizable_Time_Delayed_Runner_Three(string, entities):
"""Leon will set up a time delayed generic task runner"""
##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
time.sleep(small_time_delay)
##--> Suggestion: Feel free to set the following subprocess.call to any of the previously defined commands in this robotframework-test-assistant.py leon-ai module. The following example will chain together the commands for a new Custom_Runner_One .csv file, runs it, and displays results.
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Clean-Up-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Build-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Display-Custom-Tasks-And-Suites-Runner', shell=True)
return utils.output('end', 'generic_time_delayed_task_ran', utils.translate('generic_time_delayed_task_ran'))
def Generic_Customizable_On_Demand_Runner(string, entities):
"""Leon will set up a generic on-demand task runner"""
##--> Suggestion: Feel free to set the following subprocess.call to any of the previously defined commands in this robotframework-test-assistant.py leon-ai module. The following example will chain together the commands for all of the custom runners and sends notifications to the team.
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Clean-Up-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Build-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Runner-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Three', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Two', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-Four', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Check-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Runner-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Selenium-Desktop-Web-Checks', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Runner-Two', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Two', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-One', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Four', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Group-Three', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Runner-Two', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Robot-Framework-Docker-MBT-Graphwalker-Checks', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Up-Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Clean-Up-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Build-Docker-Containers', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Start-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Start-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Custom-Tasks-And-Suites-Runner', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku', shell=True)
subprocess.call(filepath + '/robotframework-runner.sh Slack-Notification-Send-All', shell=True)
return utils.output('end', 'generic_on_demand_task_ran', utils.translate('generic_on_demand_task_ran'))
def Gather_All_Robot_Framework_Test_Results_And_Deploy_Dashboard_To_Heroku(string, entities):
"""Leon will run Robot Framework ReBot and Git commands to deploy a results file to Heroku"""
subprocess.call(filepath + '/robotframework-runner.sh Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku', shell=True)
return utils.output('end', 'gathered_test_results_and_deployed_dashboard_to_heroku', utils.translate('gathered_test_results_and_deployed_dashboard_to_heroku'))
def Help_Confused_Users(string, entities):
"""Leon will try to help confused users who don't know how to use this leon-ai package"""
return utils.output('end', 'help_confused_users', utils.translate('help_confused_users'))
def Jira_Task_Runner(string, entities):
"""Leon will run Robot Framework through the script runner"""
subprocess.call(filepath + '/robotframework-runner.sh Jira-Task-Runner', shell=True)
return utils.output('end', 'single_check_ran', utils.translate('single_check_ran'))
```
|
{
"source": "jg8481/pycharles",
"score": 3
}
|
#### File: pycharles/pycharles/session.py
```python
import request
import json
def _assign_indexes(charles_requests):
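# Number requests sequentially so later lookups and deletions can assert that a
# request's stored index matches its position in the session.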
current_index = 0
for _ in charles_requests:
_.index = current_index
current_index += 1
class CharlesSession(object):
"""Charles session object, initialized with a path to the file.
At its core, the session is a list of request objects, which makes it easy to fool around with.
"""
def __init__(self, request_list=list(), path=None):
if len(request_list) == 0 and path is not None:
with open(path, 'r') as f:
self._all_requests = [request.CharlesRequest(r) for r in json.load(f)]
elif len(request_list) > 0 and path is None:
self._all_requests = [request.CharlesRequest(r) for r in request_list]
else:
self.fail('only one initialization parameter should be specified')
_assign_indexes(self._all_requests)
def requests_count(self):
return len(self._all_requests)
def query_request_with_index(self, index):
"""Query and return the request at the specified index.
Parameters
----------
index: the index of the request
Returns
-------
the query result as a charles request.
"""
assert self._all_requests[index].index == index
return self._all_requests[index]
def query_requests_with_properties(self, properties):
"""Query and return requests with the defined properties in a CharlesSession object.
Parameters
----------
properties: dict with properties to query.
example: {
"host": "httpbin.org",
"ssl.protocol": "TLSv1.2"
}
Returns
-------
result: the query result as a new session.
if the example is used as an argument, all requests using TLSv1.2 with the host httpbin.org will be returned
"""
def _request_key_matches_query(r, k, v):
if k[-1] == '.':
self.fail('cannot end property key with \'.\'')
if not k.__contains__('.'):
return r[k] == v
nested_keys = k.split('.') # will *always* be at least 2 keys in length
property_value = r[nested_keys[0]] # enumerate once
for nested_key in nested_keys[1:]:
property_value = property_value[nested_key]
return property_value == v
result = list()
for charles_request in self._all_requests:
if all(_request_key_matches_query(charles_request.request_dict, k, v)
for k, v in properties.items()):
result.append(charles_request.request_dict)
return CharlesSession(request_list=result)
def delete_request_at(self, index):
"""Delete the request at the specified index"""
assert self._all_requests[index].index == index
del self._all_requests[index]
def save(self, path):
"""Query and return requests with the defined properties in a CharlesSession object.
Parameters
----------
path: path to save the new session file.
"""
bag = [charles_request.request_dict for charles_request in self._all_requests]
with open(path, 'w+') as f:
json.dump(bag, f)
def fail(self, msg):
print('CharlesSession instance failed with error: {}'.format(msg))
exit(-1)
def print_json(self):
print(json.dumps(self._all_requests, indent=4))
```
|
{
"source": "jg8610/jack",
"score": 3
}
|
#### File: io/embeddings/glove.py
```python
import logging
import numpy as np
logger = logging.getLogger(__name__)
def load_glove(stream, vocab=None):
"""Loads GloVe file and merges it if optional vocabulary
Args:
stream (iterable): An opened filestream to the GloVe file.
vocab (dict=None): Word2idx dict of existing vocabulary.
Returns:
return_vocab (Vocabulary), lookup (matrix); Vocabulary contains the
word2idx and the matrix contains the embedded words.
"""
logger.info('Loading GloVe vectors ..')
word2idx = {}
first_line = stream.readline()
dim = len(first_line.split()) - 1
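# Pre-allocate the lookup matrix in chunks of 500k rows; it is grown on demand
# below and trimmed to the final vocabulary size once the stream is exhausted.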
lookup = np.empty([500000, dim], dtype=np.float)
lookup[0] = np.fromstring(first_line.split(maxsplit=1)[1], sep=' ')
word2idx[first_line.split(maxsplit=1)[0].decode('utf-8')] = 0
n = 1
for line in stream:
word, vec = line.rstrip().split(maxsplit=1)
if (vocab is None or word in vocab) and word not in word2idx:
word = word.decode('utf-8')
idx = len(word2idx)
word2idx[word] = idx
if idx > np.size(lookup, axis=0) - 1:
lookup.resize([lookup.shape[0] + 500000, lookup.shape[1]])
lookup[idx] = np.fromstring(vec, sep=' ')
n += 1
lookup.resize([len(word2idx), dim])
logger.info('Loading GloVe vectors completed.')
return word2idx, lookup
if __name__ == "__main__":
pickle_tokens = False
import zipfile
with zipfile.ZipFile('../data/GloVe/glove.840B.300d.zip') as zf:
with zf.open('glove.840B.300d.txt', 'r') as f:
vocab, lookup = load_glove(f)
# pickle token set
if pickle_tokens:
import pickle
glove_words = set(vocab.keys())
pickle.dump(glove_words, open('./data/glove_tokens.pickle', 'wb'))
```
#### File: jack/io/FB15K2jtr.py
```python
from collections import defaultdict
import gc
import json
import argparse
def load_fb15k_triples(path):
"""
Loads the raw data from file provided.
Args:
path: path to the file
Returns: triples
"""
with open(path, 'r') as f:
triples = [line.strip('\n').split('\t') for line in f.readlines()]
return triples
def extract_unique_entities_and_relations(triples):
"""
Identifies unique entities and relation types in collection of triples.
Args:
triples: List of string triples.
Returns:
unique_entities: List of strings
unique_relations: List of strings
"""
s_entities = set([triple[0] for triple in triples])
o_entities = set([triple[2] for triple in triples])
r_types = set([triple[1] for triple in triples])
unique_relations = sorted(list(r_types))
unique_entities = sorted(list(s_entities | o_entities)) # union of sets
return unique_entities, unique_relations
def get_facts_per_entity(triples):
"""
Obtain dictionary with all train fact ids that contain an entity.
Args:
triples: List of fact triples
Returns:
Dictionary entity --> fact IDs it participates in
"""
d = defaultdict(set)
for i_triple, triple in enumerate(triples):
d[triple[0]].add(i_triple)
d[triple[2]].add(i_triple)
return d
def get_facts_per_relation(triples):
"""
Obtain dictionary with all train fact ids that contain a relation type.
Args:
triples: List of fact triples
Returns:
Dictionary relation type --> fact IDs it participates in
"""
d = defaultdict(set)
for i_triple, triple in enumerate(triples):
d[triple[1]].add(i_triple)
return d
def get_fact_neighbourhoods(triples, facts_per_entity, facts_per_relation,
include_relations=False):
"""
Extracts neighbouring facts for a collection of triples. Neighbouring
facts of a fact f are those facts that share at least one entity with f.
If relations are included, facts which share a relation are also considered
neighbours.
Args:
triples: list of facts triples
facts_per_entity: dictionary; The facts an entity appears in
facts_per_relation: dictionary; The facts a relation appears in
include_relations: boolean. whether facts sharing the relation should
be considered neighbours as well.
Returns:
fact_neighbourhoods: dictionary mapping fact ID to set of fact IDs.
"""
fact_neighbourhoods = defaultdict(set)
for i_triple, triple in enumerate(triples):
# get triple ids which share subject, object or rel. with current triple
subject_neighbours = facts_per_entity[triple[0]]
object_neighbours = facts_per_entity[triple[2]]
relation_neighbours = set()
if include_relations:
relation_neighbours = facts_per_relation[triple[1]]
fact_neighbourhoods[i_triple].update(subject_neighbours)
fact_neighbourhoods[i_triple].update(object_neighbours)
fact_neighbourhoods[i_triple].update(relation_neighbours)
return fact_neighbourhoods
def convert_fb15k2(triples, neighbourhoods, unique_entities):
"""
Converts into jack format.
Args:
triples: fact triples that should be converted.
neighbourhoods: dictionary of supporting facts per triple
unique_entities: List of strings
Returns:
jack formatted fb15k data.
"""
# figure out cases with multiple possible true answers
multiple_answers_dict = defaultdict(set)
for triple in triples:
multiple_answers_dict[triple[:2]].add(triple[2])
instances = []
for i, triple in enumerate(triples):
if not i % 1000:
# print(i)
gc.collect()
# correct answers for this (s,r,.) case
correct_answers = multiple_answers_dict[triple[:2]]
# obtain supporting facts for this triple
neighbour_ids = neighbourhoods[i]
neighbour_triples = [triples[ID] for ID in neighbour_ids]
# create a single jack instance
qset_dict = {}
support_texts = [" ".join([str(s), str(r), str(o)]) for (s, r, o) in neighbour_triples]
qset_dict['support'] = [{'text': t} for t in support_texts]
qset_dict['questions'] = [{
"question": " ".join([str(triple[0]), str(triple[1])]), # subject and relation
"candidates": [], # use global candidates instead.
"answers": [{'text': str(a)} for a in correct_answers] # object
}]
instances.append(qset_dict)
return {
'meta': 'FB15K with entity neighbours as supporting facts.',
'globals': {
'candidates': [{'text': str(i)} for (i, u) in enumerate(unique_entities)]
},
'instances': instances
}
def compress_triples(string_triples, unique_entities, unique_relations):
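# Map entity and relation strings to their integer ids (their positions in the
# sorted unique lists), which keeps the neighbourhood computation compact.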
id_triples = []
dict_unique_entities = {elem: i for i, elem in enumerate(unique_entities)}
dict_unique_relations = {elem: i for i, elem in enumerate(unique_relations)}
for (s, r, o) in string_triples:
s_id = dict_unique_entities[s]
r_id = dict_unique_relations[r]
o_id = dict_unique_entities[o]
id_triples.append((s_id, r_id, o_id))
return id_triples
def main():
parser = argparse.ArgumentParser(description='FB15K2 dataset to jack format converter.')
#
parser.add_argument('infile',
help="dataset path you're interested in, train/dev/test."
"(e.g. data/FB15k/FB15k/freebase_mtr100_mte100-train.txt)")
parser.add_argument('reffile',
help="reference file - use training set path here.")
parser.add_argument('outfile',
help="path to the jack format -generated output file (e.g. data/FB15K2/FB15k_train.jack.json)")
# parser.add_argument('dataset', choices=['cnn', 'dailymail'],
# help="which dataset to access: cnn or dailymail")
# parser.add_argument('split', choices=['train', 'dev', 'test'],
# help="which split of the dataset to io: train, dev or test")
args = parser.parse_args()
# load data from files into fact triples
triples = load_fb15k_triples(args.infile)
reference_triples = load_fb15k_triples(args.reffile)
# unique entity and relation types in reference triples
unique_entities, unique_relations = \
extract_unique_entities_and_relations(reference_triples)
# represent string triples with numeric IDs for entities and relations
triples = compress_triples(triples, unique_entities, unique_relations)
reference_triples = compress_triples(reference_triples, unique_entities, unique_relations)
# get neighbouring facts for each fact in triples
facts_per_entity = get_facts_per_entity(reference_triples)
facts_per_relation = get_facts_per_relation(reference_triples)
neighbourhoods = get_fact_neighbourhoods(triples, facts_per_entity, facts_per_relation)
# dump the entity and relation ids for understanding the jack contents.
with open('fb15k_entities_relations.json', 'w') as f:
d = {"unique_entities": unique_entities,
"unique_relations": unique_relations}
json.dump(d, f)
corpus = convert_fb15k2(triples, neighbourhoods, unique_entities)
with open(args.outfile, 'w') as outfile:
json.dump(corpus, outfile, indent=2)
if __name__ == "__main__":
main()
```
#### File: extractive_qa/tensorflow/answer_layer.py
```python
import tensorflow as tf
from jack.tfutil import misc
from jack.tfutil import sequence_encoder
from jack.tfutil.segment import segment_top_k
def answer_layer(encoded_question, question_length, encoded_support, support_length,
support2question, answer2support, is_eval, correct_start=None, beam_size=1, max_span_size=10000,
encoder=None, module='bilinear', repr_dim=100, **kwargs):
if module == 'bilinear':
return bilinear_answer_layer(
repr_dim, encoded_question, question_length, encoded_support, support_length,
support2question, answer2support, is_eval, beam_size, max_span_size)
elif module == 'mlp':
return mlp_answer_layer(repr_dim, encoded_question, question_length, encoded_support, support_length,
support2question, answer2support, is_eval, beam_size, max_span_size)
elif module == 'conditional':
return conditional_answer_layer(
repr_dim, encoded_question, question_length, encoded_support, support_length,
correct_start, support2question, answer2support, is_eval, beam_size, max_span_size)
elif module == 'conditional_bilinear':
return conditional_answer_layer(
repr_dim, encoded_question, question_length, encoded_support, support_length,
correct_start, support2question, answer2support, is_eval, beam_size, max_span_size, bilinear=True)
elif module == 'bidaf':
encoded_support_end = sequence_encoder.encoder(
encoded_support, support_length, name='encoded_support_end', **encoder)
encoded_support_end = tf.concat([encoded_support, encoded_support_end], 2)
return bidaf_answer_layer(encoded_support, encoded_support_end, support_length,
support2question, answer2support, is_eval, beam_size=beam_size, max_span_size=max_span_size)
else:
raise ValueError("Unknown answer layer type: %s" % module)
def compute_question_state(encoded_question, question_length):
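# Collapse the encoded question into a single state vector via learned
# attention; positions beyond the true question length are masked out before
# the softmax.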
attention_scores = tf.layers.dense(encoded_question, 1, name="question_attention")
q_mask = misc.mask_for_lengths(question_length)
attention_scores = attention_scores + tf.expand_dims(q_mask, 2)
question_attention_weights = tf.nn.softmax(attention_scores, 1, name="question_attention_weights")
question_state = tf.reduce_sum(question_attention_weights * encoded_question, 1)
return question_state
def bilinear_answer_layer(size, encoded_question, question_length, encoded_support, support_length,
support2question, answer2support, is_eval, beam_size=1,
max_span_size=10000):
"""Answer layer for multiple paragraph QA."""
# computing single time attention over question
size = encoded_support.get_shape()[-1].value
question_state = compute_question_state(encoded_question, question_length)
# compute logits
hidden = tf.gather(tf.layers.dense(question_state, 2 * size, name="hidden"), support2question)
hidden_start, hidden_end = tf.split(hidden, 2, 1)
support_mask = misc.mask_for_lengths(support_length)
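# Score every support token as a potential span start/end with a bilinear form:
# a dot product between its encoding and the question-conditioned vectors.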
start_scores = tf.einsum('ik,ijk->ij', hidden_start, encoded_support)
start_scores = start_scores + support_mask
end_scores = tf.einsum('ik,ijk->ij', hidden_end, encoded_support)
end_scores = end_scores + support_mask
return compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
beam_size, max_span_size)
def mlp_answer_layer(size, encoded_question, question_length, encoded_support, support_length,
support2question, answer2support, is_eval, beam_size=1, max_span_size=10000):
"""Answer layer for multiple paragraph QA."""
# computing single time attention over question
question_state = compute_question_state(encoded_question, question_length)
# compute logits
static_input = tf.concat([tf.gather(tf.expand_dims(question_state, 1), support2question) * encoded_support,
encoded_support], 2)
hidden = tf.gather(tf.layers.dense(question_state, 2 * size, name="hidden_1"), support2question)
hidden = tf.layers.dense(
static_input, 2 * size, use_bias=False, name="hidden_2") + tf.expand_dims(hidden, 1)
hidden_start, hidden_end = tf.split(tf.nn.relu(hidden), 2, 2)
support_mask = misc.mask_for_lengths(support_length)
start_scores = tf.layers.dense(hidden_start, 1, use_bias=False, name="start_scores")
start_scores = tf.squeeze(start_scores, [2])
start_scores = start_scores + support_mask
end_scores = tf.layers.dense(hidden_end, 1, use_bias=False, name="end_scores")
end_scores = tf.squeeze(end_scores, [2])
end_scores = end_scores + support_mask
return compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
beam_size, max_span_size)
def compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
beam_size=1, max_span_size=10000, correct_start=None):
max_support_length = tf.shape(start_scores)[1]
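# Compute, for every support, its 0-based document index within its own
# question (supports are grouped per question in support2question).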
_, _, num_doc_per_question = tf.unique_with_counts(support2question)
offsets = tf.cumsum(num_doc_per_question, exclusive=True)
doc_idx_for_support = tf.range(tf.shape(support2question)[0]) - tf.gather(offsets, support2question)
def train():
gathered_end_scores = tf.gather(end_scores, answer2support)
gathered_start_scores = tf.gather(start_scores, answer2support)
if correct_start is not None:
# assuming we know the correct start we only consider ends after that
left_mask = misc.mask_for_lengths(tf.cast(correct_start, tf.int32), max_support_length, mask_right=False)
gathered_end_scores = gathered_end_scores + left_mask
predicted_start_pointer = tf.argmax(gathered_start_scores, axis=1, output_type=tf.int32)
predicted_end_pointer = tf.argmax(gathered_end_scores, axis=1, output_type=tf.int32)
return (start_scores, end_scores,
tf.gather(doc_idx_for_support, answer2support), predicted_start_pointer, predicted_end_pointer)
def eval():
# we collect spans for top k starts and top k ends and select the top k from those top 2k
doc_idx1, start_pointer1, end_pointer1, span_score1 = _get_top_k(
start_scores, end_scores, beam_size, max_span_size, support2question)
doc_idx2, end_pointer2, start_pointer2, span_score2 = _get_top_k(
end_scores, start_scores, beam_size, -max_span_size, support2question)
doc_idx = tf.concat([doc_idx1, doc_idx2], 1)
start_pointer = tf.concat([start_pointer1, start_pointer2], 1)
end_pointer = tf.concat([end_pointer1, end_pointer2], 1)
span_score = tf.concat([span_score1, span_score2], 1)
_, idx = tf.nn.top_k(span_score, beam_size)
r = tf.range(tf.shape(span_score)[0], dtype=tf.int32)
r = tf.reshape(tf.tile(tf.expand_dims(r, 1), [1, beam_size]), [-1, 1])
idx = tf.concat([r, tf.reshape(idx, [-1, 1])], 1)
doc_idx = tf.gather_nd(doc_idx, idx)
start_pointer = tf.gather_nd(start_pointer, idx)
end_pointer = tf.gather_nd(end_pointer, idx)
return (start_scores, end_scores, tf.gather(doc_idx_for_support, doc_idx), start_pointer, end_pointer)
return tf.cond(is_eval, eval, train)
def _get_top_k(scores1, scores2, k, max_span_size, support2question):
max_support_length = tf.shape(scores1)[1]
doc_idx, pointer1, topk_scores1 = segment_top_k(scores1, support2question, k)
# [num_questions * beam_size]
doc_idx_flat = tf.reshape(doc_idx, [-1])
pointer_flat1 = tf.reshape(pointer1, [-1])
# [num_questions * beam_size, support_length]
scores_gathered2 = tf.gather(scores2, doc_idx_flat)
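# A negative max_span_size signals that scores1 hold end scores: shift the
# pointer left so the mask below keeps only the max_span_size positions ending
# at the original pointer.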
if max_span_size < 0:
pointer_flat1, max_span_size = pointer_flat1 + max_span_size + 1, -max_span_size
left_mask = misc.mask_for_lengths(tf.cast(pointer_flat1, tf.int32),
max_support_length, mask_right=False)
right_mask = misc.mask_for_lengths(tf.cast(pointer_flat1 + max_span_size, tf.int32),
max_support_length)
scores_gathered2 = scores_gathered2 + left_mask + right_mask
pointer2 = tf.argmax(scores_gathered2, axis=1, output_type=tf.int32)
topk_score2 = tf.gather_nd(scores2, tf.stack([doc_idx_flat, pointer2], 1))
return doc_idx, pointer1, tf.reshape(pointer2, [-1, k]), topk_scores1 + tf.reshape(topk_score2, [-1, k])
def conditional_answer_layer(size, encoded_question, question_length, encoded_support, support_length,
correct_start, support2question, answer2support, is_eval, beam_size=1, max_span_size=10000,
bilinear=False):
question_state = compute_question_state(encoded_question, question_length)
question_state = tf.gather(question_state, support2question)
# Prediction
# start
if bilinear:
hidden_start = tf.layers.dense(question_state, size, name="hidden_start")
start_scores = tf.einsum('ik,ijk->ij', hidden_start, encoded_support)
else:
static_input = tf.concat([tf.expand_dims(question_state, 1) * encoded_support, encoded_support], 2)
hidden_start = tf.layers.dense(question_state, size, name="hidden_start_1")
hidden_start = tf.layers.dense(
static_input, size, use_bias=False, name="hidden_start_2") + tf.expand_dims(hidden_start, 1)
start_scores = tf.layers.dense(tf.nn.relu(hidden_start), 1, use_bias=False, name="start_scores")
start_scores = tf.squeeze(start_scores, [2])
support_mask = misc.mask_for_lengths(support_length)
start_scores = start_scores + support_mask
max_support_length = tf.shape(start_scores)[1]
_, _, num_doc_per_question = tf.unique_with_counts(support2question)
offsets = tf.cumsum(num_doc_per_question, exclusive=True)
doc_idx_for_support = tf.range(tf.shape(support2question)[0]) - tf.gather(offsets, support2question)
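# At eval time use the top-k predicted starts per question; during training
# condition on the gold support (answer2support) and the gold start position.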
doc_idx, start_pointer = tf.cond(
is_eval,
lambda: segment_top_k(start_scores, support2question, beam_size)[:2],
lambda: (tf.expand_dims(answer2support, 1), tf.expand_dims(correct_start, 1)))
doc_idx_flat = tf.reshape(doc_idx, [-1])
start_pointer = tf.reshape(start_pointer, [-1])
start_state = tf.gather_nd(encoded_support, tf.stack([doc_idx_flat, start_pointer], 1))
start_state.set_shape([None, size])
encoded_support_gathered = tf.gather(encoded_support, doc_idx_flat)
question_state = tf.gather(question_state, doc_idx_flat)
if bilinear:
hidden_end = tf.layers.dense(tf.concat([question_state, start_state], 1), size, name="hidden_end")
end_scores = tf.einsum('ik,ijk->ij', hidden_end, encoded_support_gathered)
else:
end_input = tf.concat([tf.expand_dims(start_state, 1) * encoded_support_gathered,
tf.gather(static_input, doc_idx_flat)], 2)
hidden_end = tf.layers.dense(tf.concat([question_state, start_state], 1), size,
name="hidden_end_1")
hidden_end = tf.layers.dense(
end_input, size, use_bias=False, name="hidden_end_2") + tf.expand_dims(hidden_end, 1)
end_scores = tf.layers.dense(tf.nn.relu(hidden_end), 1, use_bias=False, name="end_scores")
end_scores = tf.squeeze(end_scores, [2])
end_scores = end_scores + tf.gather(support_mask, doc_idx_flat)
def train():
predicted_end_pointer = tf.argmax(end_scores, axis=1, output_type=tf.int32)
return start_scores, end_scores, doc_idx, start_pointer, predicted_end_pointer
def eval():
# [num_questions * beam_size, support_length]
left_mask = misc.mask_for_lengths(tf.cast(start_pointer, tf.int32),
max_support_length, mask_right=False)
right_mask = misc.mask_for_lengths(tf.cast(start_pointer + max_span_size, tf.int32),
max_support_length)
masked_end_scores = end_scores + left_mask + right_mask
predicted_ends = tf.argmax(masked_end_scores, axis=1, output_type=tf.int32)
return (start_scores, masked_end_scores,
tf.gather(doc_idx_for_support, doc_idx_flat), start_pointer, predicted_ends)
return tf.cond(is_eval, eval, train)
def bidaf_answer_layer(encoded_support_start, encoded_support_end, support_length,
support2question, answer2support, is_eval, beam_size=1, max_span_size=10000):
# BiLSTM(M) = M^2 = encoded_support_end
start_scores = tf.squeeze(tf.layers.dense(encoded_support_start, 1, use_bias=False), 2)
end_scores = tf.squeeze(tf.layers.dense(encoded_support_end, 1, use_bias=False), 2)
# mask out-of-bounds slots by adding -1000
support_mask = misc.mask_for_lengths(support_length)
start_scores = start_scores + support_mask
end_scores = end_scores + support_mask
return compute_spans(start_scores, end_scores, answer2support, is_eval,
support2question, beam_size=beam_size, max_span_size=max_span_size)
```
#### File: readers/knowledge_base_population/scores.py
```python
import abc
import sys
import tensorflow as tf
from jack.readers.knowledge_base_population.similarities import negative_l1_distance
class BaseModel(metaclass=abc.ABCMeta):
def __init__(self, subject_embeddings=None, predicate_embeddings=None, object_embeddings=None, *args, **kwargs):
"""
Abstract class inherited by all models.
Args:
subject_embeddings: (batch_size, entity_embedding_size) Tensor.
predicate_embeddings: (batch_size, predicate_embedding_size) Tensor.
object_embeddings: (batch_size, entity_embedding_size) Tensor.
"""
self.subject_embeddings = subject_embeddings
self.predicate_embeddings = predicate_embeddings
self.object_embeddings = object_embeddings
@abc.abstractmethod
def __call__(self):
raise NotImplementedError
@property
def parameters(self):
return []
class TranslatingModel(BaseModel):
def __init__(self, similarity_function=negative_l1_distance, *args, **kwargs):
"""
Implementation of the Translating Embeddings model [1].
[1] <NAME> al. - Translating Embeddings for Modeling Multi-relational Data - NIPS 2013
Args:
similarity_function: Similarity function.
"""
super().__init__(*args, **kwargs)
self.similarity_function = similarity_function
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
translated_subject_embedding = self.subject_embeddings + self.predicate_embeddings
return self.similarity_function(translated_subject_embedding, self.object_embeddings)
class BilinearDiagonalModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of the Bilinear-Diagonal model [1]
[1] <NAME>. et al. - Embedding Entities and Relations for Learning and Inference in Knowledge Bases - ICLR 2015
Args:
similarity_function: Similarity function.
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
scaled_subject_embedding = self.subject_embeddings * self.predicate_embeddings
return tf.reduce_sum(scaled_subject_embedding * self.object_embeddings, axis=1)
class BilinearModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of the Bilinear model [1]
[1] <NAME> al. - A Three-Way Model for Collective Learning on Multi-Relational Data - ICML 2011
Args:
similarity_function: Similarity function.
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
es, emb_size = tf.expand_dims(self.subject_embeddings, 1), tf.shape(self.subject_embeddings)[1]
W = tf.reshape(self.predicate_embeddings, (-1, emb_size, emb_size))
sW = tf.matmul(es, W)[:, 0, :]
return tf.reduce_sum(sW * self.object_embeddings, axis=1)
class ComplexModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of the ComplEx model [1]
[1] <NAME> al. - Complex Embeddings for Simple Link Prediction - ICML 2016
Args:
embedding size: Embedding size.
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
es_re, es_im = tf.split(value=self.subject_embeddings, num_or_size_splits=2, axis=1)
eo_re, eo_im = tf.split(value=self.object_embeddings, num_or_size_splits=2, axis=1)
ew_re, ew_im = tf.split(value=self.predicate_embeddings, num_or_size_splits=2, axis=1)
def dot3(arg1, rel, arg2):
return tf.reduce_sum(arg1 * rel * arg2, axis=1)
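# ComplEx score Re(<e_s, w_r, conj(e_o)>), expanded into four real-valued
# trilinear products over the real and imaginary embedding halves.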
score = dot3(es_re, ew_re, eo_re) + dot3(es_re, ew_im, eo_im) + dot3(es_im, ew_re, eo_im) - dot3(es_im, ew_im, eo_re)
return score
# Aliases
TransE = TranslatingEmbeddings = TranslatingModel
DistMult = BilinearDiagonal = BilinearDiagonalModel
RESCAL = Bilinear = BilinearModel
ComplEx = ComplexE = ComplexModel
def get_function(function_name):
this_module = sys.modules[__name__]
if not hasattr(this_module, function_name):
raise ValueError('Unknown model: {}'.format(function_name))
return getattr(this_module, function_name)
```
#### File: jack/tfutil/segment.py
```python
import tensorflow as tf
def segment_softmax(scores, segment_ids):
"""Given scores and a partition, converts scores to probs by performing
softmax over all rows within a partition."""
# Subtract max
num_segments = tf.reduce_max(segment_ids) + 1
if len(scores.get_shape()) == 2:
max_per_partition = tf.unsorted_segment_max(tf.reduce_max(scores, axis=1), segment_ids, num_segments)
scores -= tf.expand_dims(tf.gather(max_per_partition, segment_ids), axis=1)
else:
max_per_partition = tf.unsorted_segment_max(scores, segment_ids, num_segments)
scores -= tf.gather(max_per_partition, segment_ids)
# Compute probs
scores_exp = tf.exp(scores)
if len(scores.get_shape()) == 2:
scores_exp_sum_per_partition = tf.unsorted_segment_sum(tf.reduce_sum(scores_exp, axis=1), segment_ids,
num_segments)
probs = scores_exp / tf.expand_dims(tf.gather(scores_exp_sum_per_partition, segment_ids), axis=1)
else:
scores_exp_sum_per_partition = tf.unsorted_segment_sum(scores_exp, segment_ids, num_segments)
probs = scores_exp / tf.gather(scores_exp_sum_per_partition, segment_ids)
return probs
def segment_argmax(input, segment_ids):
"""Computes row and col indices Tensors of the segment max in the 2D input."""
with tf.name_scope("segment_argmax"):
num_partitions = tf.reduce_max(segment_ids) + 1
is_max = segment_is_max(input, segment_ids)
# The current is_max could still contain multiple True entries per
# partition. As long as they are in the same row, that is not a problem.
# However, we do need to remove duplicate Trues in the same partition
# in multiple rows.
# For that, we'll multiply is_max with the row indices + 1 and perform
# segment_is_max() again.
rows = tf.shape(input)[0]
cols = tf.shape(input)[1]
row_indices = tf.tile(tf.expand_dims(tf.range(rows), 1), [1, cols])
is_max = segment_is_max(tf.cast(is_max, tf.int32) * (row_indices + 1), segment_ids)
# Get selected rows and columns
row_selected = tf.reduce_any(is_max, axis=1)
row_indices = tf.squeeze(tf.where(row_selected))
rows_selected = tf.reduce_sum(tf.cast(row_selected, tf.int64))
# Assert rows_selected is correct & ensure row_indices is always 1D
with tf.control_dependencies([tf.assert_equal(rows_selected, num_partitions)]):
row_indices = tf.reshape(row_indices, [-1])
selected_rows_is_max = tf.gather(is_max, row_indices)
col_indices = tf.argmax(tf.cast(selected_rows_is_max, tf.int64), axis=1)
# Pack indices
return row_indices, col_indices
def segment_is_max(inputs, segment_ids):
num_segments = tf.reduce_max(segment_ids) + 1
if len(inputs.get_shape()) > 1:
inputs_max = tf.reduce_max(inputs, reduction_indices=list(range(1, len(inputs.get_shape()))))
else:
inputs_max = inputs
max_per_partition = tf.unsorted_segment_max(inputs_max, segment_ids, num_segments)
return tf.equal(inputs, tf.gather(max_per_partition, segment_ids))
def segment_sample_select(probs, segment_ids):
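# Draws one element per segment with probability proportional to probs: the
# scan accumulates the per-segment probability mass (offset by a uniform
# sample) and flags the element where the running sum crosses zero.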
num_segments = tf.reduce_max(segment_ids) + 1
sampled = tf.random_uniform([num_segments])
def scan_fn(acc, x):
p, i = x[0], x[1]
prev_v = tf.gather(acc[0], i)
new_probs = acc[0] + tf.one_hot(i, num_segments, p)
select = tf.logical_and(tf.less(prev_v, 0.0), tf.greater_equal(prev_v + p, 0.0))
return new_probs, select
_, selection = tf.scan(scan_fn, (probs, segment_ids), initializer=(-sampled, False))
return selection
def segment_top_k(input, segment_ids, k):
"""Computes top k elements for segments in 2D input.
segment_idx needs to be sorted.
Returns:
[num_segments, k]- tensors rows, columns, scores for best k results for each segment
"""
with tf.name_scope("segment_top_k"):
all_top_k_scores, all_top_k_indices = tf.nn.top_k(input, k)
rows = tf.tile(tf.expand_dims(tf.range(tf.shape(input)[0], dtype=tf.int32), 1), [1, k])
result_rows = tf.zeros([k], tf.int32)
result_columns = tf.zeros([k], tf.int32)
result_scores = tf.zeros([k], tf.float32)
def replace(old, new):
return tf.concat([old[:-1], tf.expand_dims(new, 0)], 0)
def scan_fn(acc, x):
result_row, result_column, result_score, last_index = acc
row_indices = x[0]
segment_idx = x[1]
top_k_scores = x[2]
col_indices = x[3]
def merge():
new_result_row = tf.concat([result_row, row_indices], 0)
new_result_column = tf.concat([result_column, col_indices], 0)
new_result_score = tf.concat([result_score, top_k_scores], 0)
new_result_score, new_top_k_indices = tf.nn.top_k(new_result_score, k)
new_result_row = tf.gather(new_result_row, new_top_k_indices[:k])
new_result_column = tf.gather(new_result_column, new_top_k_indices[:k])
return new_result_row, new_result_column, new_result_score, segment_idx
return tf.cond(segment_idx > last_index,
lambda: (row_indices, col_indices, top_k_scores, segment_idx),
merge)
last_index = -1
result_rows, result_columns, result_scores, _ = tf.scan(
scan_fn, (rows, segment_ids, all_top_k_scores, all_top_k_indices),
initializer=(result_rows, result_columns, result_scores, last_index))
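# The scan keeps a running top-k that is only final at the last row of each
# segment, so gather the rows where segment_ids is about to change (plus the
# very last row).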
to_gather = tf.squeeze(tf.where((segment_ids[1:] - segment_ids[:-1]) > 0))
to_gather = tf.concat([to_gather, tf.shape(segment_ids, out_type=tf.int64) - 1], 0)
return tf.gather(result_rows, to_gather), tf.gather(result_columns, to_gather), tf.gather(result_scores,
to_gather)
```
#### File: jack/preprocess/test_vocab_prune.py
```python
from pprint import pprint
from jack.core import QASetting
from jack.util import preprocessing
def test_vocab():
train_data = [
QASetting(question='A person is training his horse for a competition.',
support=['A person on a horse jumps over a broken down airplane.'],
atomic_candidates=['entailment', 'neutral', 'contradiction'])
]
print('build vocab based on train data')
train_vocab = preprocessing.fill_vocab(train_data)
train_vocab.freeze()
pprint(train_vocab.sym2freqs)
pprint(train_vocab.sym2id)
MIN_VOCAB_FREQ, MAX_VOCAB_CNT = 2, 10
train_vocab = train_vocab.prune(MIN_VOCAB_FREQ, MAX_VOCAB_CNT)
pprint(train_vocab.sym2freqs)
pprint(train_vocab.sym2id)
print('encode train data')
train_data = preprocessing.nlp_preprocess(train_data[0].question, train_vocab)[0]
print(train_data)
```
|
{
"source": "jgabes/mortgage_tool",
"score": 4
}
|
#### File: jgabes/mortgage_tool/budget.py
```python
__author__ = "<NAME> <<EMAIL>>"
import dates
import history
class Budget:
"""Budget object which keeps a running tally of all the money"""
def __init__(self, name, starting_amount=0, date=None):
self._name = name
self._cash = starting_amount
self._incomes = []
self._expenses = []
self._properties = []
self._taxes = 0
self._date = date if date is not None else dates.Date.today()
self._pre_tax_income = 0
self._pre_tax_expenses = 0
self._history = history.History(self._date, self)
def step(self, days=0, months=0, years=0):
for y in range(years):
self._step_year()
for m in range(months):
self._step_month()
for d in range(days):
self._step_day()
def _step_year(self):
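# Advance one day at a time until the same day-of-month and month come around
# again, i.e. exactly one calendar year later.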
day = self._date.day
month = self._date.month
self._step_day()
while day != self._date.day or month != self._date.month:
self._step_day()
def _step_month(self):
day = self._date.day
self._step_day()
while day != self._date.day:
self._step_day()
def _step_day(self):
self._history.update()
eom = self._date.eom
self.on_day_end()
if eom:
self.on_month_end()
self._date.next()
@property
def amount(self):
return self._cash
@property
def name(self):
return self._name
@property
def date(self):
return self._date
@property
def history(self):
return self._history
def deposit(self, other):
self._cash += other
def withdraw(self, other):
self._cash -= other
def register_expenses(self, expense):
self._expenses.append(expense)
self._pre_tax_expenses += expense.amount
def register_income(self, income):
self._incomes.append(income)
self._pre_tax_income += income.amount
def register_property(self, property):
self._properties.append(property)
self.deposit(property.value)
def on_month_end(self):
for income in self._incomes:
income.on_month_end()
for expense in self._expenses:
expense.on_month_end()
for property in self._properties:
property.on_month_end()
def on_day_end(self):
for income in self._incomes:
income.on_day_end()
for expense in self._expenses:
expense.on_day_end()
```
#### File: jgabes/mortgage_tool/history.py
```python
import collections
class History:
def __init__(self, start_date, reference_budget):
self._budget = reference_budget
self._history = collections.OrderedDict()
self._history[start_date] = self._budget.amount
def update(self):
if self._budget.date in self._history:
raise ValueError("This day is already recorded")
else:
self._history[self._budget.date] = self._budget.amount
def __call__(self, *args, **kwargs):
return self._history
```
#### File: jgabes/mortgage_tool/property.py
```python
import cash_flow
class Property:
def __init__(self, name, budget, initial_value, appreciation=0.0,
financing: cash_flow.Loan = None):
"""
Args:
budget: budget object this property will be attached to
name: name of the property
initial_value: starting value of the property
appreciation: (can be negative) expected price change over a year (10%=0.10)
financing: if there is a loan attached to the purchase
"""
self._name = name
self._budget = budget
self._value = initial_value
self._appreciation = appreciation
self._financing = financing
self._budget.register_property(self)
def on_month_end(self):
value_diff = self._value * self._appreciation / 12
self._value += value_diff
self._budget.deposit(value_diff)
if self._financing is not None:
self._budget.withdraw(self._financing.on_month_end())
@property
def name(self):
return self._name
@property
def value(self):
return self._value
```
|
{
"source": "jGaboardi/Fiona",
"score": 2
}
|
#### File: Fiona/fiona/meta.py
```python
import xml.etree.ElementTree as ET
import logging
import fiona
with fiona._loading.add_gdal_dll_directories():
from fiona.ogrext import _get_metadata_item
from fiona.env import require_gdal_version
log = logging.getLogger(__name__)
class MetadataItem:
# since GDAL 2.0
CREATION_FIELD_DATA_TYPES = "DMD_CREATIONFIELDDATATYPES"
# since GDAL 2.3
CREATION_FIELD_DATA_SUB_TYPES = "DMD_CREATIONFIELDDATASUBTYPES"
CREATION_OPTION_LIST = "DMD_CREATIONOPTIONLIST"
LAYER_CREATION_OPTION_LIST = "DS_LAYER_CREATIONOPTIONLIST"
# since GDAL 2.0
DATASET_OPEN_OPTIONS = "DMD_OPENOPTIONLIST"
# since GDAL 2.0
EXTENSIONS = "DMD_EXTENSIONS"
EXTENSION = "DMD_EXTENSION"
VIRTUAL_IO = "DCAP_VIRTUALIO"
# since GDAL 2.0
NOT_NULL_FIELDS = "DCAP_NOTNULL_FIELDS"
# since gdal 2.3
NOT_NULL_GEOMETRY_FIELDS = "DCAP_NOTNULL_GEOMFIELDS"
# since GDAL 3.2
UNIQUE_FIELDS = "DCAP_UNIQUE_FIELDS"
# since GDAL 2.0
DEFAULT_FIELDS = "DCAP_DEFAULT_FIELDS"
OPEN = "DCAP_OPEN"
CREATE = "DCAP_CREATE"
def _parse_options(xml):
"""Convert metadata xml to dict"""
options = {}
if len(xml) > 0:
root = ET.fromstring(xml)
for option in root.iter('Option'):
option_name = option.attrib['name']
opt = {}
opt.update((k, v) for k, v in option.attrib.items() if not k == 'name')
values = []
for value in option.iter('Value'):
values.append(value.text)
if len(values) > 0:
opt['values'] = values
options[option_name] = opt
return options
@require_gdal_version('2.0')
def dataset_creation_options(driver):
""" Returns dataset creation options for driver
Parameters
----------
driver : str
Returns
-------
dict
Dataset creation options
"""
xml = _get_metadata_item(driver, MetadataItem.CREATION_OPTION_LIST)
if xml is None:
return {}
if len(xml) == 0:
return {}
return _parse_options(xml)
@require_gdal_version('2.0')
def layer_creation_options(driver):
""" Returns layer creation options for driver
Parameters
----------
driver : str
Returns
-------
dict
Layer creation options
"""
xml = _get_metadata_item(driver, MetadataItem.LAYER_CREATION_OPTION_LIST)
if xml is None:
return {}
if len(xml) == 0:
return {}
return _parse_options(xml)
@require_gdal_version('2.0')
def dataset_open_options(driver):
""" Returns dataset open options for driver
Parameters
----------
driver : str
Returns
-------
dict
Dataset open options
"""
xml = _get_metadata_item(driver, MetadataItem.DATASET_OPEN_OPTIONS)
if xml is None:
return {}
if len(xml) == 0:
return {}
return _parse_options(xml)
@require_gdal_version('2.0')
def print_driver_options(driver):
""" Print driver options for dataset open, dataset creation, and layer creation.
Parameters
----------
driver : str
"""
for option_type, options in [("Dataset Open Options", dataset_open_options(driver)),
("Dataset Creation Options", dataset_creation_options(driver)),
("Layer Creation Options", layer_creation_options(driver))]:
print("{option_type}:".format(option_type=option_type))
if len(options) == 0:
print("\tNo options available.")
else:
for option_name in options:
print("\t{option_name}:".format(option_name=option_name))
if 'description' in options[option_name]:
print("\t\tDescription: {description}".format(description=options[option_name]['description']))
if 'type' in options[option_name]:
print("\t\tType: {type}".format(type=options[option_name]['type']))
if 'values' in options[option_name] and len(options[option_name]['values']) > 0:
print("\t\tAccepted values: {values}".format(values=",".join(options[option_name]['values'])))
for attr_text, attribute in [('Default value', 'default'),
('Required', 'required'),
('Alias', 'aliasOf'),
('Min', 'min'),
('Max', 'max'),
('Max size', 'maxsize'),
('Scope', 'scope'),
('Alternative configuration option', 'alt_config_option')]:
if attribute in options[option_name]:
print("\t\t{attr_text}: {attribute}".format(attr_text=attr_text,
attribute=options[option_name][attribute]))
print("")
@require_gdal_version('2.0')
def extensions(driver):
""" Returns file extensions supported by driver
Parameters
----------
driver : str
Returns
-------
list
List with file extensions or None if not specified by driver
"""
exts = _get_metadata_item(driver, MetadataItem.EXTENSIONS)
if exts is None:
return None
return [ext for ext in exts.split(" ") if len(ext) > 0]
def extension(driver):
""" Returns file extension of driver
Parameters
----------
driver : str
Returns
-------
str
File extensions or None if not specified by driver
"""
return _get_metadata_item(driver, MetadataItem.EXTENSION)
@require_gdal_version('2.0')
def supports_vsi(driver):
""" Returns True if driver supports GDAL's VSI*L API
Parameters
----------
driver : str
Returns
-------
bool
"""
virtual_io = _get_metadata_item(driver, MetadataItem.VIRTUAL_IO)
return virtual_io is not None and virtual_io.upper() == "YES"
@require_gdal_version('2.0')
def supported_field_types(driver):
""" Returns supported field types
Parameters
----------
driver : str
Returns
-------
list
List with supported field types or None if not specified by driver
"""
field_types_str = _get_metadata_item(driver, MetadataItem.CREATION_FIELD_DATA_TYPES)
if field_types_str is None:
return None
return [field_type for field_type in field_types_str.split(" ") if len(field_type) > 0]
@require_gdal_version('2.3')
def supported_sub_field_types(driver):
""" Returns supported sub field types
Parameters
----------
driver : str
Returns
-------
list
List with supported field types or None if not specified by driver
"""
field_types_str = _get_metadata_item(driver, MetadataItem.CREATION_FIELD_DATA_SUB_TYPES)
if field_types_str is None:
return None
return [field_type for field_type in field_types_str.split(" ") if len(field_type) > 0]
```
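A minimal usage sketch of the metadata helpers defined above, assuming Fiona is built against a sufficiently recent GDAL (>= 2.0 for most of these calls); the "GPKG" driver name is only illustrative.
```python
# Sketch: query driver capabilities through the helpers defined above.
# Assumes GDAL >= 2.0; "GPKG" is an illustrative driver name.
import fiona.meta

driver = "GPKG"

# File extension(s) registered for the driver, or None if unspecified.
print(fiona.meta.extension(driver))
print(fiona.meta.extensions(driver))

# Whether the driver can read/write through GDAL's VSI*L virtual filesystem.
print(fiona.meta.supports_vsi(driver))

# Dump dataset open, dataset creation, and layer creation options in one call.
fiona.meta.print_driver_options(driver)
```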
#### File: Fiona/tests/test_bytescollection.py
```python
import pytest
import six
import fiona
class TestReading(object):
@pytest.fixture(autouse=True)
def bytes_collection_object(self, path_coutwildrnp_json):
with open(path_coutwildrnp_json) as src:
bytesbuf = src.read().encode('utf-8')
self.c = fiona.BytesCollection(bytesbuf)
yield
self.c.close()
@pytest.mark.skipif(six.PY2, reason='string are bytes in Python 2')
def test_construct_with_str(self, path_coutwildrnp_json):
with open(path_coutwildrnp_json) as src:
strbuf = src.read()
with pytest.raises(ValueError):
fiona.BytesCollection(strbuf)
def test_open_repr(self):
# I'm skipping checking the name of the virtual file as it is produced by uuid.
print(repr(self.c))
assert repr(self.c).startswith("<open BytesCollection '/vsimem/")
def test_closed_repr(self):
# I'm skipping checking the name of the virtual file as it is produced by uuid.
self.c.close()
print(repr(self.c))
assert repr(self.c).startswith("<closed BytesCollection '/vsimem/")
def test_path(self):
assert self.c.path == self.c.virtual_file
def test_closed_virtual_file(self):
self.c.close()
assert self.c.virtual_file is None
def test_closed_buf(self):
self.c.close()
assert self.c.bytesbuf is None
def test_name(self):
assert len(self.c.name) > 0
def test_mode(self):
assert self.c.mode == 'r'
def test_collection(self):
assert self.c.encoding == 'utf-8'
def test_iter(self):
assert iter(self.c)
def test_closed_no_iter(self):
self.c.close()
with pytest.raises(ValueError):
iter(self.c)
def test_len(self):
assert len(self.c) == 67
def test_closed_len(self):
# Len is lazy, it's never computed in this case. TODO?
self.c.close()
assert len(self.c) == 0
def test_len_closed_len(self):
# Lazy len is computed in this case and sticks.
len(self.c)
self.c.close()
assert len(self.c) == 67
def test_driver(self):
assert self.c.driver == "GeoJSON"
def test_closed_driver(self):
self.c.close()
assert self.c.driver is None
def test_driver_closed_driver(self):
self.c.driver
self.c.close()
assert self.c.driver == "GeoJSON"
def test_schema(self):
s = self.c.schema['properties']
assert s['PERIMETER'] == "float"
assert s['NAME'] == "str"
assert s['URL'] == "str"
assert s['STATE_FIPS'] == "str"
assert s['WILDRNP020'] == "int"
def test_closed_schema(self):
# Schema is lazy too, never computed in this case. TODO?
self.c.close()
assert self.c.schema is None
def test_schema_closed_schema(self):
self.c.schema
self.c.close()
assert sorted(self.c.schema.keys()) == ['geometry', 'properties']
def test_crs(self):
assert self.c.crs['init'] == 'epsg:4326'
def test_crs_wkt(self):
assert self.c.crs_wkt.startswith('GEOGCS["WGS 84"')
def test_closed_crs(self):
# Crs is lazy too, never computed in this case. TODO?
self.c.close()
assert self.c.crs is None
def test_crs_closed_crs(self):
self.c.crs
self.c.close()
assert sorted(self.c.crs.keys()) == ['init']
def test_meta(self):
assert (sorted(self.c.meta.keys()) ==
['crs', 'crs_wkt', 'driver', 'schema'])
def test_bounds(self):
assert self.c.bounds[0] == pytest.approx(-113.564247)
assert self.c.bounds[1] == pytest.approx(37.068981)
assert self.c.bounds[2] == pytest.approx(-104.970871)
assert self.c.bounds[3] == pytest.approx(41.996277)
def test_iter_one(self):
itr = iter(self.c)
f = next(itr)
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_iter_list(self):
f = list(self.c)[0]
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_re_iter_list(self):
f = list(self.c)[0] # Run through iterator
f = list(self.c)[0] # Run through a new, reset iterator
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_getitem_one(self):
f = self.c[0]
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_no_write(self):
with pytest.raises(OSError):
self.c.write({})
def test_iter_items_list(self):
i, f = list(self.c.items())[0]
assert i == 0
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_iter_keys_list(self):
i = list(self.c.keys())[0]
assert i == 0
def test_in_keys(self):
assert 0 in self.c.keys()
assert 0 in self.c
class TestFilterReading(object):
@pytest.fixture(autouse=True)
def bytes_collection_object(self, path_coutwildrnp_json):
with open(path_coutwildrnp_json) as src:
bytesbuf = src.read().encode('utf-8')
self.c = fiona.BytesCollection(bytesbuf)
yield
self.c.close()
def test_filter_1(self):
results = list(self.c.filter(bbox=(-120.0, 30.0, -100.0, 50.0)))
assert len(results) == 67
f = results[0]
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_filter_reset(self):
results = list(self.c.filter(bbox=(-112.0, 38.0, -106.0, 40.0)))
assert len(results) == 26
results = list(self.c.filter())
assert len(results) == 67
def test_filter_mask(self):
mask = {
'type': 'Polygon',
'coordinates': (
((-112, 38), (-112, 40), (-106, 40), (-106, 38), (-112, 38)),)}
results = list(self.c.filter(mask=mask))
assert len(results) == 26
def test_zipped_bytes_collection(bytes_coutwildrnp_zip):
"""Open a zipped stream of bytes as a collection"""
with fiona.BytesCollection(bytes_coutwildrnp_zip) as col:
assert col.name == 'coutwildrnp'
assert len(col) == 67
@pytest.mark.skipif(fiona.gdal_version >= (2, 3, 0),
reason="Changed behavior with gdal 2.3, possibly related to RFC 70:"
"Guessing output format from output file name extension for utilities")
def test_grenada_bytes_geojson(bytes_grenada_geojson):
"""Read grenada.geojson as BytesCollection.
grenada.geojson is an example of geojson that GDAL's GeoJSON
driver will fail to read successfully unless the file's extension
reflects its json'ness.
"""
# We expect an exception if the GeoJSON driver isn't specified.
with pytest.raises(fiona.errors.FionaValueError):
with fiona.BytesCollection(bytes_grenada_geojson) as col:
pass
# If told what driver to use, we should be good.
with fiona.BytesCollection(bytes_grenada_geojson, driver='GeoJSON') as col:
assert len(col) == 1
```
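The tests above exercise fiona.BytesCollection against fixture data; the following is a minimal standalone sketch of the same pattern, with "data.geojson" as a hypothetical input path.
```python
# Sketch: open an in-memory GeoJSON buffer as a collection.
# "data.geojson" is a hypothetical path used only for illustration.
import fiona

with open("data.geojson", "rb") as src:
    bytesbuf = src.read()

with fiona.BytesCollection(bytesbuf) as col:
    print(col.driver, col.crs, len(col))
    first = next(iter(col))
    print(first["id"], first["properties"])
```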
#### File: Fiona/tests/test_fio_bounds.py
```python
import re
from fiona.fio import bounds
from fiona.fio.main import main_group
def test_fail(runner):
result = runner.invoke(main_group, ['bounds', ], '5')
assert result.exit_code == 1
def test_seq(feature_seq, runner):
result = runner.invoke(main_group, ['bounds', ], feature_seq)
assert result.exit_code == 0
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d*', result.output)) == 8
def test_seq_rs(feature_seq_pp_rs, runner):
result = runner.invoke(main_group, ['bounds', ], feature_seq_pp_rs)
assert result.exit_code == 0
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d*', result.output)) == 8
def test_precision(feature_seq, runner):
result = runner.invoke(main_group, ['bounds', '--precision', 1], feature_seq)
assert result.exit_code == 0
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d{1}\D', result.output)) == 8
def test_explode(feature_collection, runner):
result = runner.invoke(main_group, ['bounds', '--explode'], feature_collection)
assert result.exit_code == 0
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d*', result.output)) == 8
def test_explode_pp(feature_collection_pp, runner):
result = runner.invoke(main_group, ['bounds', '--explode'], feature_collection_pp)
assert result.exit_code == 0
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d*', result.output)) == 8
def test_with_id(feature_seq, runner):
result = runner.invoke(main_group, ['bounds', '--with-id'], feature_seq)
assert result.exit_code == 0
assert result.output.count('id') == result.output.count('bbox') == 2
def test_explode_with_id(feature_collection, runner):
result = runner.invoke(
main_group, ['bounds', '--explode', '--with-id'], feature_collection)
assert result.exit_code == 0
assert result.output.count('id') == result.output.count('bbox') == 2
def test_with_obj(feature_seq, runner):
result = runner.invoke(main_group, ['bounds', '--with-obj'], feature_seq)
assert result.exit_code == 0
assert result.output.count('geometry') == result.output.count('bbox') == 2
def test_bounds_explode_with_obj(feature_collection, runner):
result = runner.invoke(
main_group, ['bounds', '--explode', '--with-obj'], feature_collection)
assert result.exit_code == 0
assert result.output.count('geometry') == result.output.count('bbox') == 2
def test_explode_output_rs(feature_collection, runner):
result = runner.invoke(main_group, ['bounds', '--explode', '--rs'], feature_collection)
assert result.exit_code == 0
assert result.output.count('\x1e') == 2
assert result.output.count('[') == result.output.count(']') == 2
assert len(re.findall(r'\d*\.\d*', result.output)) == 8
```
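A sketch of driving the bounds command outside of pytest, using click's CliRunner exactly as the tests above do; the input feature is illustrative.
```python
# Sketch: invoke `fio bounds` programmatically with click's test runner,
# mirroring the tests above. The input feature is illustrative.
import json
from click.testing import CliRunner
from fiona.fio.main import main_group

feature = json.dumps({
    "type": "Feature",
    "geometry": {"type": "Point", "coordinates": [-104.970871, 41.996277]},
    "properties": {},
})

result = CliRunner().invoke(main_group, ["bounds", "--precision", "2"], feature)
print(result.exit_code)   # 0 on success
print(result.output)      # one JSON array per feature: [minx, miny, maxx, maxy]
```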
#### File: Fiona/tests/test_meta.py
```python
import pytest
import fiona
import fiona.drvsupport
import fiona.meta
from fiona.drvsupport import supported_drivers
from fiona.errors import FionaValueError
from .conftest import requires_gdal2, requires_gdal23, requires_gdal31
from six import string_types
@requires_gdal31
@pytest.mark.parametrize("driver", supported_drivers)
def test_print_driver_options(driver):
""" Test fiona.meta.print_driver_options(driver) """
# do not fail
fiona.meta.print_driver_options(driver)
@requires_gdal2
def test_metadata_wrong_driver():
""" Test that FionaValueError is raised for non existing driver"""
with pytest.raises(FionaValueError):
fiona.meta.print_driver_options("Not existing driver")
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extension(driver):
""" Test fiona.meta.extension(driver) """
# do not fail
extension = fiona.meta.extension(driver)
assert extension is None or isinstance(extension, string_types)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extensions(driver):
""" Test fiona.meta.extensions(driver) """
# do not fail
extensions = fiona.meta.extensions(driver)
assert extensions is None or isinstance(extensions, list)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supports_vsi(driver):
""" Test fiona.meta.supports_vsi(driver) """
# do not fail
assert fiona.meta.supports_vsi(driver) in (True, False)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_field_types(driver):
""" Test fiona.meta.supported_field_types(driver) """
# do not fail
field_types = fiona.meta.supported_field_types(driver)
assert field_types is None or isinstance(field_types, list)
@requires_gdal23
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_sub_field_types(driver):
""" Test fiona.meta.supported_sub_field_types(driver) """
# do not fail
sub_field_types = fiona.meta.supported_sub_field_types(driver)
assert sub_field_types is None or isinstance(sub_field_types, list)
```
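Beyond the per-driver assertions above, the same helpers can be looped over every registered driver; a sketch assuming GDAL >= 2.0:
```python
# Sketch: summarize capabilities for every registered driver, as the
# parametrized tests above do one driver at a time (assumes GDAL >= 2.0).
import fiona.drvsupport
import fiona.meta

for driver in sorted(fiona.drvsupport.supported_drivers):
    ext = fiona.meta.extension(driver)
    vsi = fiona.meta.supports_vsi(driver)
    print("{:<20} extension={!r:<12} vsi={}".format(driver, ext, vsi))
```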
#### File: Fiona/tests/test_schema.py
```python
from collections import OrderedDict
import fiona
from fiona.errors import SchemaError, UnsupportedGeometryTypeError, \
DriverSupportError
from fiona.schema import FIELD_TYPES, normalize_field_type
import os
import tempfile
from .conftest import get_temp_filename
from fiona.drvsupport import driver_mode_mingdal
from fiona.env import GDALVersion
import pytest
from .conftest import requires_only_gdal1, requires_gdal2
def test_schema_ordering_items(tmpdir):
name = str(tmpdir.join('test_scheme.shp'))
items = [('title', 'str:80'), ('date', 'date')]
with fiona.open(name, 'w',
driver="ESRI Shapefile",
schema={
'geometry': 'LineString',
'properties': items}) as c:
assert list(c.schema['properties'].items()) == items
with fiona.open(name) as c:
assert list(c.schema['properties'].items()) == items
def test_shapefile_schema(tmpdir):
name = str(tmpdir.join('test_schema.shp'))
items = sorted({
'AWATER10': 'float',
'CLASSFP10': 'str',
'ZipCodeType': 'str',
'EstimatedPopulation': 'float',
'LocationType': 'str',
'ALAND10': 'float',
'TotalWages': 'float',
'FUNCSTAT10': 'str',
'Long': 'float',
'City': 'str',
'TaxReturnsFiled': 'float',
'State': 'str',
'Location': 'str',
'GSrchCnt': 'float',
'INTPTLAT10': 'str',
'Lat': 'float',
'MTFCC10': 'str',
'Decommisioned': 'str',
'GEOID10': 'str',
'INTPTLON10': 'str'}.items())
with fiona.open(name, 'w',
driver="ESRI Shapefile",
schema={'geometry': 'Polygon', 'properties': items}) as c:
assert list(c.schema['properties'].items()) == items
c.write(
{'geometry': {'coordinates': [[(-117.882442, 33.783633),
(-117.882284, 33.783817),
(-117.863348, 33.760016),
(-117.863478, 33.760016),
(-117.863869, 33.760017),
(-117.864, 33.760017999999995),
(-117.864239, 33.760019),
(-117.876608, 33.755769),
(-117.882886, 33.783114),
(-117.882688, 33.783345),
(-117.882639, 33.783401999999995),
(-117.88259, 33.78346),
(-117.882442, 33.783633)]],
'type': 'Polygon'},
'id': '1',
'properties': {
'ALAND10': 8819240.0,
'AWATER10': 309767.0,
'CLASSFP10': 'B5',
'City': 'SANTA ANA',
'Decommisioned': False,
'EstimatedPopulation': 27773.0,
'FUNCSTAT10': 'S',
'GEOID10': '92706',
'GSrchCnt': 0.0,
'INTPTLAT10': '+33.7653010',
'INTPTLON10': '-117.8819759',
'Lat': 33.759999999999998,
'Location': 'NA-US-CA-SANTA ANA',
'LocationType': 'PRIMARY',
'Long': -117.88,
'MTFCC10': 'G6350',
'State': 'CA',
'TaxReturnsFiled': 14635.0,
'TotalWages': 521280485.0,
'ZipCodeType': 'STANDARD'},
'type': 'Feature'})
assert len(c) == 1
with fiona.open(name) as c:
assert (
list(c.schema['properties'].items()) ==
sorted([('AWATER10', 'float:24.15'),
('CLASSFP10', 'str:80'),
('ZipCodeTyp', 'str:80'),
('EstimatedP', 'float:24.15'),
('LocationTy', 'str:80'),
('ALAND10', 'float:24.15'),
('INTPTLAT10', 'str:80'),
('FUNCSTAT10', 'str:80'),
('Long', 'float:24.15'),
('City', 'str:80'),
('TaxReturns', 'float:24.15'),
('State', 'str:80'),
('Location', 'str:80'),
('GSrchCnt', 'float:24.15'),
('TotalWages', 'float:24.15'),
('Lat', 'float:24.15'),
('MTFCC10', 'str:80'),
('INTPTLON10', 'str:80'),
('GEOID10', 'str:80'),
('Decommisio', 'str:80')]))
f = next(iter(c))
assert f['properties']['EstimatedP'] == 27773.0
def test_field_truncation_issue177(tmpdir):
name = str(tmpdir.join('output.shp'))
kwargs = {
'driver': 'ESRI Shapefile',
'crs': 'EPSG:4326',
'schema': {
'geometry': 'Point',
'properties': [('a_fieldname', 'float')]}}
with fiona.open(name, 'w', **kwargs) as dst:
rec = {}
rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
rec['properties'] = {'a_fieldname': 3.0}
dst.write(rec)
with fiona.open(name) as src:
first = next(iter(src))
assert first['geometry'] == {'type': 'Point', 'coordinates': (0, 0)}
assert first['properties']['a_fieldnam'] == 3.0
def test_unsupported_geometry_type():
tmpdir = tempfile.mkdtemp()
tmpfile = os.path.join(tmpdir, 'test-test-geom.shp')
profile = {
'driver': 'ESRI Shapefile',
'schema': {
'geometry': 'BOGUS',
'properties': {}}}
with pytest.raises(UnsupportedGeometryTypeError):
fiona.open(tmpfile, 'w', **profile)
@pytest.mark.parametrize('x', list(range(1, 10)))
def test_normalize_int32(x):
assert normalize_field_type('int:{}'.format(x)) == 'int32'
@requires_gdal2
@pytest.mark.parametrize('x', list(range(10, 20)))
def test_normalize_int64(x):
assert normalize_field_type('int:{}'.format(x)) == 'int64'
@pytest.mark.parametrize('x', list(range(0, 20)))
def test_normalize_str(x):
assert normalize_field_type('str:{}'.format(x)) == 'str'
def test_normalize_bool():
assert normalize_field_type('bool') == 'bool'
def test_normalize_float():
assert normalize_field_type('float:25.8') == 'float'
def generate_field_types():
"""
Produce a unique set of field types in a consistent order.
This ensures that tests are able to run in parallel.
"""
types = set(FIELD_TYPES)
types.remove(None)
return list(sorted(types)) + [None]
@pytest.mark.parametrize('x', generate_field_types())
def test_normalize_std(x):
assert normalize_field_type(x) == x
def test_normalize_error():
with pytest.raises(SchemaError):
assert normalize_field_type('thingy')
@requires_only_gdal1
@pytest.mark.parametrize('field_type', ['time', 'datetime'])
def test_check_schema_driver_support_shp(tmpdir, field_type):
with pytest.raises(DriverSupportError):
name = str(tmpdir.join('test_scheme.shp'))
items = [('field1', field_type)]
with fiona.open(name, 'w',
driver="ESRI Shapefile",
schema={
'geometry': 'LineString',
'properties': items}) as c:
pass
@requires_only_gdal1
def test_check_schema_driver_support_gpkg(tmpdir):
with pytest.raises(DriverSupportError):
name = str(tmpdir.join('test_scheme.gpkg'))
items = [('field1', 'time')]
with fiona.open(name, 'w',
driver="GPKG",
schema={
'geometry': 'LineString',
'properties': items}) as c:
pass
@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
def test_geometry_only_schema_write(tmpdir, driver):
schema = {
"geometry": "Polygon",
# No properties defined here.
}
record = {'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]}}
path = str(tmpdir.join(get_temp_filename(driver)))
with fiona.open(path,
mode='w',
driver=driver,
schema=schema) as c:
c.write(record)
with fiona.open(path,
mode='r',
driver=driver) as c:
data = [f for f in c]
assert len(data) == 1
assert len(data[0].get('properties', {})) == 0
assert data[0]['geometry'] == record['geometry']
@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
def test_geometry_only_schema_update(tmpdir, driver):
# Guard unsupported drivers
if driver in driver_mode_mingdal['a'] and GDALVersion.runtime() < GDALVersion(
*driver_mode_mingdal['a'][driver][:2]):
return
schema = {
"geometry": "Polygon",
# No properties defined here.
}
record1 = {
'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]}}
record2 = {
'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (2.0, 0.0), (0.0, 0.0)]]}}
path = str(tmpdir.join(get_temp_filename(driver)))
# Create file
with fiona.open(path,
mode='w',
driver=driver,
schema=schema) as c:
c.write(record1)
# Append record
with fiona.open(path,
mode='a',
driver=driver) as c:
c.write(record2)
with fiona.open(path,
mode='r',
driver=driver) as c:
data = [f for f in c]
assert len(data) == 2
for f in data:
assert len(f.get('properties', {})) == 0
assert data[0]['geometry'] == record1['geometry']
assert data[1]['geometry'] == record2['geometry']
@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
def test_property_only_schema_write(tmpdir, driver):
schema = {
# No geometry defined here.
"properties": {'prop1': 'str'}
}
record1 = {'properties': {'prop1': 'one'}}
path = str(tmpdir.join(get_temp_filename(driver)))
with fiona.open(path,
mode='w',
driver=driver,
schema=schema) as c:
c.write(record1)
with fiona.open(path,
mode='r',
driver=driver) as c:
data = [f for f in c]
assert len(data) == 1
assert len(data[0].get('properties', {})) == 1
assert 'prop1' in data[0]['properties'] and data[0]['properties']['prop1'] == 'one'
for f in data:
assert 'geometry' not in f or f['geometry'] is None
@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
def test_property_only_schema_update(tmpdir, driver):
# Guard unsupported drivers
if driver in driver_mode_mingdal['a'] and GDALVersion.runtime() < GDALVersion(
*driver_mode_mingdal['a'][driver][:2]):
return
schema = {
# No geometry defined here.
"properties": {'prop1': 'str'}
}
record1 = {'properties': {'prop1': 'one'}}
record2 = {'properties': {'prop1': 'two'}}
path = str(tmpdir.join(get_temp_filename(driver)))
# Create file
with fiona.open(path,
mode='w',
driver=driver,
schema=schema) as c:
c.write(record1)
# Append record
with fiona.open(path,
mode='a',
driver=driver) as c:
c.write(record2)
with fiona.open(path,
mode='r',
driver=driver) as c:
data = [f for f in c]
assert len(data) == 2
for f in data:
assert len(f.get('properties', {})) == 1
assert 'geometry' not in f or f['geometry'] is None
assert 'prop1' in data[0]['properties'] and data[0]['properties']['prop1'] == 'one'
assert 'prop1' in data[1]['properties'] and data[1]['properties']['prop1'] == 'two'
def test_schema_default_fields_wrong_type(tmpdir):
""" Test for SchemaError if a default field is specified with a different type"""
name = str(tmpdir.join('test.gpx'))
schema = {'properties': OrderedDict([('ele', 'str'), ('time', 'datetime')]),
'geometry': 'Point'}
with pytest.raises(SchemaError):
with fiona.open(name, 'w',
driver="GPX",
schema=schema) as c:
pass
```
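A minimal sketch of the write-then-read pattern these schema tests exercise; the output path and field names are illustrative.
```python
# Sketch: write and read back a file with an explicit schema, the pattern
# exercised by the tests above. Path and field names are illustrative.
import fiona

schema = {"geometry": "Point", "properties": [("name", "str"), ("value", "float")]}
record = {
    "geometry": {"type": "Point", "coordinates": (0.0, 0.0)},
    "properties": {"name": "origin", "value": 1.0},
}

with fiona.open("points.geojson", "w", driver="GeoJSON", schema=schema) as dst:
    dst.write(record)

with fiona.open("points.geojson") as src:
    print(src.schema)
    print(next(iter(src))["properties"])
```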
|
{
"source": "jGaboardi/geosnap",
"score": 2
}
|
#### File: geosnap/tests/test_clusters.py
```python
from geosnap import Community
import numpy as np
reno = Community.from_census(msa_fips="39900")
columns = ["median_household_income", "p_poverty_rate", "p_unemployment_rate"]
# Aspatial Clusters
def test_gm():
r = reno.cluster(columns=columns, method="gaussian_mixture", best_model=True)
assert len(r.gdf.gaussian_mixture.unique()) >= 5
def test_ward():
r = reno.cluster(columns=columns, method="ward")
assert len(r.gdf.ward.unique()) == 7
def test_spectral():
r = reno.cluster(columns=columns, method="spectral")
assert len(r.gdf.spectral.unique()) == 7
def test_kmeans():
r = reno.cluster(columns=columns, method="kmeans")
assert len(r.gdf.kmeans.unique()) == 7
def test_aff_prop():
r = reno.cluster(columns=columns, method="affinity_propagation", preference=-100)
assert len(r.gdf.affinity_propagation.unique()) == 3
def test_hdbscan():
r = reno.cluster(columns=columns, method="hdbscan")
assert len(r.gdf.hdbscan.unique()) >= 4
# Spatial Clusters
def test_spenc():
r = reno.regionalize(columns=columns, method="spenc")
assert len(r.gdf.spenc.unique()) == 7
def test_maxp():
r = reno.regionalize(columns=columns, method="max_p", initial=10)
assert len(r.gdf.max_p.unique()) >= 8
def test_ward_spatial():
r = reno.regionalize(columns=columns, method="ward_spatial", n_clusters=7)
assert len(r.gdf.ward_spatial.unique()) == 8
def test_skater():
r = reno.regionalize(columns=columns, method="skater", n_clusters=10)
assert len(r.gdf.skater.unique()) == 11
def test_azp():
r = reno.regionalize(columns=columns, method="azp", n_clusters=7)
assert len(r.gdf.azp.unique()) == 8
# Test seeding
def test_seed():
np.random.seed(12345)
r = reno.cluster(columns=columns, method='ward')
card = r.gdf.groupby('ward').count()['geoid'].values
np.testing.assert_array_equal(card, [27, 83, 19, 51, 38, 7])
```
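A sketch mirroring the test setup above, assuming geosnap's census data store has already been populated locally; the MSA FIPS code and columns are taken directly from the tests.
```python
# Sketch mirroring the tests above: build a Community for one MSA and attach
# a k-means cluster label. Assumes geosnap's census data store is available.
from geosnap import Community

columns = ["median_household_income", "p_poverty_rate", "p_unemployment_rate"]
reno = Community.from_census(msa_fips="39900")

clustered = reno.cluster(columns=columns, method="kmeans")
print(clustered.gdf["kmeans"].value_counts())
```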
|
{
"source": "jGaboardi/legendgram",
"score": 3
}
|
#### File: legendgram/legendgram/util.py
```python
loc_lut = {'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10}
inv_lut = {v:k for k,v in loc_lut.items()} #yes, it's not general, but it's ok.
def make_location(ax,loc, legend_size=(.27,.2)):
"""
Construct the location bounds of a legendgram
Arguments:
----------
ax : matplotlib.AxesSubplot
axis on which to add a legendgram
loc : string or int
valid legend location like that used in matplotlib.pyplot.legend
legend_size : tuple or float
tuple denoting the length/width of the legendgram in terms
of a fraction of the axis. If a float, the legend is assumed
square.
Returns
-------
a list [left_anchor, bottom_anchor, width, height] in terms of plot units
that defines the location and extent of the legendgram.
"""
position = ax.get_position()
if isinstance(legend_size, float):
legend_size = (legend_size, legend_size)
lw, lh = legend_size
legend_width = position.width * lw
legend_height = position.height * lh
right_offset = (position.width - legend_width)
top_offset = (position.height - legend_height)
if isinstance(loc, int):
try:
loc = inv_lut[loc]
except KeyError:
raise KeyError('Legend location {} not recognized. Please choose '
'from the list of valid matplotlib legend locations.'.format(loc))
if loc.lower() == 'lower left' or loc.lower() == 'best':
anchor_x, anchor_y = position.x0, position.y0
elif loc.lower() == 'lower center':
anchor_x, anchor_y = position.x0 + position.width*.5, position.y0
elif loc.lower() == 'lower right':
anchor_x, anchor_y = position.x0 + right_offset, position.y0
elif loc.lower() == 'center left':
anchor_x, anchor_y = position.x0, position.y0 + position.height * .5
elif loc.lower() == 'center':
anchor_x, anchor_y = position.x0 + position.width * .5, position.y0 + position.height*.5
elif loc.lower() == 'center right' or loc.lower()=='right':
anchor_x, anchor_y = position.x0 + right_offset, position.y0 + position.height*.5
elif loc.lower() == 'upper left':
anchor_x, anchor_y = position.x0, position.y0 + top_offset
elif loc.lower() == 'upper center':
anchor_x, anchor_y = position.x0 + position.width * .5, position.y0 + top_offset
elif loc.lower() == 'upper right':
anchor_x, anchor_y = position.x0 + right_offset, position.y0 + top_offset
return [anchor_x, anchor_y, legend_width, legend_height]
```
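A sketch of using make_location to place an inset axis; the import path is assumed to be legendgram.util, and the plotted data are random placeholders.
```python
# Sketch: carve out an inset axis with make_location() and draw into it.
# Import path is assumed; the plotted data are random placeholders.
import matplotlib.pyplot as plt
import numpy as np
from legendgram.util import make_location

fig, ax = plt.subplots()
ax.plot(np.random.rand(20), np.random.rand(20), ".")

# [left, bottom, width, height] in figure coordinates for the legendgram.
bounds = make_location(ax, "lower right", legend_size=(0.3, 0.2))
inset = fig.add_axes(bounds)
inset.hist(np.random.rand(100), bins=10)
plt.show()
```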
|
{
"source": "jGaboardi/logo",
"score": 3
}
|
#### File: logo/logo/build_tex_file.py
```python
from .predefined import psnav_1line, psnav_2line
def set_header_and_footer(font, convert_tikz, colors, cformat):
header = r"""
\documentclass[tikz%s]{standalone}
\usetikzlibrary{mindmap,trees,backgrounds}
\usepackage{fontspec}
\usepackage{lmodern}
\defaultfontfeatures{Ligatures=TeX,Scale=3}
\setmainfont{%s}
""" % (
convert_tikz,
font,
)
defined = set()
for color, code in colors:
if color not in defined:
header += r"""
\definecolor{%s}{%s}{%s}""" % (
color,
cformat,
code,
)
defined.add(color)
header += r"""
\begin{document}"""
footer = r"""
\end{document}"""
return header, footer
def level_distances_and_sibling_angles(child_nodes, grandchild_nodes):
"""Alter this function for varying child/grandchild configurations."""
if child_nodes == 7:
dist_l1, angle_l1 = "5cm", "51"
if grandchild_nodes == 3:
dist_l2, angle_l2 = "3cm", "45"
return dist_l1, angle_l1, dist_l2, angle_l2
def initialize_tikz(
nav_logo,
background_color,
concept_color,
text_color,
level_distance_1,
sibling_angle_1,
level_distance_2,
sibling_angle_2,
font_size_l1="Huge",
):
""" see `level_distances_and_sibling_angles()` for adjusting the
`level distance` and `sibling angle` parameters for the
tikzpicture mindmap/concept.
"""
# pack distance, angle, and font arguments for each level
args_l1 = level_distance_1, sibling_angle_1, font_size_l1
args_l2 = level_distance_2, sibling_angle_2
# set a scope environment if for the navigation/index logo
if nav_logo:
scope = r"""
\begin{scope}"""
else:
scope = ""
# set background of desired
if background_color != None:
background = r"""
background rectangle/.style={fill=%s},
show background rectangle,""" % background_color
else:
background = ""
# initialize tikz picture
main_content = r"""
\begin{tikzpicture}%s[%s
mindmap,
grow cyclic,
every node/.style=concept,
concept color=%s,
text=%s,
level 1/.append style={
level distance=%s,
sibling angle=%s,
font=\%s
},
level 2/.append style={
level distance=%s,
sibling angle=%s
}
]
""" % (
scope,
background,
concept_color,
text_color,
*args_l1,
*args_l2,
)
return main_content
def finalize_tikz(nav_logo):
# set scope variables if navigation logo
if nav_logo:
scope = r"""
\end{scope}
\begin{scope}
%s
\end{scope}""" % nav_logo
else:
scope = ""
# final `tikzpicture` environment
end_tikzpicture = r"""
;
%s
\end{tikzpicture}""" % scope
return end_tikzpicture
def create_grandchild():
# create a grandchild node
grandchild = r"""
child { node { }}"""
return grandchild
def create_child(child_color, grandchildren, child_text):
# create a child node
child = r"""
child [concept color=%s]{ node {%s}""" % (
child_color,
child_text,
)
# create grandchildren nodes for the child
for grandchild in range(grandchildren):
child += create_grandchild()
# finalize child syntax
child += r"""
}"""
return child
def create_concept(concept_color, concept_text, concept_font_style, concept_font_size):
tikz_concept = r"""
\node[concept color=%s]{\%s\%s{%s}}""" % (
concept_color,
concept_font_size,
concept_font_style,
concept_text,
)
return tikz_concept
```
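A sketch composing a standalone .tex document from the helpers above. The import path, font, colors, and node labels are illustrative assumptions; 7 children with 3 grandchildren each is the only configuration handled by level_distances_and_sibling_angles().
```python
# Sketch: compose a standalone .tex file from the helpers above.
# Import path, font, colors, and labels are illustrative assumptions.
from logo.build_tex_file import (
    set_header_and_footer, level_distances_and_sibling_angles,
    initialize_tikz, create_concept, create_child, finalize_tikz,
)

colors = [("conceptcolor", "0.25,0.44,1.0"), ("childcolor", "1.0,0.44,0.25")]
header, footer = set_header_and_footer("Open Sans", "", colors, "rgb")

# 7 children with 3 grandchildren each is the supported configuration.
d1, a1, d2, a2 = level_distances_and_sibling_angles(7, 3)
body = initialize_tikz(None, None, "conceptcolor", "black", d1, a1, d2, a2)
body += create_concept("conceptcolor", "PySAL", "bfseries", "Huge")
for label in ("explore", "model", "viz", "lib", "dynamics", "solve", "io"):
    body += create_child("childcolor", 3, label)
body += finalize_tikz(None)

with open("logo.tex", "w") as f:
    f.write(header + body + footer)
```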
|
{
"source": "jGaboardi/non-duplicated-intersects",
"score": 3
}
|
#### File: non-duplicated-intersects/nd_intersects/nd_intersects.py
```python
__author__ = "<NAME> <<EMAIL>>"
import geopandas
import matplotlib
import string
from shapely.geometry import Point, Polygon
import sys
def nd_intersects(pnts, pgons, ptid, pgid, keep_columns):
"""Create a non-duplicated intersects geodataframe.
Parameters
----------
pnts : geopandas.GeoDataFrame
Points for spatial join.
pgons : geopandas.GeoDataFrame
Polygons for spatial join.
how : str
Type of join to perform.
ptid : str
Point ID variable.
pgid : str
Polygon ID variable.
keep_columns : list
Columns to retain.
Returns
-------
ndgdf : geopandas.GeoDataFrame
Result of the non-duplicated intersects method.
"""
def _row(_p, _g):
"""generate specific row values for dataframe"""
return (_p, "-".join(_g[pgid]), *_g.geometry.unique())
if pnts.geometry.name not in keep_columns:
keep_columns += [pnts.geometry.name]
# perform "intersects" spatial join
igdf = do_sjoin(pnts, pgons, "intersects", ptid, pgid, keep_columns)
# Create shell dataframe to store squashed intersection points
ndgdf = geopandas.GeoDataFrame(columns=igdf.columns, index=pnts.index)
# Populate the squashed intersection points dataframe
ndgdf.loc[:, keep_columns] = [_row(p, g) for p, g in igdf.groupby(ptid)]
return ndgdf
def do_sjoin(df1, df2, op, ptid, pgid, keep_columns, how="left", fillna="NaN"):
"""Perform a spatial join in GeoPandas.
Parameters
----------
df1 : geopandas.GeoDataFrame
Left geodataframe for the spatial join.
df2 : geopandas.GeoDataFrame
Right geodataframe for the spatial join.
op : str
Binary predicate in Shapley.
https://shapely.readthedocs.io/en/latest/manual.html#binary-predicates
ptid : str
Point ID variable.
pgid : str
Polygon ID variable.
keep_columns : list
Columns to retain.
how : str
Join method. Defaults is 'left'. Also supports {'right', 'inner'}.
fillna : {None, bool, int, str, ...}
Any value to fill 'not a value' cells. Defaults is 'NaN'.
Returns
-------
df3 :
Result of the spatial join.
"""
# Make sure to keep geometry (from left dataframe)
if df1.geometry.name not in keep_columns:
keep_columns += [df1.geometry.name]
# Perform join
df3 = geopandas.sjoin(df1, df2, how=how, op=op)[keep_columns]
# Fill actual NaN with "NaN" for plotting purposes
df3.loc[(df3[pgid].isna()), pgid] = fillna
return df3
def demo_plot_join(
pnts, pgons, ptid, pgid, title, orig, save=None, cmap="Paired", fmat="png"
):
"""Plot the demonstration spatial join.
Parameters
----------
pnts : geopandas.GeoDataFrame
Points for spatial join.
pgons : geopandas.GeoDataFrame
Polygons for spatial join.
ptid : str
Point ID variable.
pgid : str
Polygon ID variable.
title : str
Supertitle of the plot.
orig : int
Set cardinality of original point set, $|P|$.
save : str
File name (including path) plot output. Default is None.
figsize : tuple
Figure size. Default is (8,8).
cmap : str
Default is 'Paired'.
https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
fmat : str
Format for saving `savefig`. Default is 'png'.
"""
def pgon_labels(p):
"""label polygons"""
def _loc(_x):
"""polygon label location helper"""
return [coord + 0.35 for coord in _x.geometry.centroid.coords[0]]
kws = {"size": 25, "va": "bottom"}
p.apply(lambda x: base.annotate(s=x[pgid], xy=_loc(x), **kws), axis=1)
def pt_labels(p):
"""label points with PTID+PGID"""
def _lab(_x):
"""point label helper"""
return ",".join([_x[ptid], _x[pgid]])
def _loc(_x):
"""point label location helper"""
return _x.geometry.coords[0]
kws = {"size": 15, "va": "bottom", "weight": "bold"}
p.apply(lambda x: base.annotate(s=_lab(x), xy=_loc(x), **kws), axis=1)
def add_title(label, sup=True):
"""add a suptitle or title"""
if sup:
matplotlib.pyplot.suptitle(label, x=0.515, y=0.98, fontsize=30)
else:
matplotlib.pyplot.title(label, fontsize=20)
def set_card():
"""Determine equality of set cardinality for subtitle"""
sj_pnts = pnts.shape[0]
if orig == sj_pnts:
oper = "="
elif orig < sj_pnts:
oper = "<"
elif orig > sj_pnts:
oper = ">"
else:
raise ValueError("Equality could not be determined.")
return "$\\vert P \\vert %s \\vert P^\\prime \\vert$" % oper
base = pgons.plot(figsize=(8,8), zorder=0, facecolor="w", edgecolor="k")
pnts.plot(ax=base, markersize=50, column=pgid, cmap=cmap)
# polygons labels
pgon_labels(pgons)
# points labels
pt_labels(pnts)
# add title
add_title(title)
# add subtitle
add_title(set_card(), sup=False)
# save figure
if save:
kws = {"bbox_inches": "tight", "format": fmat, "dpi": 400, "quality": 100}
matplotlib.pyplot.savefig("%s.%s" % (save, fmat), **kws)
def demo_points(ptid):
"""Points for demo"""
point_coords = [
(-1, -1),
(0, -1),
(0.5, -0.75),
(-0.5, -0.5),
(0.5, -0.5),
(0, 0),
(0.5, 0.5),
]
point_ids = {ptid: list(string.ascii_uppercase[: len(point_coords)])}
points = [Point(coords) for coords in point_coords]
points = geopandas.GeoDataFrame(point_ids, geometry=points)
return points
def demo_polygons(pgid):
"""Polygons for demo"""
polygon_coords = [
[(-1, -1), (0, -1), (0, 0), (-1, 0)],
[(0, -1), (1, -1), (1, 0), (0, 0)],
[(-1, 0), (0, 0), (0, 1), (-1, 1)],
]
polygon_ids = {pgid: list(string.ascii_lowercase[-len(polygon_coords) :])}
polygons = [Polygon(coords) for coords in polygon_coords]
polygons = geopandas.GeoDataFrame(polygon_ids, geometry=polygons)
return polygons
def demo(save):
"""Run the demonstration synthetic example"""
# Variable names
PTID = "point_id"
PGID = "polygon_id"
KEEP_COLUMNS = [PTID, PGID]
# Synthetic geometries
points = demo_points(ptid=PTID)
polygons = demo_polygons(pgid=PGID)
# Within
print("* Join: %s\n" % "within")
print(do_sjoin(points, polygons, "within", PTID, PGID, KEEP_COLUMNS))
print("------------------------------------------------------\n")
# Intersects
print("* Join: %s\n" % "intersects")
print(do_sjoin(points, polygons, "intersects", PTID, PGID, KEEP_COLUMNS))
print("------------------------------------------------------\n")
# Non-duplicated intersects
print("* Join: n-d intersects")
nd = nd_intersects(points, polygons, PTID, PGID, KEEP_COLUMNS)
print(nd)
print("------------------------------------------------------\n")
if save:
title = "nd-intersects"
save += title
orig = points.shape[0]
demo_plot_join(nd, polygons, PTID, PGID, title, orig, save=save)
return nd
if __name__ == "__main__":
try:
save_plot_path = sys.argv[1]
except IndexError:
save_plot_path = None
nd = demo(save_plot_path)
```
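The demo above builds synthetic layers; a sketch of calling nd_intersects directly on user data follows, with file names, ID columns, and the import path as assumptions.
```python
# Sketch: apply nd_intersects() to user-supplied layers. File names, ID
# columns, and the import path are assumptions; a point touching several
# polygons yields one row whose polygon_id joins all matches with "-".
import geopandas
from nd_intersects.nd_intersects import nd_intersects

points = geopandas.read_file("points.shp")      # expected to carry "point_id"
polygons = geopandas.read_file("polygons.shp")  # expected to carry "polygon_id"

result = nd_intersects(points, polygons, "point_id", "polygon_id",
                       ["point_id", "polygon_id"])
print(result.head())
```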
|
{
"source": "jGaboardi/pointpats",
"score": 3
}
|
#### File: pointpats/pointpats/pointpattern.py
```python
import numpy as np
import sys
from libpysal.cg import KDTree
from .centrography import hull
from .window import as_window, poly_from_bbox
from .util import cached_property
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
__author__ = "<NAME> <EMAIL>"
__all__ = ['PointPattern']
if sys.version_info[0] > 2:
xrange = range
class PointPattern(object):
"""
Planar Point Pattern Class 2-D.
Parameters
----------
points: array
(n,p), n points with p >= 2 attributes on each
point. Two attributes must comprise the spatial
coordinate pair. Default is that the first two
attributes are the x and y spatial coordinates.
window: :class:`.Window`
Bounding geometric object for the point pattern.
If not specified, window will be set to the minimum
bounding rectangle of the point pattern.
names: list
The names of the attributes.
coord_names: list
The names of the attributes defining the two spatial
coordinates.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
... [9.47, 31.02], [30.78, 60.10], [75.21, 58.93],
... [79.26, 7.68], [8.23, 39.93], [98.73, 77.17],
... [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]]
>>> pp = PointPattern(points)
>>> pp.n
12
>>> pp.mean_nnd
21.612139802089246
>>> pp.lambda_mbb
0.0015710507711240867
>>> pp.lambda_hull
0.0022667153468973137
>>> pp.hull_area
5294.00395
>>> pp.mbb_area
7638.200000000001
"""
def __init__(self, points, window=None, names=None, coord_names=None):
# first two series in df are x, y unless coor_names and names are
# specified
self.df = pd.DataFrame(points)
n, p = self.df.shape
self._n_marks = p - 2
if coord_names is None:
if names is not None:
coord_names = names[:2]
else:
coord_names = ['x', 'y']
if names is None:
col_names = coord_names
if p > 2:
for m in range(2, p):
col_names.append("mark_{}".format(m-2))
coord_names = coord_names[:2]
else:
col_names = names
self.coord_names = coord_names
self._x, self._y = coord_names
self.df.columns = col_names
self.points = self.df.loc[:, [self._x, self._y]]
self._n, self._p = self.points.shape
if window is None:
self.set_window(as_window(poly_from_bbox(self.mbb)))
else:
self.set_window(window)
self._facade()
def __len__(self):
"""Return the number of points. Use the expression 'len(pp)'.
Returns
-------
length : int
The number of points in the point pattern.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1, 3], [4, 5], [0,0]]
>>> pp = PointPattern(points)
>>> len(pp)
3
"""
return len(self.df)
def __contains__(self, n):
"""Return True if n is a point (a tuple of coordinates), False otherwise.
Use the expression 'n in pp'.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1, 3], [4, 5], [0,0]]
>>> pp = PointPattern(points)
>>> [1, 3] in pp
True
"""
name = self.df.columns.values.tolist()
return ((self.df[name[0]] == n[0]) & (self.df[name[1]] == n[1])).any()
def set_window(self, window):
try:
self._window = window
except:
print("not a valid Window object")
def get_window(self):
"""
Bounding geometry for the point pattern
:class:`.window.Window`
"""
if not hasattr(self, '_window') or self._window is None:
# use bbox as window
self.set_window(as_window(poly_from_bbox(self.mbb)))
return self._window
window = property(get_window, set_window)
def summary(self):
'''
Description of the point pattern.
'''
print('Point Pattern')
print("{} points".format(self.n))
print("Bounding rectangle [({},{}), ({},{})]".format(*self.mbb))
print("Area of window: {}".format(self.window.area))
print("Intensity estimate for window: {}".format(self.lambda_window))
print(self.head())
def add_marks(self, marks, mark_names=None):
if mark_names is None:
nm = range(len(marks))
mark_names = ["mark_{}".format(self._n_marks+1+j) for j in nm]
for name, mark in zip(mark_names, marks):
self.df[name] = mark
self._n_marks += 1
def plot(self, window=False, title="Point Pattern", hull=False,
get_ax=False):
"""
Plot function for a point pattern.
Parameters
----------
window : boolean
If window is True, plot window of the point
pattern. If not, don't plot window.
title : string
Name of the figure.
hull : boolean
If hull is True, plot convex hull of the point
pattern. If not, don't plot convex hull.
get_ax : boolean
If get_ax is True, return the current plot ax.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Current plot ax. Only return it when get_ax is True.
"""
fig, ax = plt.subplots()
plt.plot(self.df[self._x], self.df[self._y], '.')
# plt.scatter(self.df[self._x], self.df[self._y])
plt.title(title)
if window:
patches = []
for part in self.window.parts:
p = Polygon(np.asarray(part))
patches.append(p)
ax.add_collection(PatchCollection(patches, facecolor='w',
edgecolor='k', alpha=0.3))
if hull:
patches = []
p = Polygon(self.hull)
patches.append(p)
ax.add_collection(PatchCollection(patches, facecolor='w',
edgecolor='k', alpha=0.3))
# plt.plot(x, y, '.')
if get_ax:
return ax
def _mbb(self):
"""
Minimum bounding box
"""
mins = self.points.min(axis=0)
maxs = self.points.max(axis=0)
return np.hstack((mins, maxs))
mbb = cached_property(_mbb)
def _mbb_area(self):
"""
Area of minimum bounding box
"""
return np.product(self.mbb[[2, 3]]-self.mbb[[0, 1]])
mbb_area = cached_property(_mbb_area)
def _n(self):
"""
Number of points
"""
return self.points.shape[0]
n = cached_property(_n)
def _rot(self):
"""
Ripley's rule of thumb for distance range in plotting k and related functions
One-quarter the smallest side of the mbb.
"""
w, s, e, n = self.mbb
return 0.25 * min(e-w, n-s)
rot = cached_property(_rot)
def _lambda_mbb(self):
"""
Intensity based on minimum bounding box
"""
return self.n * 1. / self.mbb_area
lambda_mbb = cached_property(_lambda_mbb)
def _hull(self):
"""
Points defining convex hull in counterclockwise order
"""
return hull(self.points)
hull = cached_property(_hull)
def _lambda_window(self):
"""
Intensity estimate based on area of window
The intensity of a point process at point :math:`s_j` can be defined
as:
.. math::
\\lambda(s_j) = \\lim \\limits_{|\\mathbf{A}s_j|
\\to 0} \\left \\{ \\frac{E(Y(\\mathbf{A}s_j))}{|\\mathbf{A}s_j|}
\\right \\}
where :math:`\\mathbf{A}s_j` is a small region surrounding location
:math:`s_j` with area :math:`|\\mathbf{A}s_j|`, and
:math:`E(Y(\\mathbf{A}s_j))` is the expected number of event points in
:math:`\\mathbf{A}s_j`.
The intensity is the mean number of event points per unit of area at
point :math:`s_j`.
"""
return self.n / self.window.area
lambda_window = cached_property(_lambda_window)
def _hull_area(self):
"""
Area of convex hull
"""
h = self.hull
if not np.alltrue(h[0] == h[-1]):
# not in closed cartographic form
h = np.vstack((h, h[0]))
s = h[:-1, 0] * h[1:, 1] - h[1:, 0] * h[:-1, 1]
return s.sum() / 2.
hull_area = cached_property(_hull_area)
def _lambda_hull(self):
"""
Intensity based on convex hull
"""
return self.n * 1. / self.hull_area
lambda_hull = cached_property(_lambda_hull)
def _build_tree(self):
return KDTree(self.points)
tree = cached_property(_build_tree)
def knn(self, k=1):
"""
Find k nearest neighbors for each point in the pattern
Parameters
----------
k: int
number of nearest neighbors to find
Returns
-------
nn: array (n x k)
row i column j contains the id for i's jth nearest neighbor
nnd: array(n x k)
row i column j contains the distance between i and its jth
nearest neighbor
"""
if k < 1:
raise ValueError('k must be at least 1')
nn = self.tree.query(self.tree.data, k=k+1)
return nn[1][:, 1:], nn[0][:, 1:]
def _nn_sum(self):
"""
Nearest neighbor distances
"""
ids, nnd = self.knn(1)
return nnd
nnd = cached_property(_nn_sum) # nearest neighbor distances
def _min_nnd(self):
"""
Min nearest neighbor distance
"""
return self.nnd.min()
min_nnd = cached_property(_min_nnd)
def _max_nnd(self):
"""
Max nearest neighbor distance
"""
return self.nnd.max()
max_nnd = cached_property(_max_nnd)
def _mean_nnd(self):
"""
Mean nearest neighbor distance
"""
return self.nnd.mean()
mean_nnd = cached_property(_mean_nnd)
def find_pairs(self, r):
"""
Find all pairs of points in the pattern that are within r units of each
other
Parameters
----------
r: float
diameter of pair circle
Returns
-------
s: set
pairs of points within r units of each other
"""
return self.tree.query_pairs(r)
def knn_other(self, other, k=1):
"""
Find k nearest neighbors in the pattern for each point in other
Parameters
----------
other: PointPattern
:py:class:`pointpats.PointPattern`
k: int
number of nearest neighbors to find
Returns
-------
nn: array (n x k)
row i column j contains the id for i's jth nearest neighbor
nnd: array(n x k)
row i column j contains the distance between i and its jth
nearest neighbor
"""
if k < 1:
raise ValueError('k must be at least 1')
try:
nn = self.tree.query(other.points, k=k)
except:
nn = self.tree.query(other, k=k)
return nn[1], nn[0]
def explode(self, mark):
"""
Explode a marked point pattern into a sequence of individual point
patterns. If the mark has k unique values, then the sequence will be of
length k.
Parameters
----------
mark: string
The label of the mark to use for the subsetting
Returns
-------
pps: list
sequence of :class:`PointPattern` instances
"""
uv = np.unique(self.df[mark])
pps = [self.df[self.df[mark] == v] for v in uv]
names = self.df.columns.values.tolist()
cnames = self.coord_names
return[PointPattern(pp, names=names, coord_names=cnames) for pp in pps]
def unique(self):
""" Remove duplicate points in the point pattern.
Two points in a point pattern are deemed to be identical if their
coordinates are the same, and their marks are the same (if any)
Returns
-------
pp: list
A deduplicated :class:`PointPattern` instance
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1.2, 2.1], [1.2, 2.1], [0, 1], [1, 2]]
>>> pp = PointPattern(points)
>>> pp.unique().df
x y
0 1.2 2.1
2 0.0 1.0
3 1.0 2.0
"""
names = self.df.columns.values.tolist()
coord_names = self.coord_names
window = self.set_window
unique_df = self.df.drop_duplicates()
return PointPattern(unique_df, names=names, coord_names=coord_names,
window=window)
def superimpose(self, point_pattern):
"""Returns a superimposed point pattern.
Parameters
----------
point_pattern:
:class:`PointPattern` instance
Returns
-------
superimposed :
:class:`PointPattern` instance
Examples
--------
>>> from pointpats import PointPattern
>>> points1 = [[1, 3], [4, 5], [0, 0]]
>>> points2 = [[5, 6], [1, 4], [0, 0]]
>>> pp1 = PointPattern(points1)
>>> pp2 = PointPattern(points2)
>>> pp1.superimpose(pp2).points
x y
0 1 3
1 4 5
2 0 0
0 5 6
1 1 4
"""
names_pp1 = self.df.columns.values.tolist()
cnames_pp1 = self.coord_names
names_pp2 = point_pattern.df.columns.values.tolist()
cnames_pp2 = point_pattern.coord_names
if names_pp1 != names_pp2 or cnames_pp1 != cnames_pp2:
raise TypeError('Both point patterns should have similar\
attributes and spatial coordinates ')
pp = pd.concat((self.df, point_pattern.df))
pp = pp.drop_duplicates()
return PointPattern(pp, names=names_pp1, coord_names=cnames_pp1)
def flip_coordinates(self):
""" Flips the coordinates of a point pattern.
Doesn't change the structure of data frame. This function swaps
`_x` and `_y` variables, which are used to represent coordinates.
"""
self._x, self._y = self._y, self._x
# Pandas facade
def _facade(self):
self.head = self.df.head
self.tail = self.df.tail
```
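A sketch exercising the PointPattern class defined above, using a subset of the coordinates from its docstring example.
```python
# Sketch: build a small PointPattern and query a few of the cached
# properties and neighbor methods defined above.
from pointpats import PointPattern

points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
          [9.47, 31.02], [30.78, 60.10], [75.21, 58.93]]
pp = PointPattern(points)

pp.summary()                       # n, bounding box, window intensity, head()
print(pp.mean_nnd, pp.hull_area)   # mean nearest-neighbor distance, hull area
ids, dists = pp.knn(k=2)           # two nearest neighbors for every point
print(ids.shape, dists.shape)      # (6, 2) and (6, 2)
```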
#### File: pointpats/pointpats/spacetime.py
```python
__author__ = "<NAME> <<EMAIL>>", "<NAME> \
<<EMAIL>>", "<NAME> <<EMAIL>"
__all__ = ['SpaceTimeEvents', 'knox', 'mantel', 'jacquez', 'modified_knox']
import os
import libpysal as lps
import numpy as np
import scipy.stats as stats
from libpysal import cg
from datetime import date
class SpaceTimeEvents:
"""
Method for reformatting event data stored in a shapefile for use in
calculating metrics of spatio-temporal interaction.
Parameters
----------
path : string
the path to the appropriate shapefile, including the
file name and extension
time : string
column header in the DBF file indicating the column
containing the time stamp.
infer_timestamp : bool, optional
if the column containing the timestamp is formatted as
calendar dates, try to coerce them into Python datetime
objects (the default is False).
Attributes
----------
n : int
number of events.
x : array
(n, 1), array of the x coordinates for the events.
y : array
(n, 1), array of the y coordinates for the events.
t : array
(n, 1), array of the temporal coordinates for the events.
space : array
(n, 2), array of the spatial coordinates (x,y) for the
events.
time : array
(n, 2), array of the temporal coordinates (t,1) for the
events, the second column is a vector of ones.
Examples
--------
Read in the example shapefile data, ensuring to omit the file
extension. In order to successfully create the event data the .dbf file
associated with the shapefile should have a column of values that are a
timestamp for the events. This timestamp may be a numerical value
or a date. Date inference was added in version 1.6.
>>> import libpysal as lps
>>> path = lps.examples.get_path("burkitt.shp")
>>> from pointpats import SpaceTimeEvents
Create an instance of SpaceTimeEvents from a shapefile, where the
temporal information is stored in a column named "T".
>>> events = SpaceTimeEvents(path,'T')
See how many events are in the instance.
>>> events.n
188
Check the spatial coordinates of the first event.
>>> events.space[0]
array([300., 302.])
Check the time of the first event.
>>> events.t[0]
array([413.])
Calculate the time difference between the first two events.
>>> events.t[1] - events.t[0]
array([59.])
New, in 1.6, date support:
Now, create an instance of SpaceTimeEvents from a shapefile, where the
temporal information is stored in a column named "DATE".
>>> events = SpaceTimeEvents(path,'DATE')
See how many events are in the instance.
>>> events.n
188
Check the spatial coordinates of the first event.
>>> events.space[0]
array([300., 302.])
Check the time of the first event. Note that this value is equivalent to
413 days after January 1, 1900.
>>> events.t[0][0]
datetime.date(1901, 2, 16)
Calculate the time difference between the first two events.
>>> (events.t[1][0] - events.t[0][0]).days
59
"""
def __init__(self, path, time_col, infer_timestamp=False):
shp = lps.io.open(path)
head, tail = os.path.split(path)
dbf_tail = tail.split(".")[0]+".dbf"
dbf = lps.io.open(lps.examples.get_path(dbf_tail))
# extract the spatial coordinates from the shapefile
x = [coords[0] for coords in shp]
y = [coords[1] for coords in shp]
self.n = n = len(shp)
x = np.array(x)
y = np.array(y)
self.x = np.reshape(x, (n, 1))
self.y = np.reshape(y, (n, 1))
self.space = np.hstack((self.x, self.y))
# extract the temporal information from the database
if infer_timestamp:
col = dbf.by_col(time_col)
if isinstance(col[0], date):
day1 = min(col)
col = [(d - day1).days for d in col]
t = np.array(col)
else:
print("Unable to parse your time column as Python datetime \
objects, proceeding as integers.")
t = np.array(col)
else:
t = np.array(dbf.by_col(time_col))
line = np.ones((n, 1))
self.t = np.reshape(t, (n, 1))
self.time = np.hstack((self.t, line))
# close open objects
dbf.close()
shp.close()
def knox(s_coords, t_coords, delta, tau, permutations=99, debug=False):
"""
Knox test for spatio-temporal interaction. :cite:`Knox:1964`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
delta : float
threshold for proximity in space.
tau : float
threshold for proximity in time.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
debug : bool, optional
if true, debugging information is printed (the default is
False).
Returns
-------
knox_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the knox test for the dataset.
pvalue : float
pseudo p-value associated with the statistic.
counts : int
count of space time neighbors.
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, knox
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path,'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
Run the Knox test with distance and time thresholds of 20 and 5,
respectively. This counts the events that are closer than 20 units in
space, and 5 units in time.
>>> result = knox(events.space, events.t, delta=20, tau=5, permutations=99)
Next, we examine the results. First, we call the statistic from the
results dictionary. This reports that there are 13 events close
in both space and time, according to our threshold definitions.
>>> result['stat'] == 13
True
Next, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistics. In this case,
the results indicate there is likely no space-time interaction between
the events.
>>> print("%2.2f"%result['pvalue'])
0.17
"""
# Do a kdtree on space first as the number of ties (identical points) is
# likely to be lower for space than time.
kd_s = cg.KDTree(s_coords)
neigh_s = kd_s.query_pairs(delta)
tau2 = tau * tau
ids = np.array(list(neigh_s))
# For the neighboring pairs in space, determine which are also time
# neighbors
d_t = (t_coords[ids[:, 0]] - t_coords[ids[:, 1]]) ** 2
n_st = sum(d_t <= tau2)
knox_result = {'stat': n_st[0]}
if permutations:
joint = np.zeros((permutations, 1), int)
for p in range(permutations):
np.random.shuffle(t_coords)
d_t = (t_coords[ids[:, 0]] - t_coords[ids[:, 1]]) ** 2
joint[p] = np.sum(d_t <= tau2)
larger = sum(joint >= n_st[0])
if (permutations - larger) < larger:
larger = permutations - larger
p_sim = (larger + 1.) / (permutations + 1.)
knox_result['pvalue'] = p_sim
return knox_result
def mantel(s_coords, t_coords, permutations=99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0):
"""
Standardized Mantel test for spatio-temporal interaction. :cite:`Mantel:1967`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
scon : float, optional
constant added to spatial distances (the default is 1.0).
spow : float, optional
value for power transformation for spatial distances
(the default is -1.0).
tcon : float, optional
constant added to temporal distances (the default is 1.0).
tpow : float, optional
value for power transformation for temporal distances
(the default is -1.0).
Returns
-------
mantel_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the knox test for the dataset.
pvalue : float
pseudo p-value associated with the statistic.
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, mantel
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path,'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
The standardized Mantel test is a measure of matrix correlation between
the spatial and temporal distance matrices of the event dataset. The
following example runs the standardized Mantel test without a constant
or transformation; however, as recommended by :cite:`Mantel:1967`, these
should be added by the user. This can be done by adjusting the constant
and power parameters.
>>> result = mantel(events.space, events.t, 99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0)
Next, we examine the result of the test.
>>> print("%6.6f"%result['stat'])
0.048368
Finally, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistic for each of the 99
permutations. According to these parameters, the results indicate
space-time interaction between the events.
>>> print("%2.2f"%result['pvalue'])
0.01
"""
t = t_coords
s = s_coords
n = len(t)
# calculate the spatial and temporal distance matrices for the events
distmat = cg.distance_matrix(s)
timemat = cg.distance_matrix(t)
# calculate the transformed standardized statistic
timevec = (timemat[np.tril_indices(timemat.shape[0], k = -1)] + tcon) ** tpow
distvec = (distmat[np.tril_indices(distmat.shape[0], k = -1)] + scon) ** spow
stat = stats.pearsonr(timevec, distvec)[0].sum()
# return the results (if no inference)
if not permutations:
return stat
# loop for generating a random distribution to assess significance
dist = []
for i in range(permutations):
trand = _shuffle_matrix(timemat, np.arange(n))
timevec = (trand[np.tril_indices(trand.shape[0], k = -1)] + tcon) ** tpow
m = stats.pearsonr(timevec, distvec)[0].sum()
dist.append(m)
## establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
mantel_result = {'stat': stat, 'pvalue': pvalue}
return mantel_result
def jacquez(s_coords, t_coords, k, permutations=99):
"""
Jacquez k nearest neighbors test for spatio-temporal interaction.
:cite:`Jacquez:1996`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
k : int
the number of nearest neighbors to be searched.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
Returns
-------
jacquez_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the Jacquez k nearest neighbors test for the
dataset.
pvalue : float
pseudo p-value associated with the statistic, derived from the
permutation distribution.
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, jacquez
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path,'T')
The Jacquez test counts the number of events that are k nearest
neighbors in both time and space. The following runs the Jacquez test
on the example data and reports the resulting statistic. In this case,
there are 13 instances where events are nearest neighbors in both space
and time.
# turning off as kdtree changes from scipy < 0.12 return 13
>>> np.random.seed(100)
>>> result = jacquez(events.space, events.t ,k=3,permutations=99)
>>> print(result['stat'])
13
The significance of this can be assessed by calling the p-
value from the results dictionary, as shown below. Again, no
space-time interaction is observed.
>>> result['pvalue'] < 0.01
False
"""
time = t_coords
space = s_coords
n = len(time)
# calculate the nearest neighbors in space and time separately
knnt = lps.weights.KNN.from_array(time, k)
knns = lps.weights.KNN.from_array(space, k)
nnt = knnt.neighbors
nns = knns.neighbors
knn_sum = 0
# determine which events are nearest neighbors in both space and time
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
knn_sum += count
stat = knn_sum
# return the results (if no inference)
if not permutations:
return stat
# loop for generating a random distribution to assess significance
dist = []
for p in range(permutations):
j = 0
trand = np.random.permutation(time)
knnt = lps.weights.KNN.from_array(trand, k)
nnt = knnt.neighbors
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
j += count
dist.append(j)
# establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
jacquez_result = {'stat': stat, 'pvalue': pvalue}
return jacquez_result
def modified_knox(s_coords, t_coords, delta, tau, permutations=99):
"""
Baker's modified Knox test for spatio-temporal interaction.
:cite:`Baker:2004`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
delta : float
threshold for proximity in space.
tau : float
threshold for proximity in time.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
Returns
-------
modknox_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the modified knox test for the dataset.
pvalue : float
pseudo p-value associated with the statistic.
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, modified_knox
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path, 'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
Run the modified Knox test with distance and time thresholds of 20 and 5,
respectively. This counts the events that are closer than 20 units in
space, and 5 units in time.
>>> result = modified_knox(events.space, events.t, delta=20, tau=5, permutations=99)
Next, we examine the results. First, we call the statistic from the
results dictionary. This reports the difference between the observed
and expected Knox statistic.
>>> print("%2.8f" % result['stat'])
2.81016043
Next, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistics. In this case,
the results indicate there is likely no space-time interaction.
>>> print("%2.2f" % result['pvalue'])
0.11
"""
s = s_coords
t = t_coords
n = len(t)
# calculate the spatial and temporal distance matrices for the events
sdistmat = cg.distance_matrix(s)
tdistmat = cg.distance_matrix(t)
# identify events within thresholds
spacmat = np.ones((n, n))
spacbin = sdistmat <= delta
spacmat = spacmat * spacbin
timemat = np.ones((n, n))
timebin = tdistmat <= tau
timemat = timemat * timebin
# calculate the observed (original) statistic
knoxmat = timemat * spacmat
obsstat = (knoxmat.sum() - n)
# calculate the expected value
ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1))
tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1))
expstat = (ssumvec * tsumvec).sum()
# calculate the modified stat
stat = (obsstat - (expstat / (n - 1.0))) / 2.0
# return results (if no inference)
if not permutations:
return stat
distribution = []
# loop for generating a random distribution to assess significance
for p in range(permutations):
rtdistmat = _shuffle_matrix(tdistmat, list(range(n)))
timemat = np.ones((n, n))
timebin = rtdistmat <= tau
timemat = timemat * timebin
# calculate the observed knox again
knoxmat = timemat * spacmat
obsstat = (knoxmat.sum() - n)
# calculate the expected value again
ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1))
tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1))
expstat = (ssumvec * tsumvec).sum()
# calculate the modified stat
tempstat = (obsstat - (expstat / (n - 1.0))) / 2.0
distribution.append(tempstat)
# establish the pseudo significance of the observed statistic
distribution = np.array(distribution)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# return results
modknox_result = {'stat': stat, 'pvalue': pvalue}
return modknox_result
def _shuffle_matrix(X, ids):
"""
Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permuted.
ids : array
range (k, ).
Returns
-------
: array
(k, k) with rows and columns randomly shuffled.
"""
np.random.shuffle(ids)
return X[ids, :][:, ids]
```
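The permutation tests above (Mantel, Jacquez, and the modified Knox) all share the same inference pattern: shuffle the temporal labels, recompute the statistic, and report a pseudo p-value of (count + 1) / (permutations + 1). A minimal sketch of that pattern, with a hypothetical `my_statistic` callable standing in for the real statistics:
```python
# A minimal sketch of the permutation inference used above; `my_statistic`
# is a hypothetical placeholder for a Knox/Mantel/Jacquez-style statistic.
import numpy as np

def pseudo_pvalue(s_coords, t_coords, my_statistic, permutations=99):
    observed = my_statistic(s_coords, t_coords)
    count = 0
    for _ in range(permutations):
        # shuffling the timestamps breaks any genuine space-time link
        shuffled_t = np.random.permutation(t_coords)
        if my_statistic(s_coords, shuffled_t) >= observed:
            count += 1
    # the +1 terms count the observed statistic as one of the permutations
    return (count + 1.0) / (permutations + 1.0)
```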
#### File: pointpats/tests/test_quadrat_statistics.py
```python
import unittest
import numpy as np
from ..quadrat_statistics import *
from ..pointpattern import PointPattern
from libpysal.common import RTOL, ATOL
class TestQuadratStatistics(unittest.TestCase):
def setUp(self):
self.points = [
[94., 93.], [80., 95.], [79., 90.], [78., 92.], [76., 92.], [66., 93.], [64., 90.], [27., 70.], [58., 88.],
[57., 92.], [53., 92.], [50., 90.], [49., 90.], [32., 90.], [31., 87.], [22., 87.], [21., 87.], [21., 86.],
[22., 81.], [23., 83.], [27., 85.], [27., 84.], [27., 83.], [27., 82.], [30., 84.], [31., 84.], [31., 84.],
[32., 83.], [33., 81.], [32., 79.], [32., 76.], [33., 77.], [34., 86.], [34., 84.], [38., 82.], [39., 81.],
[40., 80.], [41., 83.], [43., 75.], [44., 81.], [46., 81.], [47., 82.], [47., 81.], [48., 80.], [48., 81.],
[50., 85.], [51., 84.], [52., 83.], [55., 85.], [57., 88.], [57., 81.], [60., 87.], [69., 80.], [71., 82.],
[72., 81.], [74., 82.], [75., 81.], [77., 88.], [80., 88.], [82., 77.], [66., 62.], [64., 71.], [59., 63.],
[55., 64.], [53., 68.], [52., 59.], [51., 61.], [50., 75.], [50., 74.], [45., 61.], [44., 60.], [43., 59.],
[42., 61.], [39., 71.], [37., 67.], [35., 70.], [31., 68.], [30., 71.], [29., 61.], [26., 69.], [24., 68.],
[7., 52.], [11., 53.], [34., 50.], [36., 47.], [37., 45.], [37., 56.], [38., 55.], [38., 50.], [39., 52.],
[41., 52.], [47., 49.], [50., 57.], [52., 56.], [53., 55.], [56., 57.], [69., 52.], [69., 50.], [71., 51.],
[71., 51.], [73., 48.], [74., 48.], [75., 46.], [75., 46.], [86., 51.], [87., 51.], [87., 52.], [90., 52.],
[91., 51.], [87., 42.], [81., 39.], [80., 43.], [79., 37.], [78., 38.], [75., 44.], [73., 41.], [71., 44.],
[68., 29.], [62., 33.], [61., 35.], [60., 34.], [58., 36.], [54., 30.], [52., 38.], [52., 36.], [47., 37.],
[46., 36.], [45., 33.], [36., 32.], [22., 39.], [21., 38.], [22., 35.], [21., 36.], [22., 30.], [19., 29.],
[17., 40.], [14., 41.], [13., 36.], [10., 34.], [7., 37.], [2., 39.], [21., 16.], [22., 14.], [29., 17.],
[30., 25.], [32., 26.], [39., 28.], [40., 26.], [40., 26.], [42., 25.], [43., 24.], [43., 16.], [48., 16.],
[51., 25.], [52., 26.], [57., 27.], [60., 22.], [63., 24.], [64., 23.], [64., 27.], [71., 25.], [50., 10.],
[48., 12.], [45., 14.], [33., 8.], [31., 7.], [32., 6.], [31., 8.]
]
self.pp = PointPattern(self.points)
def test_QStatistic(self):
q_r = QStatistic(self.pp, shape="rectangle", nx=3, ny=3)
np.testing.assert_allclose(q_r.chi2, 33.1071428571, RTOL)
np.testing.assert_allclose(q_r.chi2_pvalue, 5.89097854516e-05, ATOL)
assert q_r.df == 8
q_r = QStatistic(self.pp, shape="hexagon", lh=10)
np.testing.assert_allclose(q_r.chi2, 195.0, RTOL)
np.testing.assert_allclose(q_r.chi2_pvalue, 6.3759506952e-22, RTOL)
assert q_r.df == 41
def test_RectangleM1(self):
rm = RectangleM(self.pp, count_column = 3, count_row = 3)
np.testing.assert_array_equal(list(rm.point_location_sta().values()),
[12, 22, 4, 11, 26, 22, 22, 33, 16])
def test_RectangleM2(self):
hm = HexagonM(self.pp, lh = 10)
np.testing.assert_array_equal(list(hm.point_location_sta().values()),
[0, 2, 4, 5, 0, 0, 0, 0, 9, 6, 10, 7, 3, 0, 2, 2, 3, 7, 4,
13, 1, 1, 1, 4, 11, 3, 0, 4, 0, 5, 15, 15, 3, 10, 0, 0,
0, 9, 0, 7, 1, 1])
```
#### File: pointpats/pointpats/window.py
```python
__author__ = "<NAME> <EMAIL>"
import libpysal as ps
import numpy as np
__all__ = ["as_window", "poly_from_bbox", "to_ccf", "Window"]
def poly_from_bbox(bbox):
l, b, r, t = bbox
c = [(l, b), (l, t), (r, t), (r, b), (l, b)]
return ps.cg.shapes.Polygon(c)
def to_ccf(poly):
if poly[-1] != poly[0]:
poly.append(poly[0])
return poly
def as_window(pysal_polygon):
"""
Convert a libpysal polygon to a Window.
Parameters
----------
pysal_polygon: libpysal.cg.shapes.Polygon
libpysal Polygon instance.
Returns
-------
Window
A Window instance.
"""
if pysal_polygon.holes == [[]]:
return Window(pysal_polygon.parts)
else:
return Window(pysal_polygon.parts, pysal_polygon.holes)
class Window(ps.cg.Polygon):
"""
Geometric container for point patterns.
A window is used to define the area over which the pattern is observed.
This area is used in estimating the intensity of the point pattern.
See :attr:`PointPattern.lambda_window`.
Parameters
----------
parts: sequence
A sequence of rings which bound the positive space point
pattern.
holes: sequence
A sequence of rings which bound holes in the polygons that bound the
point pattern.
"""
def __init__(self, parts, holes=[]):
if holes:
super(Window, self).__init__(parts, holes)
else:
super(Window, self).__init__(parts)
def filter_contained(self, points):
return [np.asarray(pnt) for pnt in points if self.contains_point(pnt)]
```
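`as_window` above wraps a libpysal polygon so it can act as the observation window of a point pattern. A short usage sketch (the bounding-box values are illustrative, and the import path is assumed from the file location above):
```python
# A minimal sketch, assuming libpysal and pointpats are installed;
# the bbox values are arbitrary illustration data.
from pointpats.window import poly_from_bbox, as_window

bbox = (0.0, 0.0, 10.0, 5.0)           # (left, bottom, right, top)
poly = poly_from_bbox(bbox)            # closed libpysal Polygon covering the bbox
window = as_window(poly)               # Window usable when building a PointPattern
print(window.area)                     # 50.0 for this rectangle
```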
|
{
"source": "jGaboardi/shapely",
"score": 2
}
|
#### File: tests/geometry/test_hash.py
```python
from shapely import GeometryCollection, LineString, MultiPoint, Point
def test_point():
g = Point(1, 2)
assert hash(g) == hash(Point(1, 2))
assert hash(g) != hash(Point(1, 3))
def test_multipoint():
g = MultiPoint([(1, 2), (3, 4)])
assert hash(g) == hash(MultiPoint([(1, 2), (3, 4)]))
assert hash(g) != hash(MultiPoint([(1, 2), (3, 3)]))
def test_linestring():
g = LineString([(1, 2), (3, 4)])
assert hash(g) == hash(LineString([(1, 2), (3, 4)]))
assert hash(g) != hash(LineString([(1, 2), (3, 3)]))
def test_polygon():
g = Point(0, 0).buffer(1.0)
assert hash(g) == hash(Point(0, 0).buffer(1.0))
assert hash(g) != hash(Point(0, 0).buffer(1.1))
def test_collection():
g = GeometryCollection([Point(1, 2), LineString([(1, 2), (3, 4)])])
assert hash(g) == hash(
GeometryCollection([Point(1, 2), LineString([(1, 2), (3, 4)])])
)
assert hash(g) != hash(
GeometryCollection([Point(1, 2), LineString([(1, 2), (3, 3)])])
)
```
|
{
"source": "jGaboardi/Shapely",
"score": 2
}
|
#### File: tests/geometry/test_linestring.py
```python
import numpy as np
import shapely
from shapely.coords import CoordinateSequence
from shapely.geometry import LineString, Point, LinearRing
import pytest
def test_from_coordinate_sequence():
# From coordinate tuples
line = LineString(((1.0, 2.0), (3.0, 4.0)))
assert len(line.coords) == 2
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
line = LineString([(1.0, 2.0), (3.0, 4.0)])
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
def test_from_coordinate_sequence_3D():
line = LineString(((1.0, 2.0, 3.0), (3.0, 4.0, 5.0)))
assert line.has_z
assert line.coords[:] == [(1.0, 2.0, 3.0), (3.0, 4.0, 5.0)]
def test_from_points():
# From Points
line = LineString((Point(1.0, 2.0), Point(3.0, 4.0)))
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
line = LineString([Point(1.0, 2.0), Point(3.0, 4.0)])
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
def test_from_mix():
# From mix of tuples and Points
line = LineString((Point(1.0, 2.0), (2.0, 3.0), Point(3.0, 4.0)))
assert line.coords[:] == [(1.0, 2.0), (2.0, 3.0), (3.0, 4.0)]
def test_from_linestring():
# From another linestring
line = LineString(((1.0, 2.0), (3.0, 4.0)))
copy = LineString(line)
assert copy.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
assert copy.geom_type == "LineString"
def test_from_linearring():
coords = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)]
ring = LinearRing(coords)
copy = LineString(ring)
assert copy.coords[:] == coords
assert copy.geom_type == "LineString"
def test_from_linestring_z():
coords = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]
line = LineString(coords)
copy = LineString(line)
assert copy.coords[:] == coords
assert copy.geom_type == "LineString"
def test_from_generator():
gen = (coord for coord in [(1.0, 2.0), (3.0, 4.0)])
line = LineString(gen)
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
def test_from_empty():
line = LineString()
assert line.is_empty
assert isinstance(line.coords, CoordinateSequence)
assert line.coords[:] == []
line = LineString([])
assert line.is_empty
assert isinstance(line.coords, CoordinateSequence)
assert line.coords[:] == []
def test_from_numpy():
# Construct from a numpy array
line = LineString(np.array([[1.0, 2.0], [3.0, 4.0]]))
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
@pytest.mark.filterwarnings("error:An exception was ignored") # NumPy 1.21
def test_numpy_empty_linestring_coords():
# Check empty
line = LineString([])
la = np.asarray(line.coords)
assert la.shape == (0, 2)
@pytest.mark.filterwarnings("error:An exception was ignored") # NumPy 1.21
def test_numpy_object_array():
geom = LineString([(0.0, 0.0), (0.0, 1.0)])
ar = np.empty(1, object)
ar[:] = [geom]
assert ar[0] == geom
# TODO(shapely-2.0)
@pytest.mark.xfail(strict=False, reason="Not yet implemented for Shapely 2.0")
def test_from_invalid_dim():
# TODO(shapely-2.0) better error message?
# pytest.raises(ValueError, match="at least 2 coordinate tuples|at least 2 coordinates"):
with pytest.raises(shapely.GEOSException):
LineString([(1, 2)])
with pytest.raises(
ValueError,
match="Inconsistent coordinate dimensionality|Input operand 0 does not have enough dimensions",
):
LineString([(1, 2, 3), (4, 5)])
with pytest.raises(
ValueError,
match="Inconsistent coordinate dimensionality|Input operand 0 does not have enough dimensions",
):
LineString([(1, 2), (3, 4, 5)])
# TODO better error, right now raises AssertionError
with pytest.raises(Exception):
LineString([(1, 2, 3, 4), (4, 5, 6, 7)])
def test_from_single_coordinate():
"""Test for issue #486"""
coords = [[-122.185933073564, 37.3629353839073]]
with pytest.raises(shapely.GEOSException):
ls = LineString(coords)
ls.geom_type # caused segfault before fix
class TestLineString:
def test_linestring(self):
# From coordinate tuples
line = LineString(((1.0, 2.0), (3.0, 4.0)))
assert len(line.coords) == 2
assert line.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
# Bounds
assert line.bounds == (1.0, 2.0, 3.0, 4.0)
# Coordinate access
assert tuple(line.coords) == ((1.0, 2.0), (3.0, 4.0))
assert line.coords[0] == (1.0, 2.0)
assert line.coords[1] == (3.0, 4.0)
with pytest.raises(IndexError):
line.coords[2] # index out of range
# Geo interface
assert line.__geo_interface__ == {
"type": "LineString",
"coordinates": ((1.0, 2.0), (3.0, 4.0)),
}
def test_linestring_empty(self):
# Test Non-operability of Null geometry
l_null = LineString()
assert l_null.wkt == "LINESTRING EMPTY"
assert l_null.length == 0.0
def test_equals_argument_order(self):
"""
Test equals predicate functions correctly regardless of the order
of the inputs. See issue #317.
"""
coords = ((0, 0), (1, 0), (1, 1), (0, 0))
ls = LineString(coords)
lr = LinearRing(coords)
assert ls.__eq__(lr) is False # previously incorrectly returned True
assert lr.__eq__(ls) is False
assert (ls == lr) is False
assert (lr == ls) is False
ls_clone = LineString(coords)
lr_clone = LinearRing(coords)
assert ls.__eq__(ls_clone) is True
assert lr.__eq__(lr_clone) is True
assert (ls == ls_clone) is True
assert (lr == lr_clone) is True
def test_numpy_linestring_coords(self):
from numpy.testing import assert_array_equal
line = LineString(((1.0, 2.0), (3.0, 4.0)))
expected = np.array([[1.0, 2.0], [3.0, 4.0]])
# Coordinate sequences can be adapted as well
la = np.asarray(line.coords)
assert_array_equal(la, expected)
def test_linestring_immutable():
line = LineString(((1.0, 2.0), (3.0, 4.0)))
with pytest.raises(AttributeError):
line.coords = [(-1.0, -1.0), (1.0, 1.0)]
with pytest.raises(TypeError):
line.coords[0] = (-1.0, -1.0)
def test_linestring_array_coercion():
# don't convert to array of coordinates, keep objects
line = LineString(((1.0, 2.0), (3.0, 4.0)))
arr = np.array(line)
assert arr.ndim == 0
assert arr.size == 1
assert arr.dtype == np.dtype("object")
assert arr.item() == line
```
#### File: tests/geometry/test_multipolygon.py
```python
import numpy as np
import pytest
from shapely.geometry import Polygon, MultiPolygon
from shapely.geometry.base import dump_coords
from .test_multi import MultiGeometryTestCase
class TestMultiPolygon(MultiGeometryTestCase):
def test_multipolygon(self):
# From coordinate tuples
coords = [
(
((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)),
[((0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25))],
)
]
geom = MultiPolygon(coords)
assert isinstance(geom, MultiPolygon)
assert len(geom.geoms) == 1
assert dump_coords(geom) == [
[
(0.0, 0.0),
(0.0, 1.0),
(1.0, 1.0),
(1.0, 0.0),
(0.0, 0.0),
[(0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25), (0.25, 0.25)],
]
]
# Or from polygons
p = Polygon(
((0, 0), (0, 1), (1, 1), (1, 0)),
[((0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25))],
)
geom = MultiPolygon([p])
assert len(geom.geoms) == 1
assert dump_coords(geom) == [
[
(0.0, 0.0),
(0.0, 1.0),
(1.0, 1.0),
(1.0, 0.0),
(0.0, 0.0),
[(0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25), (0.25, 0.25)],
]
]
# Or from another multi-polygon
geom2 = MultiPolygon(geom)
assert len(geom2.geoms) == 1
assert dump_coords(geom2) == [
[
(0.0, 0.0),
(0.0, 1.0),
(1.0, 1.0),
(1.0, 0.0),
(0.0, 0.0),
[(0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25), (0.25, 0.25)],
]
]
# Sub-geometry Access
assert isinstance(geom.geoms[0], Polygon)
assert dump_coords(geom.geoms[0]) == [
(0.0, 0.0),
(0.0, 1.0),
(1.0, 1.0),
(1.0, 0.0),
(0.0, 0.0),
[(0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25), (0.25, 0.25)],
]
with pytest.raises(IndexError): # index out of range
geom.geoms[1]
# Geo interface
assert geom.__geo_interface__ == {
"type": "MultiPolygon",
"coordinates": [
(
((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)),
((0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25), (0.25, 0.25)),
)
],
}
def test_subgeom_access(self):
poly0 = Polygon([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
poly1 = Polygon([(0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25)])
self.subgeom_access_test(MultiPolygon, [poly0, poly1])
def test_fail_list_of_multipolygons():
"""A list of multipolygons is not a valid multipolygon ctor argument"""
multi = MultiPolygon(
[
(
((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)),
[((0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25))],
)
]
)
with pytest.raises(ValueError):
MultiPolygon([multi])
@pytest.mark.filterwarnings("error:An exception was ignored") # NumPy 1.21
def test_numpy_object_array():
geom = MultiPolygon(
[
(
((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)),
[((0.25, 0.25), (0.25, 0.5), (0.5, 0.5), (0.5, 0.25))],
)
]
)
ar = np.empty(1, object)
ar[:] = [geom]
assert ar[0] == geom
```
|
{
"source": "jGaboardi/shapely",
"score": 2
}
|
#### File: tests/geometry/test_multi.py
```python
import numpy as np
test_int_types = [int, np.int16, np.int32, np.int64]
class MultiGeometryTestCase:
def subgeom_access_test(self, cls, geoms):
geom = cls(geoms)
for t in test_int_types:
for i, g in enumerate(geoms):
assert geom.geoms[t(i)] == geoms[i]
```
#### File: shapely/tests/test_prepared.py
```python
import pytest
import numpy as np
from shapely.geometry import Point, Polygon
from shapely.prepared import PreparedGeometry, prep
def test_prepared_geometry():
polygon = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
p = PreparedGeometry(polygon)
assert p.contains(Point(0.5, 0.5))
assert not p.contains(Point(0.5, 1.5))
def test_prep():
polygon = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
p = prep(polygon)
assert p.contains(Point(0.5, 0.5))
assert not p.contains(Point(0.5, 1.5))
def test_op_not_allowed():
p = PreparedGeometry(Point(0.0, 0.0).buffer(1.0))
with pytest.raises(TypeError):
Point(0.0, 0.0).union(p)
def test_predicate_not_allowed():
p = PreparedGeometry(Point(0.0, 0.0).buffer(1.0))
with pytest.raises(TypeError):
Point(0.0, 0.0).contains(p)
def test_prepared_predicates():
# check prepared predicates give the same result as regular predicates
polygon1 = Polygon([
(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)
])
polygon2 = Polygon([
(0.5, 0.5), (1.5, 0.5), (1.0, 1.0), (0.5, 0.5)
])
point2 = Point(0.5, 0.5)
polygon_empty = Polygon()
prepared_polygon1 = PreparedGeometry(polygon1)
for geom2 in (polygon2, point2, polygon_empty):
with np.errstate(invalid="ignore"):
assert polygon1.disjoint(geom2) == prepared_polygon1.disjoint(geom2)
assert polygon1.touches(geom2) == prepared_polygon1.touches(geom2)
assert polygon1.intersects(geom2) == prepared_polygon1.intersects(geom2)
assert polygon1.crosses(geom2) == prepared_polygon1.crosses(geom2)
assert polygon1.within(geom2) == prepared_polygon1.within(geom2)
assert polygon1.contains(geom2) == prepared_polygon1.contains(geom2)
assert polygon1.overlaps(geom2) == prepared_polygon1.overlaps(geom2)
def test_prepare_already_prepared():
polygon = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
prepared = prep(polygon)
# attempt to prepare an already prepared geometry with `prep`
result = prep(prepared)
assert isinstance(result, PreparedGeometry)
assert result.context is polygon
# attempt to prepare an already prepared geometry with `PreparedGeometry`
result = PreparedGeometry(prepared)
assert isinstance(result, PreparedGeometry)
assert result.context is polygon
```
#### File: shapely/tests/test_strtree.py
```python
import gc
import os
import pickle
import subprocess
import sys
import pytest
from shapely.geometry import Point, Polygon, box
from shapely.geos import geos_version
from shapely import strtree
from shapely.strtree import STRtree
from shapely import wkt
from .conftest import requires_geos_342, shapely20_todo
import pytest
point = Point(2, 3)
empty = wkt.loads("GEOMETRYCOLLECTION EMPTY")
@requires_geos_342
@pytest.mark.parametrize("geoms", [[Point(i, i) for i in range(10)]])
@pytest.mark.parametrize(
"query_geom,num_results",
[(Point(2, 2).buffer(0.99), 1), (Point(2, 2).buffer(1.0), 3)],
)
def test_query(geoms, query_geom, num_results):
tree = STRtree(geoms)
results = tree.query(query_geom)
assert len(results) == num_results
@requires_geos_342
@pytest.mark.parametrize("geoms", [[Point(i, i) for i in range(10)]])
@pytest.mark.parametrize(
"query_geom,expected",
[(Point(2, 2).buffer(0.99), [2]), (Point(2, 2).buffer(1.0), [1, 2, 3])],
)
def test_query_enumeration_idx(geoms, query_geom, expected):
"""Store enumeration idx"""
tree = STRtree(geoms, range(len(geoms)))
results = tree.query_items(query_geom)
assert sorted(results) == sorted(expected)
@requires_geos_342
@pytest.mark.parametrize("geoms", [[Point(i, i) for i in range(5)]])
@pytest.mark.parametrize("items", [None, list(range(1, 6)), list("abcde")])
@pytest.mark.parametrize(
"query_geom,expected",
[(Point(2, 2).buffer(0.99), [2]), (Point(2, 2).buffer(1.0), [1, 2, 3])],
)
def test_query_items(geoms, items, query_geom, expected):
"""Store enumeration idx"""
tree = STRtree(geoms, items)
results = tree.query_items(query_geom)
expected = [items[idx] for idx in expected] if items is not None else expected
assert sorted(results) == sorted(expected)
@pytest.mark.parametrize(
"tree_geometry, geometry,expected",
[
([point], box(0, 0, 10, 10), [0]),
# None/empty is ignored in the tree, but the index of the valid geometry
# should be retained.
([None, point], box(0, 0, 10, 10), [1]),
([None, empty, point], box(0, 0, 10, 10), [2]),
],
)
def test_query_items_with_empty(tree_geometry, geometry, expected):
tree = STRtree(tree_geometry)
assert tree.query_items(geometry) == expected
@requires_geos_342
def test_insert_empty_geometry():
"""
Passing nothing but empty geometries results in an empty strtree.
The query segfaults if the empty geometry was actually inserted.
"""
empty = Polygon()
geoms = [empty]
tree = STRtree(geoms)
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 0
@requires_geos_342
def test_query_empty_geometry():
"""
Empty geometries should be filtered out.
The query segfaults if the empty geometry was actually inserted.
"""
empty = Polygon()
point = Point(1, 0.5)
geoms = [empty, point]
tree = STRtree(geoms)
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 1
assert results[0] == point
@requires_geos_342
def test_references():
"""Don't crash due to dangling references"""
empty = Polygon()
point = Point(1, 0.5)
geoms = [empty, point]
tree = STRtree(geoms)
empty = None
point = None
gc.collect()
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 1
assert results[0] == Point(1, 0.5)
# TODO(shapely-2.0) this fails on Appveyor, see
# https://github.com/shapely/shapely/pull/983#issuecomment-718557666
@pytest.mark.skipif(sys.platform.startswith("win32"), reason="does not run on Appveyor")
@requires_geos_342
def test_pickle_persistence():
"""
Don't crash trying to use unpickled GEOS handle.
"""
tree = STRtree([Point(i, i).buffer(0.1) for i in range(3)], range(3))
pickled_strtree = pickle.dumps(tree)
unpickle_script_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "unpickle-strtree.py")
proc = subprocess.Popen(
[sys.executable, str(unpickle_script_file_path)],
stdin=subprocess.PIPE,
)
proc.communicate(input=pickled_strtree)
proc.wait()
assert proc.returncode == 0
@pytest.mark.skipif(geos_version < (3, 6, 0), reason="GEOS 3.6.0 required")
@pytest.mark.parametrize(
"geoms",
[
[
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
Polygon([(0, 2), (1, 2), (1, 3), (0, 3)]),
Point(0, 0.5),
]
],
)
@pytest.mark.parametrize("query_geom", [Point(0, 0.4)])
def test_nearest_geom(geoms, query_geom):
tree = STRtree(geoms)
result = tree.nearest(query_geom)
assert result.geom_type == "Point"
assert result.x == 0.0
assert result.y == 0.5
@pytest.mark.skipif(geos_version < (3, 6, 0), reason="GEOS 3.6.0 required")
@pytest.mark.parametrize(
"geoms",
[
[
Point(0, 0.5),
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
Polygon([(0, 2), (1, 2), (1, 3), (0, 3)]),
]
],
)
@pytest.mark.parametrize("items", [list(range(1, 4)), list("abc")])
@pytest.mark.parametrize("query_geom", [Point(0, 0.4)])
def test_nearest_item(geoms, items, query_geom):
tree = STRtree(geoms, items)
assert tree.nearest_item(query_geom) == items[0]
@pytest.mark.parametrize(["geoms", "items"], [([], None), ([], [])])
def test_nearest_empty(geoms, items):
tree = STRtree(geoms, items)
assert tree.nearest_item(None) is None
@pytest.mark.parametrize(["geoms", "items"], [([], None), ([], [])])
def test_nearest_items(geoms, items):
tree = STRtree(geoms, items)
assert tree.nearest_item(None) is None
@shapely20_todo
@pytest.mark.skipif(geos_version < (3, 6, 0), reason="GEOS 3.6.0 required")
@pytest.mark.parametrize(
"geoms",
[
[
Point(0, 0.5),
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
Polygon([(0, 2), (1, 2), (1, 3), (0, 3)]),
]
],
)
@pytest.mark.parametrize("items", [list(range(1, 4)), list("abc")])
@pytest.mark.parametrize("query_geom", [Point(0, 0.5)])
def test_nearest_item_exclusive(geoms, items, query_geom):
tree = STRtree(geoms, items)
assert tree.nearest_item(query_geom, exclusive=True) != items[0]
```
|
{
"source": "jGaboardi/splot",
"score": 2
}
|
#### File: splot/tests/test_viz_bokeh.py
```python
from libpysal.weights.contiguity import Queen
from libpysal import examples
import geopandas as gpd
import esda
import pytest
from splot._bk import (
plot_choropleth,
lisa_cluster,
moran_scatterplot,
plot_local_autocorrelation,
)
@pytest.mark.skip(reason="to be deprecated")
def test_plot_choropleth():
link = examples.get_path("columbus.shp")
df = gpd.read_file(link)
w = Queen.from_dataframe(df)
w.transform = "r"
TOOLS = "tap,help"
fig = plot_choropleth(
df, "HOVAL", title="columbus", reverse_colors=True, tools=TOOLS
)
@pytest.mark.skip(reason="to be deprecated")
def test_lisa_cluster():
link = examples.get_path("columbus.shp")
df = gpd.read_file(link)
y = df["HOVAL"].values
w = Queen.from_dataframe(df)
w.transform = "r"
moran_loc = esda.moran.Moran_Local(y, w)
TOOLS = "tap,reset,help"
fig = lisa_cluster(moran_loc, df, p=0.05, tools=TOOLS)
@pytest.mark.skip(reason="to be deprecated")
def test_moran_scatterplot():
link = examples.get_path("columbus.shp")
df = gpd.read_file(link)
y = df["HOVAL"].values
w = Queen.from_dataframe(df)
w.transform = "r"
moran_loc = esda.moran.Moran_Local(y, w)
fig = moran_scatterplot(moran_loc, p=0.05)
@pytest.mark.skip(reason="to be deprecated")
def test_plot_local_autocorrelation():
link = examples.get_path("columbus.shp")
df = gpd.read_file(link)
y = df["HOVAL"].values
w = Queen.from_dataframe(df)
w.transform = "r"
moran_loc = esda.moran.Moran_Local(y, w)
fig = plot_local_autocorrelation(moran_loc, df, "HOVAL")
```
|
{
"source": "jGaboardi/transbigdata",
"score": 2
}
|
#### File: src/transbigdata/preprocess.py
```python
import geopandas as gpd
import pandas as pd
from .grids import (
GPS_to_grid,
area_to_params,
grid_to_centre,
grid_to_polygon
)
from .coordinates import getdistance
def clean_same(data, col=['VehicleNum', 'Time', 'Lng', 'Lat']):
'''
Delete records that carry the same information as both the preceding and
the following record, to reduce the data volume. For example, if several
consecutive records of an individual differ only in their timestamps,
only the first and the last of them are kept
Parameters
-------
data : DataFrame
Data
col : List
Column names, in the order ['VehicleNum', 'Time', 'Lng', 'Lat'], plus any
extra columns to compare. The data is sorted by time, and all columns
other than the time column are compared with the neighbouring records
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum, Time, Lng, Lat] = col[:4]
extra = col[4:]
data1 = data.copy()
data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
data1 = data1.sort_values(by=[VehicleNum, Time])
data1['issame'] = 0
for i in [VehicleNum, Lng, Lat]+extra:
data1['issame'] += (data1[i].shift() == data1[i]
) & (data1[i].shift(-1) == data1[i])
data1 = data1[-(data1['issame'] == len([VehicleNum, Lng, Lat]+extra))]
data1 = data1.drop('issame', axis=1)
return data1
def clean_drift(data, col=['VehicleNum', 'Time', 'Lng', 'Lat'],
speedlimit=80, dislimit=1000):
'''
Delete drift points. A point is removed when its speed relative to both the
preceding and the following point exceeds the speed limit, while the speed
between those two neighbouring points stays below the limit; the same rule
is applied with the distance threshold. The time column is processed more
efficiently if it is already in datetime format.
Parameters
-------
data : DataFrame
Data
col : List
Column names, in the order of [‘VehicleNum’, ‘Time’, ‘Lng’, ‘Lat’]
speedlimit : number
Speed limitation (km/h)
dislimit : number
Distance limitation; the same drift rule applied to distances instead of speeds
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum, Time, Lng, Lat] = col
data1 = data.copy()
data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
data1[Time+'_dt'] = pd.to_datetime(data1[Time])
data1 = data1.sort_values(by=[VehicleNum, Time])
for i in [VehicleNum, Lng, Lat, Time+'_dt']:
data1[i+'_pre'] = data1[i].shift()
data1[i+'_next'] = data1[i].shift(-1)
data1['dis_pre'] = getdistance(
data1[Lng],
data1[Lat],
data1[Lng+'_pre'],
data1[Lat+'_pre'])
data1['dis_next'] = getdistance(
data1[Lng],
data1[Lat],
data1[Lng+'_next'],
data1[Lat+'_next'])
data1['dis_prenext'] = getdistance(
data1[Lng+'_pre'],
data1[Lat+'_pre'],
data1[Lng+'_next'],
data1[Lat+'_next'])
data1['timegap_pre'] = data1[Time+'_dt'] - data1[Time+'_dt_pre']
data1['timegap_next'] = data1[Time+'_dt_next'] - data1[Time+'_dt']
data1['timegap_prenext'] = data1[Time+'_dt_next'] - data1[Time+'_dt_pre']
data1['speed_pre'] = data1['dis_pre'] / \
data1['timegap_pre'].dt.total_seconds()*3.6
data1['speed_next'] = data1['dis_next'] / \
data1['timegap_next'].dt.total_seconds()*3.6
data1['speed_prenext'] = data1['dis_prenext'] / \
data1['timegap_prenext'].dt.total_seconds()*3.6
if speedlimit:
data1 = data1[
-((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
(data1[VehicleNum+'_next'] == data1[VehicleNum]) &
(data1['speed_pre'] > speedlimit) &
(data1['speed_next'] > speedlimit) &
(data1['speed_prenext'] < speedlimit))]
if dislimit:
data1 = data1[
-((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
(data1[VehicleNum+'_next'] == data1[VehicleNum]) &
(data1['dis_pre'] > dislimit) &
(data1['dis_next'] > dislimit) &
(data1['dis_prenext'] < dislimit))]
data1 = data1[data.columns]
return data1
def clean_outofbounds(data, bounds, col=['Lng', 'Lat']):
'''
Given the longitude and latitude of the lower-left and upper-right corners
of the study area, exclude data that fall outside the study area
Parameters
-------
data : DataFrame
Data
bounds : List
Latitude and longitude of the lower left and upper right of
the study area, in the order of [lon1, lat1, lon2, lat2]
col : List
Column name of longitude and latitude
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
lon1, lat1, lon2, lat2 = bounds
if (lon1 > lon2) | (lat1 > lat2) | (abs(lat1) > 90) | (
abs(lon1) > 180) | (abs(lat2) > 90) | (abs(lon2) > 180):
raise Exception(
'Bounds error. The input bounds should be in the order \
of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and \
(lon2,lat2) is the upper right corner.')
Lng, Lat = col
data1 = data.copy()
data1 = data1[(data1[Lng] > bounds[0]) & (data1[Lng] < bounds[2]) & (
data1[Lat] > bounds[1]) & (data1[Lat] < bounds[3])]
return data1
def clean_outofshape(data, shape, col=['Lng', 'Lat'], accuracy=500):
'''
Given the GeoDataFrame of the study area, exclude data that fall outside
the study area
Parameters
-------
data : DataFrame
Data
shape : GeoDataFrame
The GeoDataFrame of the study area
col : List
Column name of longitude and latitude
accuracy : number
The grid cell size. The data is gridded first and then cleaned; the
smaller the grid size, the higher the accuracy
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
Lng, Lat = col
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = area_to_params(bounds, accuracy)
data1 = data.copy()
data1['LONCOL'], data1['LATCOL'] = GPS_to_grid(
data1[Lng], data1[Lat], params)
data1_gdf = data1[['LONCOL', 'LATCOL']].drop_duplicates()
data1_gdf['geometry'] = grid_to_polygon(
[data1_gdf['LONCOL'], data1_gdf['LATCOL']], params)
data1_gdf = gpd.GeoDataFrame(data1_gdf)
data1_gdf = data1_gdf[data1_gdf.intersects(shape_unary)]
data1 = pd.merge(data1, data1_gdf[['LONCOL', 'LATCOL']]).drop(
['LONCOL', 'LATCOL'], axis=1)
return data1
def clean_traj(data, col=['uid', 'str_time', 'lon', 'lat'], tripgap=1800,
disgap=50000, speedlimit=80):
'''
A combined routine for trajectory data cleaning. It defines the time-gap
threshold and the distance threshold beyond which records are split into
a new trip
Parameters
-------
data : DataFrame
Trajectory data
col : List
Column names, in the order of [‘VehicleNum’, ‘Time’, ‘Lng’, ‘Lat’]
tripgap : number
The time length threshold considered as a new trip
disgap : number
The distance threshold considered as a new trip
speedlimit : number
Speed limit
Returns
-------
data1 : DataFrame
Cleaned data
'''
uid, timecol, lon, lat = col
data[timecol] = pd.to_datetime(data[timecol])
data = data.sort_values(by=[uid, timecol])
cols = []
for i in data.columns:
if i not in [uid, timecol, lon, lat]:
cols.append(i)
data = clean_same(data, col=[uid, timecol, lon, lat]+cols)
data = clean_drift(data, col=[uid, timecol, lon, lat],
speedlimit=speedlimit)
data = id_reindex(data, uid, timecol=timecol, timegap=tripgap)
data = data.rename(columns={uid+'_new': 'tripid'})
data = id_reindex_disgap(
data, col=['tripid', lon, lat], disgap=disgap, suffix='')
data1 = data.copy()
data1['lon1'] = data1[lon].shift(-1)
data1['lat1'] = data1[lat].shift(-1)
data1['tripid1'] = data1['tripid'].shift(-1)
data1 = data1[data1['tripid'] == data1['tripid1']]
data1['dis'] = getdistance(
data1[lon], data1[lat], data1['lon1'], data1['lat1'])
a = data1.groupby(['tripid'])['dis'].sum()
a = a[-(a < 50)].reset_index()['tripid']
data = pd.merge(data, a)
data = data.drop('tripid', axis=1)
data = id_reindex(data, uid, timecol=timecol, timegap=tripgap)
data = data.rename(columns={uid+'_new': 'tripid'})
data = id_reindex_disgap(
data, col=['tripid', lon, lat], disgap=disgap, suffix='')
return data
def dataagg(data, shape, col=['Lng', 'Lat', 'count'], accuracy=500):
'''
Aggregate data to traffic zone
Parameters
-------
data : DataFrame
The origin DataFrame
shape : GeoDataFrame
The shape of the traffic zone
col : List
Either two columns, i.e. ['Lng', 'Lat'], or three columns, i.e.
['Lng', 'Lat', 'count'], where 'count' holds the number of points
at each location
accuracy : number
The data is gridded first and then aggregated; this parameter sets the
grid size. The smaller the size, the higher the accuracy.
Returns
-------
aggresult : GeoDataFrame
Traffic zone. The count column is the output result
data1 : DataFrame
The zone-matched data
'''
if len(col) == 2:
Lng, Lat = col
aggcol = None
else:
Lng, Lat, aggcol = col
shape['index'] = range(len(shape))
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = area_to_params(bounds, accuracy)
data1 = data.copy()
data1['LONCOL'], data1['LATCOL'] = GPS_to_grid(
data1[Lng], data1[Lat], params)
data1_gdf = data1[['LONCOL', 'LATCOL']].drop_duplicates()
data1_gdf['geometry'] = gpd.points_from_xy(
*grid_to_centre([data1_gdf['LONCOL'], data1_gdf['LATCOL']], params))
data1_gdf = gpd.GeoDataFrame(data1_gdf)
data1_gdf = gpd.sjoin(data1_gdf, shape, how='left')
data1 = pd.merge(data1, data1_gdf).drop(['LONCOL', 'LATCOL'], axis=1)
if aggcol:
aggresult = pd.merge(shape, data1.groupby('index')[
aggcol].sum().reset_index()).drop('index', axis=1)
else:
data1['_'] = 1
aggresult = pd.merge(shape, data1.groupby('index')['_'].sum().rename(
'count').reset_index()).drop('index', axis=1)
data1 = data1.drop('_', axis=1)
data1 = data1.drop('index', axis=1)
return aggresult, data1
def id_reindex_disgap(data, col=['uid', 'lon', 'lat'], disgap=1000,
suffix='_new'):
'''
Renumber the ID column of the data. If the distance between two adjacent
records exceeds the threshold, a new ID is assigned
Parameters
-------
data : DataFrame
Data
col : List
Column names, in the order ['uid', 'lon', 'lat']; the first one is the
ID column to be re-indexed
disgap : number
If the distance between two adjacent records exceeds this value, a new
ID is assigned
suffix : str
The suffix of the new column. When set to False, the former column
will be replaced
Returns
-------
data1 : DataFrame
Renumbered data
'''
uid, lon, lat = col
data1 = data.copy()
data1[uid+suffix] = (
(data1[uid].shift() != data1[uid]) |
(getdistance(data1[lon],
data1[lat],
data1[lon].shift(),
data1[lat].shift()) > disgap)).astype(int).cumsum()-1
a = data1.groupby([uid+suffix])[lon].count()
data1 = pd.merge(data1, a[a > 1].reset_index()[[uid+suffix]])
return data1
def id_reindex(data, col, new=False, timegap=None, timecol=None,
suffix='_new', sample=None):
'''
Renumber the ID columns of the data
Parameters
-------
data : DataFrame
Data
col : str
Name of the ID column to be re-indexed
new : bool
False: identical original IDs are always mapped to the same new index;
True: the table is scanned in order, and an original ID that reappears
after other IDs receives a new index
timegap : number
If an individual does not appear for a period of time (timegap is
the time threshold), it is numbered as a new individual. This parameter
should be set with timecol to take effect.
timecol : str
The column name of time, it should be set with timegap to take effect
suffix : str
The suffix of the new column. When set to False, the former column will
be replaced
sample : int (optional)
Randomly keep only this many re-indexed IDs (downsampling the data)
Returns
-------
data1 : DataFrame
Renumbered data
'''
if not suffix:
suffix = ''
data1 = data.copy()
if new:
data1[col+suffix] = data1[col] != data1[col].shift()
data1[col+suffix] = data1[col+suffix].cumsum()-1
else:
tmp = data1[[col]].drop_duplicates()
tmp[col+'_'] = range(len(tmp))
data1 = pd.merge(data1, tmp, on=col)
data1[col+suffix] = data1[col+'_']
if suffix != '_':
data1 = data1.drop(col+'_', axis=1)
if (timegap is not None) & (timecol is not None):
data1[timecol] = pd.to_datetime(data1[timecol])
data1 = data1.sort_values(by=[col+suffix, timecol])
data1[col+suffix] = (
(data1[col+suffix].shift() != data1[col+suffix]) |
((data1[timecol]-data1[timecol].shift()).dt.total_seconds()
> timegap)).astype(int).cumsum()-1
if sample:
tmp = data1[col+suffix].drop_duplicates().sample(sample)
data1 = pd.merge(data1, tmp)
return data1
```
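The cleaning helpers above are typically chained: drop redundant consecutive records, remove drift points, then clip to the study area before any aggregation. A hedged sketch of such a pipeline (`data` and `study_area` are assumed to be loaded elsewhere, and the helpers are assumed to be re-exported at the package level):
```python
# A minimal sketch of chaining the cleaning steps above; `data` is assumed to be
# a DataFrame with VehicleNum/Time/Lng/Lat columns and `study_area` a GeoDataFrame.
import transbigdata as tbd

cols = ['VehicleNum', 'Time', 'Lng', 'Lat']
cleaned = tbd.clean_same(data, col=cols)                      # drop redundant consecutive records
cleaned = tbd.clean_drift(cleaned, col=cols, speedlimit=80)   # remove drifting GPS points
cleaned = tbd.clean_outofshape(cleaned, study_area,
                               col=['Lng', 'Lat'], accuracy=500)  # keep points inside the study area
```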
#### File: transbigdata/tests/test_plotmap.py
```python
import transbigdata as tbd
import matplotlib.pyplot as plt
class TestPlotmap:
def setup_method(self):
pass
def test_plot_map(self):
bounds = [113.6, 22.4, 114.8, 22.9]
_ = plt.figure(1, (8, 8), dpi=250)
ax = plt.subplot(111)
tbd.plotscale(ax, bounds=bounds, textsize=10, compasssize=1,
accuracy=2000, rect=[0.06, 0.03], zorder=10)
```
|
{
"source": "jgabriel1607/Python",
"score": 4
}
|
#### File: jgabriel1607/Python/ex101.py
```python
from datetime import date
def voto(a=18):
if a < 16:
return 'Voto Negado.'
if 16 <= a < 18 or a > 65:
return 'Voto Opcional.'
if 18 <= a <= 65:
return 'Voto Obrigatório.'
atual = date.today().year
nasc = int(input('Digite o ano de seu nascimento: '))
idade = atual - nasc
res = voto(idade)
print(f'Com {idade} anos: {res}')
```
#### File: jgabriel1607/Python/ex102.py
```python
def fatorial(num=1, show=False):
"""
Computes the factorial of a number entered by the user.
:param num: The number to be calculated.
:param show: (Optional) Whether or not to display the calculation steps.
:return: The factorial of the number n.
"""
n = 1
for f in range(num, 0, -1):
if show:
print(f'{f}', 'x ' if f > 1 else '', end='')
n *= f
if f <= 1:
print(f'=', end=' ')
else:
n *= f
return n
numero = int(input('Digite um número: '))
op = ' '
while op not in 'SN':
op = str(input('Deseja mostrar processo: ')).strip().upper()
if op == 'S':
x = True
break
if op == 'N':
x = False
break
print(f'Fatorial de {numero} é: ')
fat = fatorial(numero, x)
print(fat)
```
|
{
"source": "jgabriel98/Instagram-Giveaways-Winner",
"score": 3
}
|
#### File: Instagram-Giveaways-Winner/modules/implicitly_wait.py
```python
from contextlib import contextmanager
from selenium import webdriver # type: ignore
class ImplicitlyWait:
def __init__(self, driver:webdriver, timeout:int):
self.driver = driver
self.timeout = timeout
def enable(self):
'''
Enable implicitly wait so it doesn't throw errors without waiting some time in order to let the element appear
'''
self.driver.implicitly_wait(self.timeout)
def disable(self):
'''
Disable implicitly wait so it doesn't wait for the element to appear. This can cause errors if not handled with a 'Explicitly Wait'
'''
self.driver.implicitly_wait(0)
@contextmanager
def ignore(self):
'''
Ingore implicitly wait in the current block of code by disabling and enabling again when finished
'''
try:
yield self.disable()
finally:
self.enable()
```
|
{
"source": "JGabriel-AbreuM/CalculadoraBinaria",
"score": 4
}
|
#### File: calculadora_binaria/calc/functions_bin.py
```python
def bin2dec(b):
num = 0
count = 0
for i in b[::-1]:
num += int(i)*(2**count)
count += 1
return str(num)
def resolve_equacao(sentenca):
sentenca = sentenca.replace(" ", "")
equacao = ""
elemento = ""
for i in sentenca:
if i in "+-*/":
equacao += bin2dec(elemento)
equacao += i
elemento = ""
else:
elemento += i
equacao += bin2dec(elemento)
try:
if (eval(equacao) - int(eval(equacao))) == 0:
resultado = bin(int(eval(equacao)))[2:]
else:
resultado = "Impossível converter um 'float' em binario"
except ZeroDivisionError:
resultado = "Impossível dividir por 0"
return resultado
```
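`resolve_equacao` converts each binary operand to decimal, evaluates the expression with `eval`, and converts an integer result back to binary. A short worked example (the import path is an assumption based on the file layout above):
```python
# Illustrative calls only; the import path is assumed from the file location above.
from calc.functions_bin import bin2dec, resolve_equacao

print(bin2dec("101"))                # "5"  (1*4 + 0*2 + 1*1)
print(resolve_equacao("101 + 11"))   # 5 + 3 = 8  ->  "1000"
print(resolve_equacao("10 / 0"))     # "Impossível dividir por 0"
```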
|
{
"source": "JGabriel-AbreuM/CepAPI",
"score": 2
}
|
#### File: correios/api/serializers.py
```python
from django.db.models import fields
from rest_framework.serializers import ModelSerializer
from correios.models import Endereco
from pycep_correios import get_address_from_cep, exceptions
class RegisterEndereco(ModelSerializer):
class Meta:
model = Endereco
fields = ["cep"]
def create(self, validated_data):
dict_local = get_address_from_cep(validated_data["cep"])
novo_endereco = Endereco.objects.create(
bairro = dict_local["bairro"],
cep = dict_local["cep"],
cidade = dict_local["cidade"],
logradouro = dict_local["logradouro"],
uf = dict_local["uf"],
)
return novo_endereco
class EnderecoSerializer(ModelSerializer):
class Meta:
model = Endereco
fields = ("id", "bairro", "cep", "cidade", "logradouro", "uf")
```
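`RegisterEndereco` accepts only a CEP and fills in the remaining address fields from `pycep_correios` when saving. A hedged sketch of driving it through the standard DRF serializer API (the CEP value is illustrative and a reachable CEP service is assumed):
```python
# A minimal sketch, assuming the module path above and network access to the CEP service.
from correios.api.serializers import RegisterEndereco, EnderecoSerializer

serializer = RegisterEndereco(data={"cep": "01001000"})   # illustrative CEP
if serializer.is_valid():
    endereco = serializer.save()            # calls create(), which queries pycep_correios
    print(EnderecoSerializer(endereco).data)
```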
|
{
"source": "JGabriel-AbreuM/DjangoNationalGeograph",
"score": 3
}
|
#### File: DjangoNationalGeographic/Animal/tests.py
```python
from django.test import TestCase
from .models import Animal, Raca, Origem, NomePopular, NomeCientifico
from Protecao.models import Protecao
class AnimalTestCase(TestCase):
def setUp(self):
self.protecao=Protecao.objects.create(
tipo="Penas"
)
self.raca = Raca.objects.create(
raca = "v"
)
self.origem = Origem.objects.create(
origem = "Afeganistão"
)
self.nome_popular = NomePopular.objects.create(
nome_popular = "g"
)
self.nome_cientifico = NomeCientifico.objects.create(
nome_cientifico = "yy"
)
self.animal = Animal.objects.create(
raca=self.raca,
origem=self.origem,
nome_popular=self.nome_popular,
nome_cientifico=self.nome_cientifico,
protecao=self.protecao,
)
def test_animal_contido(self):
self.assertIn(self.animal, Animal.objects.all())
def test_protecao_contido(self):
item = Protecao.objects.create(
tipo="z"
)
self.assertIn(item, Protecao.objects.all())
def test_animal_filter(self):
raca_nova = Raca.objects.create(
raca = "x"
)
item = Animal.objects.create(
raca=raca_nova,
origem=self.origem,
nome_popular=self.nome_popular,
nome_cientifico=self.nome_cientifico,
protecao=self.protecao,
)
self.assertNotIn(item, Animal.objects.filter(raca=self.raca))
self.assertIn(self.animal, Animal.objects.filter(raca=self.raca))
```
|
{
"source": "JGabriel-AbreuM/Ecommerce",
"score": 3
}
|
#### File: ecommerce/user/signup.py
```python
from django import forms
from django.contrib.auth import get_user_model
class SignupForm(forms.Form):
CHOICES = (
("VD", ("Vendedor")),
("CP", ("Comprador"))
)
type = forms.ChoiceField(choices=CHOICES, label="Tipo")
def signup(self, request, user):
user.type = self.cleaned_data["type"]
user.save()
```
|
{
"source": "jgabrielfreitas/hands-up",
"score": 3
}
|
#### File: api/auth/auth.py
```python
from data.user import all_users
class AuthHelper:
@staticmethod
def check_user(user_received):
for user in all_users:
if user.email == user_received.email and \
user.password == user_received.password:
return user
return None
```
|
{
"source": "jgabrielfreitas/raspberry-playground",
"score": 3
}
|
#### File: jgabrielfreitas/raspberry-playground/blink.py
```python
import RPi.GPIO as GPIO
import time
time_to_sleep = 2
pin_default = 12
GPIO.setmode(GPIO.BOARD)
ON = 1
OFF = 0
# Define pin 12 as output
GPIO.setup(pin_default, GPIO.OUT)
def ledon(pin_led = pin_default):
GPIO.output(pin_led, ON)
return
def ledoff(pin_led = pin_default):
GPIO.output(pin_led, OFF)
return
while(True):
ledon()
time.sleep(time_to_sleep)
ledoff()
time.sleep(time_to_sleep)
```
|
{
"source": "jgabrielfreitas/recruitment-api",
"score": 3
}
|
#### File: jgabrielfreitas/recruitment-api/excel.py
```python
import xlrd
class ExcelReader:
def read_file(self, file_name):
return xlrd.open_workbook('xlsx/{}.xlsx'.format(file_name), on_demand=True)
```
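`ExcelReader` is a thin wrapper over `xlrd.open_workbook`. A short usage sketch (the file name is illustrative and must exist under `xlsx/`; note that xlrd 2.x only reads `.xls`, so an older xlrd release is assumed here for `.xlsx` files):
```python
# A minimal sketch, assuming an xlsx/candidates.xlsx file exists.
from excel import ExcelReader

book = ExcelReader().read_file('candidates')   # opens xlsx/candidates.xlsx
sheet = book.sheet_by_index(0)                 # standard xlrd worksheet API
print(sheet.nrows, sheet.ncols)
```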
|
{
"source": "jgabriellima/design-patterns-python",
"score": 3
}
|
#### File: comportamentais/strategy/impostos.py
```python
class ISS:
def calcula(self, orcamento):
return orcamento.valor * 0.1
class ICMS:
def calcula(self, orcamento):
return orcamento.valor * 0.06
```
#### File: comportamentais/strategy/orcamento.py
```python
class Orcamento:
def __init__(self, valor):
self.__valor = valor
@property
def valor(self):
return self.__valor
```
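Together these two files form the Strategy pattern: `Orcamento` carries the data, while `ISS` and `ICMS` are interchangeable tax strategies sharing the same `calcula` interface. A small usage sketch (import paths are assumed from the file locations above):
```python
# A minimal sketch of swapping strategies; import paths follow the file layout above.
from impostos import ISS, ICMS
from orcamento import Orcamento

orcamento = Orcamento(500.0)
for imposto in (ISS(), ICMS()):            # both strategies expose calcula(orcamento)
    print(type(imposto).__name__, imposto.calcula(orcamento))
# ISS 50.0
# ICMS 30.0
```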
|
{
"source": "jgabriellima/python-fullstack-template",
"score": 3
}
|
#### File: python-fullstack-template/app/auth.py
```python
import functools
import logging
from flask import Blueprint, request, session, abort
logger = logging.getLogger(__name__)
bp = Blueprint('auth', __name__)
def is_logged_in():
return True
def login_required(view):
"""
View decorator that sends an error to anonymous users.
Use the error handler to provide a friendly user experience.
"""
@functools.wraps(view)
def wrapped_view(**kwargs):
if not is_logged_in():
abort(401)
return view(**kwargs)
return wrapped_view
@bp.before_app_request
def load_logged_in_user():
pass # Not implemented
```
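`login_required` wraps a view and aborts with HTTP 401 for anonymous users. A hedged sketch of applying it to a route on the same blueprint (the `/me` route and view are illustrative, not part of the template):
```python
# A minimal sketch, assuming the blueprint and decorator defined above.
from flask import jsonify
from app.auth import bp, login_required

@bp.route('/me')
@login_required
def me():
    # reached only when is_logged_in() returns True; otherwise abort(401) fires first
    return jsonify({'logged_in': True})
```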
|
{
"source": "jga/capmetrics-etl",
"score": 2
}
|
#### File: capmetrics-etl/tests/test_etl.py
```python
import configparser
from datetime import datetime
import json
import os
import unittest
import pytz
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import xlrd
from capmetrics_etl import cli, etl, models, utils
APP_TIMEZONE = pytz.timezone('America/Chicago')
UTC_TIMEZONE = pytz.timezone('UTC')
class CheckForHeaderTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
excel_book = xlrd.open_workbook(filename=self.test_excel)
self.worksheet = excel_book.sheet_by_name('Ridership by Route Weekday')
def test_cell_with_headers(self):
worksheet_routes = {
'numbers_available': False,
'names_available': False,
'types_available': False,
'routes': [],
}
cell = self.worksheet.cell(4, 0)
result = etl.check_for_headers(cell, self.worksheet, 4, worksheet_routes)
self.assertTrue(result)
self.assertEqual(worksheet_routes['numbers_available'], True)
self.assertEqual(worksheet_routes['names_available'], True)
self.assertEqual(worksheet_routes['types_available'], True)
def test_cell_without_headers(self):
worksheet_routes = {
'numbers_available': False,
'names_available': False,
'types_available': False,
'routes': [],
}
cell = self.worksheet.cell(50, 0)
result = etl.check_for_headers(cell, self.worksheet, 50, worksheet_routes)
self.assertFalse(result)
self.assertEqual(worksheet_routes['numbers_available'], False)
self.assertEqual(worksheet_routes['names_available'], False)
self.assertEqual(worksheet_routes['types_available'], False)
class GetSeasonYearTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
def test_get_season_and_year(self):
periods = ['Winter 2014', 'Spring 2015', 'Summer 2013', 'Fall 1999']
self.assertEqual(('winter', '2014'), etl.get_season_and_year(periods[0]))
self.assertEqual(('spring', '2015'), etl.get_season_and_year(periods[1]))
self.assertEqual(('summer', '2013'), etl.get_season_and_year(periods[2]))
self.assertEqual(('fall', '1999'), etl.get_season_and_year(periods[3]))
def test_get_bad_season_and_year(self):
periods = ['Winter 14', 'June 2015', 'Summer-2013', 'Fall 99']
for period in periods:
self.assertEqual((None, None), etl.get_season_and_year(period))
class GetRouteInfoTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
def test_get_route_info(self):
result = etl.get_route_info(self.test_excel, 'Ridership by Route Weekday')
self.assertTrue(result['numbers_available'])
self.assertTrue(result['names_available'])
self.assertEqual(len(result['routes']), 88, msg=result['routes'])
route_info = result['routes'][0]
self.assertEqual(route_info['route_number'], '1')
self.assertEqual(route_info['route_name'], '1-NORTH LAMAR/SOUTH CONGRESS')
self.assertEqual(route_info['service_type'], 'LOCAL')
route_info2 = result['routes'][1]
self.assertEqual(route_info2['route_number'], '2')
self.assertEqual(route_info2['route_name'], '2-ROSEWOOD')
self.assertEqual(route_info2['service_type'], 'LOCAL')
route_info10 = result['routes'][9]
self.assertEqual(route_info10['route_number'], '18')
self.assertEqual(route_info10['route_name'], '18-MARTIN LUTHER KING')
self.assertEqual(route_info10['service_type'], 'LOCAL')
class GetPeriodTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
def test_get_periods(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Weekday')
periods = etl.get_periods(worksheet)
self.assertEqual(periods['3']['day_of_week'], 'weekday')
self.assertEqual(periods['3']['season'], 'spring')
self.assertEqual(periods['3']['year'], 2012)
timestamp_3 = APP_TIMEZONE.localize(datetime(year=2012, month=3, day=26))
self.assertEqual(periods['3']['timestamp'], timestamp_3, msg=periods['3']['timestamp'])
self.assertEqual(periods['8']['day_of_week'], 'weekday')
self.assertEqual(periods['8']['season'], 'fall')
self.assertEqual(periods['8']['year'], 2013)
timestamp_8 = APP_TIMEZONE.localize(datetime(year=2013, month=9, day=30))
self.assertEqual(periods['8']['timestamp'], timestamp_8)
self.assertEqual(periods['10']['day_of_week'], 'weekday')
self.assertEqual(periods['10']['season'], 'summer')
self.assertEqual(periods['10']['year'], 2014)
timestamp_10 = APP_TIMEZONE.localize(datetime(year=2014, month=6, day=30))
self.assertEqual(periods['10']['timestamp'], timestamp_10)
class ExtractDayOfWeekTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
def test_weekday_extraction(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Weekday')
day_of_week = etl.extract_day_of_week(3, 3, worksheet)
self.assertEqual('weekday', day_of_week)
no_day = etl.extract_day_of_week(10, 3, worksheet)
self.assertIsNone(no_day)
def test_saturday_extraction(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Saturday')
day_of_week = etl.extract_day_of_week(3, 6, worksheet)
self.assertEqual('saturday', day_of_week)
no_day = etl.extract_day_of_week(10, 3, worksheet)
self.assertIsNone(no_day)
def test_sunday_extraction(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Sunday')
day_of_week = etl.extract_day_of_week(3, 9, worksheet)
self.assertEqual('sunday', day_of_week)
no_day = etl.extract_day_of_week(10, 3, worksheet)
self.assertIsNone(no_day)
class MergeRouteDataTests(unittest.TestCase):
"""
Tests etl.merge_route_data function.
"""
def test_route_number_redundancy(self):
worksheet_result_1 = {
'routes': [{'route_number': 1}],
}
worksheet_result_2 = {
'routes': [
{'route_number': 1},
{'route_number': 2},
{'route_number': 3}
],
}
results = [worksheet_result_1, worksheet_result_2]
merged_data = etl.merge_route_data(results)
self.assertEqual(len(list(merged_data.keys())), 3)
self.assertTrue(set(merged_data.keys()).issuperset({1, 2, 3}))
self.assertFalse(set(merged_data.keys()).issuperset({1, 2, 3, 4}))
def test_duplicate_route_dict(self):
route_info_1a = {
'route_number': '1',
'route_name': '1 - POPULAR ROUTE',
'service_type': 'Local'
}
route_info_1b = {
'route_number': '1',
'route_name': '1 - POPULAR ROUTE',
'service_type': 'Local'
}
route_info_2 = {
'route_number': '2',
'route_name': '2 - UNPOPULAR ROUTE',
'service_type': 'Local'
}
route_info_3 = {
'route_number': '3',
'route_name': '3 - NEW ROUTE',
'service_type': 'Express'
}
results = [{'routes': [route_info_1a, route_info_1b, route_info_2, route_info_3]}]
merged_data = etl.merge_route_data(results)
merged_keys = merged_data.keys()
self.assertEqual(len(list(merged_keys)), 3)
self.assertTrue('1' in merged_keys)
self.assertTrue('2' in merged_keys)
self.assertTrue('3' in merged_keys)
class GetLatestMeasurementTimestampTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data_single.xls')
excel_book = xlrd.open_workbook(filename=self.test_excel)
self.worksheet = excel_book.sheet_by_name('Ridership by Route Weekday')
self.periods = etl.get_periods(self.worksheet)
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
self.session = session
etl.update_route_info(self.test_excel,
session,
['Ridership by Route Weekday'])
etl.parse_worksheet_ridership(self.worksheet, self.periods, models.DailyRidership,
self.session)
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_ordering(self):
latest = etl.get_latest_measurement_timestamp(self.session)
self.assertEqual(latest.year, 2015)
self.assertEqual(latest.month, 6)
self.assertEqual(latest.day, 29)
class GetHighRidershipRoutesTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
ini_config = os.path.join(tests_path, 'capmetrics.ini')
config_parser = configparser.ConfigParser()
        # make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(ini_config)
self.config = cli.parse_capmetrics_configuration(config_parser)
self.engine = create_engine(self.config['engine_url'])
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
self.session = session
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_routes(self):
        # This test takes a long time, as it performs the full ETL task for a realistic file
data_source_file = './tests/data/test_cmta_data.xls'
file_location = os.path.abspath(data_source_file)
daily_worksheets = self.config['daily_ridership_worksheets']
hourly_worksheets = self.config['hour_productivity_worksheets']
route_info_report = etl.update_route_info(file_location,
self.session,
daily_worksheets)
route_info_report.etl_type = 'route-info'
self.session.add(route_info_report)
daily_ridership_report = etl.update_ridership(file_location,
daily_worksheets,
models.DailyRidership,
self.session)
daily_ridership_report.etl_type = 'daily-ridership'
self.session.add(daily_ridership_report)
hourly_ridership_report = etl.update_ridership(file_location,
hourly_worksheets,
models.ServiceHourRidership,
self.session)
hourly_ridership_report.etl_type = 'hourly-ridership'
self.session.add(hourly_ridership_report)
self.session.commit()
self.session.close()
etl.update_weekly_performance(self.session)
latest = etl.get_latest_measurement_timestamp(self.session)
routes = etl.get_high_ridership_routes(self.session, latest)
expected_routes = {7, 1, 300, 801, 10, 3, 20, 803, 331, 37}
self.assertEqual(set(routes), expected_routes)
class ParseWorksheetRidershipTests(unittest.TestCase):
"""
Tests etl.parse_worksheet_ridership function.
"""
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data_single.xls')
excel_book = xlrd.open_workbook(filename=self.test_excel)
self.worksheet = excel_book.sheet_by_name('Ridership by Route Weekday')
self.periods = etl.get_periods(self.worksheet)
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
self.session = session
etl.update_route_info(self.test_excel,
session,
['Ridership by Route Weekday'])
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_parse(self):
etl.parse_worksheet_ridership(self.worksheet, self.periods, models.DailyRidership,
self.session)
instances = self.session.query(models.DailyRidership).all()
self.assertEqual(len(instances), 11)
ridership_values = list()
for daily_ridership in instances:
ridership_values.append(daily_ridership.ridership)
expected_set = {
14041.3609132795,
12794.7123015873,
14117.8574082171,
13633.8223760158,
12875.3658424908,
12459.0767904292,
7949.55721234328,
7000.32063492063,
6740.593831168831,
6227.303709,
6037.422349
}
difference = expected_set.difference(set(ridership_values))
self.assertEqual(len(list(difference)), 0)
def test_parse_etl_report(self):
report = models.ETLReport(creates=0, updates=0)
etl.parse_worksheet_ridership(self.worksheet, self.periods, models.DailyRidership,
self.session, report)
instances = self.session.query(models.DailyRidership).all()
self.assertEqual(len(instances), 11)
ridership_values = list()
for daily_ridership in instances:
ridership_values.append(daily_ridership.ridership)
expected_set = {
14041.3609132795,
12794.7123015873,
14117.8574082171,
13633.8223760158,
12875.3658424908,
12459.0767904292,
7949.55721234328,
7000.32063492063,
6740.593831168831,
6227.303709,
6037.422349
}
difference = expected_set.difference(set(ridership_values))
self.assertEqual(len(list(difference)), 0)
self.assertEqual(report.creates, 11)
self.assertEqual(report.updates, 0)
class StoreRouteTests(unittest.TestCase):
"""
Tests etl.store_route function.
"""
def setUp(self):
self.timezone = pytz.timezone('America/Chicago')
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
self.test_excel_with_updates = os.path.join(tests_path, 'data/test_cmta_updated_data.xls')
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
self.worksheets = [
"Ridership by Route Weekday",
"Ridership by Route Saturday",
"Ridership by Route Sunday",
"Riders per Hour Weekday",
"Riders Hour Saturday",
"Riders per Hour Sunday"
]
self.daily_worksheets = [
"Ridership by Route Weekday",
"Ridership by Route Saturday",
"Ridership by Route Sunday",
]
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_initial_store_route_info(self):
for v in range(10):
route_info = {
'route_number': str(v),
'route_name': '{0}-POPULAR ROUTE'.format(v),
'service_type': 'Local'
}
etl.store_route(self.session, str(v), route_info)
self.session.commit()
for version in range(10):
route = self.session.query(models.Route) \
.filter_by(route_number=int(version)).one()
self.assertEqual(route.route_name, '{0}-POPULAR ROUTE'.format(version))
self.assertEqual(route.service_type, 'LOCAL')
instances = self.session.query(models.Route).all()
self.assertEqual(len(instances), 10)
def test_store_route_info_update(self):
route_info = {
'route_number': '1',
'route_name': '1-POPULAR ROUTE',
'service_type': 'Local'
}
etl.store_route(self.session, '1', route_info)
self.session.commit()
new_route = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(new_route.route_name, '1-POPULAR ROUTE')
self.assertEqual(new_route.service_type, 'LOCAL')
new_route.route_name = '1 - REORGANIZED_ROUTE'
self.session.commit()
updated_route = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(updated_route.route_name, '1 - REORGANIZED_ROUTE')
self.assertEqual(updated_route.service_type, 'LOCAL')
class UpdateRouteInfoTests(unittest.TestCase):
"""
Tests etl.update_route_info function.
"""
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
self.test_excel_with_updates = os.path.join(tests_path, 'data/test_cmta_updated_data.xls')
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
self.worksheets = [
"Ridership by Route Weekday",
"Ridership by Route Saturday",
"Ridership by Route Sunday",
"Riders per Hour Weekday",
"Riders Hour Saturday",
"Riders per Hour Sunday"
]
self.daily_worksheets = [
"Ridership by Route Weekday",
"Ridership by Route Saturday",
"Ridership by Route Sunday",
]
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_update_route_info_bare_database(self):
etl.update_route_info(self.test_excel, self.session, self.worksheets)
instances = self.session.query(models.Route).all()
self.assertEqual(len(instances), 88)
route_1 = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(route_1.route_name, '1-NORTH LAMAR/SOUTH CONGRESS')
self.assertEqual(route_1.service_type, 'LOCAL')
route_20 = self.session.query(models.Route).filter_by(route_number=20).one()
self.assertEqual(route_20.route_name, '20-MANOR RD/RIVERSIDE')
self.assertEqual(route_20.service_type, 'LOCAL')
route_100 = self.session.query(models.Route).filter_by(route_number=100).one()
self.assertEqual(route_100.route_name, '100-AIRPORT FLYER')
self.assertEqual(route_100.service_type, 'LIMITED/FLYER')
route_271 = self.session.query(models.Route).filter_by(route_number=271).one()
self.assertEqual(route_271.route_name, '271-DEL VALLE FLEX')
self.assertEqual(route_271.service_type, 'FEEDER')
route_350 = self.session.query(models.Route).filter_by(route_number=350).one()
self.assertEqual(route_350.route_name, '350-AIRPORT BLVD')
self.assertEqual(route_350.service_type, 'CROSSTOWN')
route_412 = self.session.query(models.Route).filter_by(route_number=412).one()
self.assertEqual(route_412.route_name, '412-EBUS/MAIN CAMPUS')
self.assertEqual(route_412.service_type, 'SPECIAL SERVICES- EBUS')
route_550 = self.session.query(models.Route).filter_by(route_number=550).one()
self.assertEqual(route_550.route_name, '550-METRO RAIL RED LINE')
self.assertEqual(route_550.service_type, 'METRORAIL')
        route_803 = self.session.query(models.Route).filter_by(route_number=803).one()
        self.assertEqual(route_803.route_name, 'METRORAPID 803 S LAMAR BURNET')
        self.assertEqual(route_803.service_type, 'METRORAPID')
def test_update_route_info_report(self):
report = etl.update_route_info(self.test_excel, self.session, self.worksheets)
route_1 = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(route_1.route_name, '1-NORTH LAMAR/SOUTH CONGRESS')
self.assertEqual(route_1.service_type, 'LOCAL')
route_20 = self.session.query(models.Route).filter_by(route_number=20).one()
self.assertEqual(route_20.route_name, '20-MANOR RD/RIVERSIDE')
self.assertEqual(route_20.service_type, 'LOCAL')
route_100 = self.session.query(models.Route).filter_by(route_number=100).one()
self.assertEqual(route_100.route_name, '100-AIRPORT FLYER')
self.assertEqual(route_100.service_type, 'LIMITED/FLYER')
route_271 = self.session.query(models.Route).filter_by(route_number=271).one()
self.assertEqual(route_271.route_name, '271-DEL VALLE FLEX')
self.assertEqual(route_271.service_type, 'FEEDER')
route_350 = self.session.query(models.Route).filter_by(route_number=350).one()
self.assertEqual(route_350.route_name, '350-AIRPORT BLVD')
self.assertEqual(route_350.service_type, 'CROSSTOWN')
route_412 = self.session.query(models.Route).filter_by(route_number=412).one()
self.assertEqual(route_412.route_name, '412-EBUS/MAIN CAMPUS')
self.assertEqual(route_412.service_type, 'SPECIAL SERVICES- EBUS')
route_550 = self.session.query(models.Route).filter_by(route_number=550).one()
self.assertEqual(route_550.route_name, '550-METRO RAIL RED LINE')
self.assertEqual(route_550.service_type, 'METRORAIL')
        route_803 = self.session.query(models.Route).filter_by(route_number=803).one()
        self.assertEqual(route_803.route_name, 'METRORAPID 803 S LAMAR BURNET')
        self.assertEqual(route_803.service_type, 'METRORAPID')
self.assertEqual(report.creates, 88)
self.assertEqual(report.updates, 0)
self.assertEqual(report.total_models, 88)
def test_update_route_info_with_existing_database(self):
etl.update_route_info(self.test_excel, self.session, self.daily_worksheets)
instances = self.session.query(models.Route).all()
self.assertEqual(len(instances), 88)
# now we update
etl.update_route_info(self.test_excel_with_updates,
self.session,
self.daily_worksheets)
route_1 = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(route_1.route_name, '1-NORTH LAMAR/SOUTH CONGRESS')
self.assertEqual(route_1.service_type, 'LOCAL')
route_10 = self.session.query(models.Route).filter_by(route_number=10).one()
self.assertEqual(route_10.route_name, '10-SOUTH 1ST STREET/RED RIVER')
self.assertEqual(route_10.service_type, 'UPDATED LOCAL')
route_20 = self.session.query(models.Route).filter_by(route_number=20).one()
self.assertEqual(route_20.route_name, '20-MANOR RD/RIVERSIDE')
self.assertEqual(route_20.service_type, 'LOCAL')
route_100 = self.session.query(models.Route).filter_by(route_number=100).one()
self.assertEqual(route_100.route_name, '100-AIRPORT FLYER')
self.assertEqual(route_100.service_type, 'LIMITED/FLYER')
route_271 = self.session.query(models.Route).filter_by(route_number=271).one()
self.assertEqual(route_271.route_name, '271-DEL VALLE FLEX')
self.assertEqual(route_271.service_type, 'FEEDER')
route_350 = self.session.query(models.Route).filter_by(route_number=350).one()
self.assertEqual(route_350.route_name, '350-AIRPORT BLVD')
self.assertEqual(route_350.service_type, 'CROSSTOWN')
route_412 = self.session.query(models.Route).filter_by(route_number=412).one()
self.assertEqual(route_412.route_name, '412-EBUS/MAIN CAMPUS')
self.assertEqual(route_412.service_type, 'SPECIAL SERVICES- EBUS')
route_550 = self.session.query(models.Route).filter_by(route_number=550).one()
self.assertEqual(route_550.route_name, '550-METRO RAIL RED LINE')
self.assertEqual(route_550.service_type, 'METRORAIL')
        route_803 = self.session.query(models.Route).filter_by(route_number=803).one()
        self.assertEqual(route_803.route_name, 'METRORAPID 803 S LAMAR BURNET')
        self.assertEqual(route_803.service_type, 'METRORAPID')
def test_update_route_info_report_with_existing_database(self):
first_report = etl.update_route_info(self.test_excel, self.session, self.daily_worksheets)
instances = self.session.query(models.Route).all()
self.assertEqual(len(instances), 88)
self.assertEqual(first_report.creates, 88)
self.assertEqual(first_report.updates, 0)
self.assertEqual(first_report.total_models, 88)
# now we update
report = etl.update_route_info(self.test_excel_with_updates,
self.session,
self.daily_worksheets)
route_1 = self.session.query(models.Route).filter_by(route_number=1).one()
self.assertEqual(route_1.route_name, '1-NORTH LAMAR/SOUTH CONGRESS')
self.assertEqual(route_1.service_type, 'LOCAL')
route_10 = self.session.query(models.Route).filter_by(route_number=10).one()
self.assertEqual(route_10.route_name, '10-SOUTH 1ST STREET/RED RIVER')
self.assertEqual(route_10.service_type, 'UPDATED LOCAL')
route_20 = self.session.query(models.Route).filter_by(route_number=20).one()
self.assertEqual(route_20.route_name, '20-MANOR RD/RIVERSIDE')
self.assertEqual(route_20.service_type, 'LOCAL')
route_100 = self.session.query(models.Route).filter_by(route_number=100).one()
self.assertEqual(route_100.route_name, '100-AIRPORT FLYER')
self.assertEqual(route_100.service_type, 'LIMITED/FLYER')
route_271 = self.session.query(models.Route).filter_by(route_number=271).one()
self.assertEqual(route_271.route_name, '271-DEL VALLE FLEX')
self.assertEqual(route_271.service_type, 'FEEDER')
route_350 = self.session.query(models.Route).filter_by(route_number=350).one()
self.assertEqual(route_350.route_name, '350-AIRPORT BLVD')
self.assertEqual(route_350.service_type, 'CROSSTOWN')
route_412 = self.session.query(models.Route).filter_by(route_number=412).one()
self.assertEqual(route_412.route_name, '412-EBUS/MAIN CAMPUS')
self.assertEqual(route_412.service_type, 'SPECIAL SERVICES- EBUS')
route_550 = self.session.query(models.Route).filter_by(route_number=550).one()
self.assertEqual(route_550.route_name, '550-METRO RAIL RED LINE')
self.assertEqual(route_550.service_type, 'METRORAIL')
        route_803 = self.session.query(models.Route).filter_by(route_number=803).one()
        self.assertEqual(route_803.route_name, 'METRORAPID 803 S LAMAR BURNET')
        self.assertEqual(route_803.service_type, 'METRORAPID')
self.assertEqual(report.creates, 0)
self.assertEqual(report.updates, 88)
self.assertEqual(report.total_models, 88)
class DeactivateCurrentPeriodTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
route = models.Route(route_number=1, route_name='POPULAR ROUTE',
service_type='LOCAL')
self.session.add(route)
self.session.commit()
self.timestamp = utils.get_period_timestamp('weekday', 'spring', 2015)
daily_ridership = models.DailyRidership(created_on=datetime.now(),
is_current=True,
day_of_week='weekday',
season='spring',
calendar_year=2015,
ridership=700,
route_id=route.id,
measurement_timestamp=self.timestamp)
self.session.add(daily_ridership)
self.session.commit()
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_deactivation(self):
ridership_model = models.DailyRidership
period = {
'year': 2015,
'season': 'spring',
'timestamp': self.timestamp,
'day_of_week': 'weekday'
}
etl.deactivate_current_period(1, period, ridership_model, self.session)
self.session.commit()
ridership = self.session.query(models.DailyRidership).one()
self.assertFalse(ridership.is_current)
class HandleRidershipCellTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
route = models.Route(route_number=1, route_name='POPULAR ROUTE',
service_type='LOCAL')
self.session.add(route)
self.session.commit()
self.timestamp = utils.get_period_timestamp('saturday', 'fall', 2013)
daily_ridership = models.DailyRidership(created_on=datetime.now(),
is_current=True,
day_of_week='saturday',
season='fall',
calendar_year=2013,
measurement_timestamp=self.timestamp,
ridership=7000,
route_id=route.id)
hourly_ridership = models.ServiceHourRidership(created_on=datetime.now(),
is_current=True,
day_of_week='saturday',
season='fall',
measurement_timestamp=self.timestamp,
calendar_year=2013,
ridership=70.7,
route_id=route.id)
self.session.add(daily_ridership)
self.session.add(hourly_ridership)
self.session.commit()
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_handle_new_daily_ridership(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Saturday')
ridership_cell = worksheet.cell(5, 8)
period = {
'year': 2013,
'season': 'fall',
'day_of_week': 'saturday',
'timestamp': self.timestamp
}
etl.handle_ridership_cell(1, period, ridership_cell,
models.DailyRidership, self.session)
self.session.commit()
ridership = self.session.query(models.DailyRidership)\
.filter_by(is_current=True).one()
self.assertEqual(ridership.ridership, 10997.5717761557)
old_ridership = self.session.query(models.DailyRidership) \
.filter_by(is_current=False).one()
self.assertEqual(old_ridership.ridership, float(7000))
def test_handle_new_daily_ridership_with_report(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Ridership by Route Saturday')
ridership_cell = worksheet.cell(5, 8)
period = {
'year': 2013,
'season': 'fall',
'timestamp': self.timestamp,
'day_of_week': 'saturday'
}
report = models.ETLReport(creates=0, updates=0)
etl.handle_ridership_cell(1, period, ridership_cell,
models.DailyRidership, self.session, report)
self.session.commit()
ridership = self.session.query(models.DailyRidership) \
.filter_by(is_current=True).one()
self.assertEqual(ridership.ridership, 10997.5717761557)
old_ridership = self.session.query(models.DailyRidership) \
.filter_by(is_current=False).one()
self.assertEqual(old_ridership.ridership, float(7000))
self.assertEqual(report.updates, 1)
def test_handle_new_hourly_ridership(self):
excel_book = xlrd.open_workbook(filename=self.test_excel)
worksheet = excel_book.sheet_by_name('Riders Hour Saturday')
ridership_cell = worksheet.cell(5, 8)
period = {
'year': 2013,
'season': 'fall',
'timestamp': self.timestamp,
'day_of_week': 'saturday'
}
etl.handle_ridership_cell(1, period, ridership_cell,
models.ServiceHourRidership, self.session)
self.session.commit()
ridership = self.session.query(models.ServiceHourRidership) \
.filter_by(is_current=True).one()
self.assertEqual(ridership.ridership, 39.8486808725975)
old_ridership = self.session.query(models.ServiceHourRidership) \
.filter_by(is_current=False).one()
self.assertEqual(old_ridership.ridership, float(70.7))
class UpdateRidershipTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
ini_config = os.path.join(tests_path, 'capmetrics_single.ini')
config_parser = configparser.ConfigParser()
        # make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(ini_config)
self.config = cli.parse_capmetrics_configuration(config_parser)
self.engine = create_engine(self.config['engine_url'])
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
self.session = session
etl.update_route_info('./tests/data/test_cmta_data_single.xls',
session,
['Ridership by Route Weekday'])
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_etl_reports(self):
report = etl.update_ridership('./tests/data/test_cmta_data_single.xls',
['Ridership by Route Weekday'],
models.DailyRidership,
self.session)
self.assertEqual(report.total_models, 11)
class RunExcelETLTests(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
ini_config = os.path.join(tests_path, 'capmetrics.ini')
config_parser = configparser.ConfigParser()
        # make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(ini_config)
self.config = cli.parse_capmetrics_configuration(config_parser)
self.engine = create_engine(self.config['engine_url'])
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
self.session = session
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_output(self):
        # This test takes a long time, as it performs the full ETL task for a realistic file
etl.run_excel_etl('./tests/data/test_cmta_data.xls', self.session, self.config)
reports = self.session.query(models.ETLReport).all()
self.assertTrue(len(reports), 3)
high_ridership_routes = self.session.query(models.Route) \
.filter_by(is_high_ridership=True) \
.all()
self.assertEqual(len(high_ridership_routes), 10)
returned_routes = set([route.route_number for route in high_ridership_routes])
expected_routes = {7, 1, 300, 801, 10, 3, 20, 803, 331, 37}
self.assertEqual(returned_routes, expected_routes)
class DeactivatePreviousSystemRidershipFacts(unittest.TestCase):
def setUp(self):
tests_path = os.path.dirname(__file__)
ini_config = os.path.join(tests_path, 'capmetrics.ini')
config_parser = configparser.ConfigParser()
        # make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(ini_config)
self.config = cli.parse_capmetrics_configuration(config_parser)
self.engine = create_engine(self.config['engine_url'])
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
system_ridership_fact1 = models.SystemRidership(
id=1,
created_on=APP_TIMEZONE.localize(datetime.now()),
is_active=True,
day_of_week='weekday',
season='winter',
calendar_year=2015,
service_type='bus',
ridership=1000,
measurement_timestamp=APP_TIMEZONE.localize(datetime(2015, 1, 1)))
system_ridership_fact2 = models.SystemRidership(
id=2,
created_on=APP_TIMEZONE.localize(datetime.now()),
is_active=True,
day_of_week='weekday',
season='fall',
calendar_year=2015,
service_type='bus',
ridership=1000,
measurement_timestamp=APP_TIMEZONE.localize(datetime(2015, 7, 1)))
system_ridership_fact3 = models.SystemRidership(
id=3,
created_on=APP_TIMEZONE.localize(datetime.now()),
is_active=True,
day_of_week='weekday',
season='spring',
calendar_year=2015,
service_type='bus',
ridership=1000,
measurement_timestamp=APP_TIMEZONE.localize(datetime(2015, 4, 1)))
session.add_all([system_ridership_fact1,
system_ridership_fact2,
system_ridership_fact3])
session.commit()
self.session = session
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_deactivation(self):
all_models = self.session.query(models.SystemRidership).all()
self.assertEqual(len(all_models), 3)
pre_actives = self.session.query(models.SystemRidership).filter_by(is_active=True).all()
self.assertEqual(len(pre_actives), 3)
etl.deactivate_previous_system_ridership_facts(self.session)
post_actives = self.session.query(models.SystemRidership).filter_by(is_active=True).all()
self.assertEqual(len(post_actives), 0)
inactives = self.session.query(models.SystemRidership).filter_by(is_active=False).all()
self.assertEqual(len(inactives), 3)
class UpdateSystemTrendsTests(unittest.TestCase):
def setUp(self):
"""
Tested data
+ ----------------------------------------------+
| Bus |
+ ----------------------------------------------+
| season/day | winter | spring | summer |
|-------------+-----------+----------+----------+
| weekday | 10,000 | 11,000 | 12,000 |
|-------------+-----------+----------+----------+
| saturday | 10,000 | 11,000 | 12,000 |
|-------------+-----------+----------+----------+
| sunday | 10,000 | 11,000 | 12,000 |
+-------------+-----------+----------+----------+
| Total | 70,000 | 77,000 | 84,000 |
+-------------+-----------+----------+----------+
+ ----------------------------------------------+
| Rail |
+ ----------------------------------------------+
| season/day | winter | spring | fall |
|-------------+-----------+----------+----------+
| weekday | 1,060 | 800 | 1,200 |
|-------------+-----------+----------+----------+
| saturday | 1,090 | 900 | 1,340 |
|-------------+-----------+----------+----------+
| sunday | 1,500 | 1,400 | 1,300 |
+-------------+-----------+----------+----------+
| Total | 7,890 | 6,300 | 8,640 |
+-------------+-----------+----------+----------+
"""
tests_path = os.path.dirname(__file__)
ini_config = os.path.join(tests_path, 'capmetrics.ini')
config_parser = configparser.ConfigParser()
        # make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(ini_config)
self.config = cli.parse_capmetrics_configuration(config_parser)
self.engine = create_engine(self.config['engine_url'])
Session = sessionmaker()
Session.configure(bind=self.engine)
session = Session()
models.Base.metadata.create_all(self.engine)
# 3 bus data points per season and day of week
system_ridership_fact_bus1 = models.SystemRidership(id=1, created_on=UTC_TIMEZONE.localize(datetime.now()),
is_active=True, day_of_week='weekday', season='winter', calendar_year=2015, service_type='bus',
ridership=10000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 1)))
system_ridership_fact_bus2 = models.SystemRidership(
id=2, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='weekday', season='spring', calendar_year=2015, service_type='bus',
ridership=11000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 1)))
system_ridership_fact_bus3 = models.SystemRidership(
id=3, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='weekday', season='summer', calendar_year=2015, service_type='bus',
ridership=12000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 1)))
system_ridership_fact_bus4 = models.SystemRidership(id=4, created_on=UTC_TIMEZONE.localize(datetime.now()),
is_active=True, day_of_week='saturday', season='winter', calendar_year=2015, service_type='bus',
ridership=10000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 6)))
system_ridership_fact_bus5 = models.SystemRidership(
id=5, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='saturday', season='spring', calendar_year=2015, service_type='bus',
ridership=11000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 6)))
system_ridership_fact_bus6 = models.SystemRidership(id=6,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='saturday', season='summer', calendar_year=2015, service_type='bus',
ridership=12000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 6)))
system_ridership_fact_bus7 = models.SystemRidership(id=7,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='winter', calendar_year=2015, service_type='bus',
ridership=10000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 7)))
system_ridership_fact_bus8 = models.SystemRidership(
id=8, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='spring', calendar_year=2015, service_type='bus',
ridership=11000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 7)))
system_ridership_fact_bus9 = models.SystemRidership(id=9,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='summer', calendar_year=2015, service_type='bus',
ridership=12000, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 7)))
# 3 rail data points per season and day of week
system_ridership_fact_rail1 = models.SystemRidership(id=11,
created_on=UTC_TIMEZONE.localize(datetime.now()),
is_active=True, day_of_week='weekday', season='winter', calendar_year=2015, service_type='rail',
ridership=1060, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 1)))
system_ridership_fact_rail2 = models.SystemRidership(
id=12, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='weekday', season='spring', calendar_year=2015, service_type='rail',
ridership=800, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 1)))
system_ridership_fact_rail3 = models.SystemRidership(
id=13, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='weekday', season='fall', calendar_year=2015, service_type='rail',
ridership=1200, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 1)))
system_ridership_fact_rail4 = models.SystemRidership(id=14,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='saturday', season='winter', calendar_year=2015, service_type='rail',
ridership=1090, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 6)))
system_ridership_fact_rail5 = models.SystemRidership(
id=15, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='saturday', season='spring', calendar_year=2015, service_type='rail',
ridership=900, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 6)))
system_ridership_fact_rail6 = models.SystemRidership(id=16,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='saturday', season='fall', calendar_year=2015, service_type='rail',
ridership=1340, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 6)))
system_ridership_fact_rail7 = models.SystemRidership(id=17,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='winter', calendar_year=2015, service_type='rail',
ridership=1500, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 1, 7)))
system_ridership_fact_rail8 = models.SystemRidership(
id=18, created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='spring', calendar_year=2015, service_type='rail',
ridership=1400, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 4, 7)))
system_ridership_fact_rail9 = models.SystemRidership(id=19,
created_on=UTC_TIMEZONE.localize(datetime.now()), is_active=True,
day_of_week='sunday', season='fall', calendar_year=2015, service_type='rail',
ridership=1300, measurement_timestamp=UTC_TIMEZONE.localize(datetime(2015, 7, 7)))
session.add_all([system_ridership_fact_bus1,
system_ridership_fact_bus2,
system_ridership_fact_bus3,
system_ridership_fact_bus4,
system_ridership_fact_bus5,
system_ridership_fact_bus6,
system_ridership_fact_bus7,
system_ridership_fact_bus8,
system_ridership_fact_bus9,
system_ridership_fact_rail1,
system_ridership_fact_rail2,
system_ridership_fact_rail3,
system_ridership_fact_rail4,
system_ridership_fact_rail5,
system_ridership_fact_rail6,
system_ridership_fact_rail7,
system_ridership_fact_rail8,
system_ridership_fact_rail9])
session.commit()
self.session = session
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_initial_trends(self):
etl.update_system_trends(self.session)
system_trends = self.session.query(models.SystemTrend).all()
self.assertEqual(len(system_trends), 2)
bus_trend = None
rail_trend = None
for st in system_trends:
if st.service_type == 'BUS':
bus_trend = st
else:
rail_trend = st
expected_bus_json = json.dumps([
["2014-12-29T06:00:00+00:00", 70000.0],
["2015-03-30T05:00:00+00:00", 77000.0],
["2015-06-29T05:00:00+00:00", 84000.0]
])
self.assertEqual(len(json.loads(bus_trend.trend)), 3, msg=bus_trend.trend)
self.assertEqual(bus_trend.trend, expected_bus_json, msg=bus_trend.trend)
expected_rail_json = json.dumps([
["2014-12-29T06:00:00+00:00", 7890.0],
["2015-03-30T05:00:00+00:00", 6300.0],
["2015-09-28T05:00:00+00:00", 8640.0]
])
self.assertEqual(rail_trend.trend, expected_rail_json, msg=rail_trend.trend)
class UpdateHighRidershipRoutesTests(unittest.TestCase):
def setUp(self):
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
        # creating 30 routes, each with 1 daily ridership model
for number in range(1, 31):
route = models.Route(id=number,
route_number=number,
route_name='TEST ROUTE {0}'.format(number),
service_type='LOCAL')
self.session.add(route)
# starting ridership is based on the route number
ridership = 1000 + (10 * number)
timestamp = utils.get_period_timestamp('weekday', 'spring', 2015)
daily_ridership = models.DailyRidership(created_on=datetime.now(),
is_current=True,
day_of_week='weekday',
season='spring',
calendar_year=2015,
ridership=ridership,
route_id=route.id,
measurement_timestamp=timestamp)
self.session.add(daily_ridership)
self.session.commit()
etl.update_weekly_performance(self.session)
etl.update_high_ridership_routes(self.session, 10)
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_initial_rankings(self):
high_ridership_routes = self.session.query(models.Route)\
.filter_by(is_high_ridership=True)\
.all()
self.assertEqual(len(high_ridership_routes), 10)
returned_routes = set([route.route_number for route in high_ridership_routes])
expected_routes = {21, 22, 23, 24, 25, 26, 27, 28, 29, 30}
self.assertEqual(returned_routes, expected_routes)
def test_single_update(self):
route_1 = self.session.query(models.DailyRidership).filter_by(route_id=1).one()
route_1.ridership = 5000
self.session.commit()
etl.update_weekly_performance(self.session)
etl.update_high_ridership_routes(self.session, 10)
high_ridership_routes = self.session.query(models.Route) \
.filter_by(is_high_ridership=True) \
.all()
self.assertEqual(len(high_ridership_routes), 10)
returned_routes = set([route.route_number for route in high_ridership_routes])
expected_routes = {1, 22, 23, 24, 25, 26, 27, 28, 29, 30}
self.assertEqual(returned_routes, expected_routes)
def test_updates(self):
route_1 = self.session.query(models.DailyRidership).filter_by(route_id=1).one()
route_5 = self.session.query(models.DailyRidership).filter_by(route_id=5).one()
route_1.ridership = 10000
route_5.ridership = 5000
self.session.commit()
etl.update_weekly_performance(self.session)
etl.update_high_ridership_routes(self.session, 10)
high_ridership_routes = self.session.query(models.Route) \
.filter_by(is_high_ridership=True) \
.all()
self.assertEqual(len(high_ridership_routes), 10)
returned_routes = set([route.route_number for route in high_ridership_routes])
expected_routes = {1, 5, 23, 24, 25, 26, 27, 28, 29, 30}
self.assertEqual(returned_routes, expected_routes)
class UpdateWeeklyPerformanceTests(unittest.TestCase):
def setUp(self):
self.engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
models.Base.metadata.create_all(self.engine)
# creating 3 routes, 9 daily and 9 service hour models per route
for number in range(1, 4):
route = models.Route(id=number,
route_number=number,
route_name='TEST ROUTE {0}'.format(number),
service_type='LOCAL')
self.session.add(route)
self.session.commit()
year = 2010
for day in ['weekday', 'sunday', 'saturday']:
for season in ['summer', 'spring', 'winter']:
ridership = int(1000 / number) + (10 * number)
productivity = (10 * number) - (number + 3)
timestamp = utils.get_period_timestamp(day, season, year)
daily = models.DailyRidership(created_on=datetime.utcnow(),
is_current=True,
day_of_week=day,
season=season,
calendar_year=year,
ridership=ridership,
route_id=route.id,
measurement_timestamp=timestamp)
productivity = models.ServiceHourRidership(created_on=datetime.utcnow(),
is_current=True,
day_of_week=day,
season=season,
calendar_year=year,
ridership=productivity,
route_id=route.id,
measurement_timestamp=timestamp)
self.session.add(daily)
self.session.add(productivity)
self.session.commit()
etl.update_weekly_performance(self.session)
def tearDown(self):
models.Base.metadata.drop_all(self.engine)
def test_persist(self):
dailies = self.session.query(models.DailyRidership).all()
hourlies = self.session.query(models.ServiceHourRidership).all()
self.assertEqual(len(dailies), 27)
self.assertEqual(len(hourlies), 27)
self.assertEqual(self.session.query(models.WeeklyPerformance).count(), 9)
def test_performance_calculations(self):
# 5050 + 1010 + 1010
wp1 = self.session.query(models.WeeklyPerformance).filter_by(route_id=1, season='spring').one()
self.assertEqual(wp1.ridership, 7070)
# 2600 + 520 + 520
wp2 = self.session.query(models.WeeklyPerformance).filter_by(route_id=2, season='summer').one()
self.assertEqual(wp2.ridership, 3640)
# 1815 + 363 + 363
wp3 = self.session.query(models.WeeklyPerformance).filter_by(route_id=3, season='winter').one()
self.assertEqual(wp3.ridership, 2541)
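        # Productivity is the same for every day of the week in this fixture
        # (route 1: 10*1 - (1+3) = 6), so the weekly figure simply equals the per-day value.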
self.assertEqual(wp1.productivity, 6)
self.assertEqual(wp2.productivity, 15)
self.assertEqual(wp3.productivity, 24)
def test_seasons(self):
springs = self.session.query(models.WeeklyPerformance).filter_by(route_id=1, season='spring')
for s in springs:
self.assertEqual(s.measurement_timestamp.isoformat(), '2010-03-29T05:00:00')
summers = self.session.query(models.WeeklyPerformance).filter_by(route_id=2, season='summer')
for m in summers:
self.assertEqual(m.measurement_timestamp.isoformat(), '2010-06-28T05:00:00')
winters = self.session.query(models.WeeklyPerformance).filter_by(route_id=3, season='winter')
for w in winters:
self.assertEqual(w.measurement_timestamp.isoformat(), '2009-12-28T06:00:00')
```
|
{
"source": "jga/conversationalist",
"score": 2
}
|
#### File: conversationalist/tests/test_adapters.py
```python
import os
import unittest
from conversationalist import adapters, classes
from .adapters import TopicHeaderAdapter, ParticipationAdapter
from .mocking import MockAPI
class FindTopicHeaderTests(unittest.TestCase):
def test_no_match(self):
status = {
'text': 'no match here'
}
pattern = r'/d'
self.assertFalse(adapters.find_topic_header(status, pattern))
class TransformWithTopicHeadersTests(unittest.TestCase):
def test_transform(self):
tests_path = os.path.dirname(__file__)
timeline_file_path = os.path.join(tests_path, 'json/timeline.json')
conversation = classes.Conversation(adapter=TopicHeaderAdapter)
conversation.load(timeline_file_path)
        data = adapters.transform_with_topic_headers(conversation, r'\d', return_goup=0)
self.assertEqual(data['topic_headers'], ['1', '2', '3', '4', '5'])
class TransformWithParticipationAndStylesTests(unittest.TestCase):
def test_add_origin_tweet(self):
tests_path = os.path.dirname(__file__)
timeline_file_path = os.path.join(tests_path, 'json/timeline.json')
conversation = classes.Conversation(adapter=ParticipationAdapter)
conversation.load(timeline_file_path)
        data = adapters.transform_with_participation_and_styles(conversation, [], r'\d', 0)
self.assertEqual(data['nav'], ['1', '2', '3', '4', '5'])
```
#### File: conversationalist/tests/test_classes.py
```python
from datetime import datetime, timezone, timedelta
from dateutil.parser import parse
from io import StringIO
import json
import os
import re
import sys
import unittest
from conversationalist import classes, adapters
from .adapters import ConvoParticipationAdapter as ParticipationAdapter
from .adapters import ConvoTextAdapter as TextAdapter
from .mocking import generate_mock_user, generate_mock_status, generate_mock_statuses, generate_mock_timeline_data
from .mocking import MockAPI
class StatusEncoderTests(unittest.TestCase):
def test_origin_encoding(self):
origin_user = generate_mock_user()
origin_user.id = 5
origin_user.screen_name = 'original_user'
origin_status = generate_mock_status(1, user=origin_user)
origin_status.text = 'Original status text'
child_status = generate_mock_status(2)
child_status.origin = origin_status
child_status.in_reply_to_status_id = 1
child_status_json = json.loads(classes.StatusEncoder().encode(child_status))
self.assertEqual(child_status_json['origin']['author']['id'], 5, msg=child_status_json)
class TimelineTests(unittest.TestCase):
def setUp(self):
dt1 = datetime(2001, 2, 3, 4, 5, 6, tzinfo=timezone.utc)
dt2 = dt1 + timedelta(hours=-1)
dt3 = dt1 + timedelta(hours=-3)
dt4 = dt1 + timedelta(hours=-5)
dt5 = dt1 + timedelta(hours=-6)
dt6 = dt1 + timedelta(hours=-7)
dt7 = dt1 + timedelta(hours=-10)
datetime_fixtures = [dt1, dt2, dt3, dt4, dt5, dt6, dt7]
self.mock_timeline_data = generate_mock_timeline_data(datetime_fixtures=datetime_fixtures)
self.earliest_datetime = dt7
self.latest_datetime = dt1
tests_path = os.path.dirname(__file__)
self.test_file_directory = os.path.join(tests_path, 'tmp_test_output/')
def test_get_earliest_status(self):
timeline = classes.Timeline()
timeline.data = self.mock_timeline_data
earliest_status = timeline.get_earliest_status()
self.assertTrue(earliest_status.created_at == self.earliest_datetime)
self.assertTrue(earliest_status.created_at.day == self.earliest_datetime.day)
self.assertTrue(earliest_status.created_at.hour == self.earliest_datetime.hour)
self.assertTrue(earliest_status.created_at.minute == self.earliest_datetime.minute)
self.assertTrue(earliest_status.created_at.second == self.earliest_datetime.second)
def test_generate_timeline(self):
api = MockAPI()
timeline = classes.Timeline(api=api, username='testuser')
self.assertEqual(len(list(timeline.data.values())), 7)
def test_generate_timeline_multiple_requests(self):
api = MockAPI(multi_response=True)
timeline = classes.Timeline(api=api, username='testuser')
self.assertEqual(len(list(timeline.data.values())), 7)
def test_generate_timeline_with_tight_central_cutoff(self):
api = MockAPI()
timeline = classes.Timeline(api=api, username='testuser', timeframe=-12)
self.assertEqual(len(list(timeline.data.values())), 7)
def test_generate_timeline_with_tight_utc_cutoff(self):
api = MockAPI()
timeline = classes.Timeline(api=api, username='testuser', timeframe=1)
self.assertTrue(len(list(timeline.data.values())), 1)
def test_batch_with_maximum_id(self):
timeline = classes.Timeline(username='testuser')
api = MockAPI(multi_response=True)
timeline.api = api
statuses = timeline.get_timeline_batch(max_id=2)
self.assertEqual(statuses[0].text, 'Content for tweet mock status 1')
def test_old_status_beyond_cutoff(self):
current_datetime = datetime.now(tz=timezone.utc)
fresh_status = generate_mock_status(2, created_at=current_datetime)
expired_datetime = current_datetime + timedelta(hours=-10)
old_status = generate_mock_status(1, created_at=expired_datetime)
api = MockAPI(statuses=[fresh_status, old_status])
timeline = classes.Timeline(username='testuser', timeframe=2)
timeline.api = api
timeline.earliest_id = 2
timeline.earliest_status = fresh_status
timeline.data['1'] = old_status
timeline.data['2'] = fresh_status
self.assertEqual(timeline.earliest_status.id, 2)
self.assertFalse(timeline._has_next_tweets())
self.assertEqual(timeline.earliest_status.id, 1)
def test_load_statuses(self):
status1 = generate_mock_status(1)
original_created_at = status1.created_at
bad_status1 = generate_mock_status(1)
statuses = [status1, bad_status1]
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser')
timeline.load(statuses)
self.assertTrue(len(list(timeline.data.values())), 1)
self.assertEqual(timeline.data['1'].created_at.microsecond,
original_created_at.microsecond)
def test_load_status_with_reply_id(self):
original_user = generate_mock_user()
original_user.id = 2
original_user.screen_name = 'reply_user'
original_status = generate_mock_status(1, user=original_user)
child_status = generate_mock_status(2)
child_status.in_reply_to_status_id = 1
api = MockAPI([original_status, child_status])
timeline = classes.Timeline(api=api, username='testuser')
self.assertEqual(timeline.data['2'].origin.author_name, 'reply_user')
def test_load_status_with_reply_error(self):
child_status = generate_mock_status(2)
child_status.in_reply_to_status_id = 7
api = MockAPI([child_status])
out = StringIO()
sys.stdout = out
classes.Timeline(api=api, username='testuser')
        output = out.getvalue().strip()
        sys.stdout = sys.__stdout__  # restore stdout so later tests are unaffected
        self.assertEqual(output, 'Error while fetching origin for tweet 2')
def test_has_next_tweets_no_statuses(self):
statuses = []
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser')
self.assertFalse(timeline._has_next_tweets())
def test_has_next_tweets_exceeded_cutoff(self):
old_status = generate_mock_status(1)
old_status.created_at = old_status.created_at + timedelta(hours=-2)
statuses = [old_status]
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser', timeframe=1)
self.assertFalse(timeline._has_next_tweets())
def test_has_next_tweets_exhausted(self):
status = generate_mock_status(1)
statuses = [status]
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser')
self.assertFalse(timeline._has_next_tweets())
def test_has_next_tweets_under_cutoff(self):
status = generate_mock_status(1)
status2 = generate_mock_status(2)
statuses = [status, status2]
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser', timeframe=5)
status3 = generate_mock_status(3)
status3.created_at = status.created_at + timedelta(hours=-1)
timeline.data[str(status3.id)] = status3
self.assertTrue(timeline._has_next_tweets())
self.assertEqual(timeline.earliest_status.id, 3)
def test_timeline_to_json(self):
statuses = list()
for index in range(1, 6):
text = "The text for mock status {0}".format(index)
created_at = datetime.now()
status = generate_mock_status(index, text=text, created_at=created_at)
statuses.append(status)
api = MockAPI(statuses=statuses)
timeline = classes.Timeline(api=api, username='testuser')
test_output_file_path = os.path.join(self.test_file_directory, 'test_timeline_to_json.json')
try:
timeline.to_json(test_output_file_path)
with open(test_output_file_path) as data_file:
data = json.load(data_file)
self.assertTrue('start' in data)
start_iso_date_string = data['start']
start = parse(start_iso_date_string)
self.assertTrue(isinstance(start, datetime))
self.assertTrue(start.tzinfo)
self.assertEqual(start.utcoffset(), timedelta(0))
self.assertTrue('cutoff' in data)
self.assertTrue('cutoff' in data)
cutoff_iso_date_string = data['cutoff']
cutoff = parse(cutoff_iso_date_string)
self.assertTrue(isinstance(cutoff, datetime))
self.assertTrue(cutoff.tzinfo)
self.assertEqual(cutoff.utcoffset(), timedelta(0))
self.assertTrue('total' in data)
self.assertEqual(data['total'], 5)
self.assertTrue('data' in data)
self.assertTrue(isinstance(data['data'], dict))
self.assertTrue('username' in data)
self.assertEqual(data['username'], 'testuser')
finally:
if os.path.isfile(test_output_file_path):
os.remove(test_output_file_path)
class PrepareHourlySummaryTests(unittest.TestCase):
def test_summary(self):
start = datetime(2001, 2, 3, 5, 6, 7, 8)
cutoff = datetime(2001, 2, 3, 0, 0, 0, 0)
summary = adapters.initialize_hourly_summary(start, cutoff)
self.assertEqual(len(list(summary.values())), 6, msg=summary)
self.assertTrue('2001-02-03T00:00:00' in summary, msg=summary)
self.assertTrue('2001-02-03T01:00:00' in summary, msg=summary)
self.assertTrue('2001-02-03T02:00:00' in summary, msg=summary)
self.assertTrue('2001-02-03T03:00:00' in summary, msg=summary)
self.assertTrue('2001-02-03T04:00:00' in summary, msg=summary)
self.assertTrue('2001-02-03T05:00:00' in summary, msg=summary)
class ConversationTests(unittest.TestCase):
def setUp(self):
dt1 = datetime(2001, 2, 3, 4, 5, 6, tzinfo=timezone.utc)
dt2 = dt1 + timedelta(hours=-1)
dt3 = dt1 + timedelta(hours=-3)
dt4 = dt1 + timedelta(hours=-5)
dt5 = dt1 + timedelta(hours=-6)
dt6 = dt1 + timedelta(hours=-7)
dt7 = dt1 + timedelta(hours=-10)
self.earliest_datetime = dt7
self.latest_datetime = dt1
datetime_fixtures = [dt1, dt2, dt3, dt4, dt5, dt6, dt7]
statuses = generate_mock_statuses(datetime_fixtures=datetime_fixtures)
api = MockAPI(statuses=statuses)
# we skip passing in the API so we can force start and cutoff properties
timeline = classes.Timeline(username='testuser')
timeline.start = datetime(2001, 2, 3, 5, 30, 0, tzinfo=timezone.utc)
timeline.cutoff = timeline.start + timedelta(hours=-24)
timeline.api = api
timeline._generate_timeline()
encoder = classes.TimelineEncoder()
timeline_json = json.loads(encoder.encode(timeline))
self.test_timeline = timeline_json
def test_title_after_initial_update(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
self.assertEqual(conversation.data['title'], 'Tick Tock')
def test_custom_title_after_initial_update(self):
conversation = classes.Conversation(timeline=self.test_timeline,
title='Other Title',
adapter=ParticipationAdapter )
self.assertEqual(conversation.data['title'], 'Other Title')
def test_empty_timeline_interval(self):
conversation = classes.Conversation()
start, cutoff = conversation._get_timeline_interval()
self.assertEqual(start, None)
self.assertEqual(cutoff, None)
def test_title_periods_initial_update(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
self.assertTrue(isinstance(conversation.data['periods'], list))
periods = conversation.data['periods']
self.assertTrue(len(periods), 6)
def test_title_periods_subtitles(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
periods = conversation.data['periods']
for period in periods:
subtitle = period['subtitle']
self.assertTrue(isinstance(subtitle, str))
regex_pattern = re.compile(r'(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)')
result = regex_pattern.match(subtitle)
self.assertTrue(result)
def test_period_format(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
periods = conversation.data['periods']
for period in periods:
self.assertTrue('subtitle' in period)
self.assertTrue('statuses' in period)
self.assertTrue('empty' in period)
self.assertTrue('id' in period)
self.assertTrue('empty_message' in period)
def test_period_status_format(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
periods = conversation.data['periods']
for period in periods:
statuses = period['statuses']
for status in statuses:
self.assertTrue('origin' in status)
self.assertTrue('text' in status)
self.assertTrue('style_classes' in status)
self.assertTrue('created_at' in status)
self.assertTrue('author' in status)
self.assertTrue('in_reply_to_status_id' in status)
def test_hour_parsing(self):
timeline_json = {
'start': "2001-02-03T20:44:32.316656+00:00",
'cutoff': "2001-02-03T15:44:32.316656+00:00",
}
conversation = classes.Conversation(timeline_json)
start, cutoff = conversation._get_timeline_interval()
self.assertEqual(start.hour, 20)
self.assertEqual(cutoff.hour, 15)
def test_timezone_parsing(self):
timeline_json = {
'start': "2001-02-03T20:44:32.316656+00:00",
'cutoff': "2001-02-03T15:44:32.316656+00:00",
}
conversation = classes.Conversation(timeline_json)
start, cutoff = conversation._get_timeline_interval()
self.assertTrue(start.tzinfo)
self.assertTrue(cutoff.tzinfo)
def test_eastern_time_parsing(self):
timeline_json = {
'start': "2001-02-03T20:44:32.316656-05:00",
'cutoff': "2001-02-03T15:44:32.316656-05:00",
}
conversation = classes.Conversation(timeline_json)
start, cutoff = conversation._get_timeline_interval()
start = start.astimezone(timezone.utc)
cutoff = cutoff.astimezone(timezone.utc)
self.assertEqual(start.hour, 1)
self.assertEqual(cutoff.hour, 20)
def test_central_time_parsing(self):
timeline_json = {
'start': "2001-02-03T20:44:32.316656-06:00",
'cutoff': "2001-02-03T15:44:32.316656-06:00",
}
conversation = classes.Conversation(timeline_json)
start, cutoff = conversation._get_timeline_interval()
start = start.astimezone(timezone.utc)
cutoff = cutoff.astimezone(timezone.utc)
self.assertEqual(start.hour, 2)
self.assertEqual(cutoff.hour, 21)
def test_adapter(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
self.assertEqual(len(list(conversation.data['nav'])), 7, msg=conversation.data)
def test_nav_sort(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
self.assertEqual(conversation.data['nav'], ['1', '2', '3', '4', '5', '6', '7'])
def test_style_words(self):
conversation = classes.Conversation(timeline=self.test_timeline,
adapter=ParticipationAdapter)
for period in conversation.data['periods']:
for status in period['statuses']:
self.assertEqual(set(status['style_classes'].split()), {'mock', 'status'})
def test_period_sort_earliest(self):
conversation = classes.Conversation(timeline=self.test_timeline, adapter=TextAdapter)
periods = conversation.data['periods']
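        # The test expects each period id to be the Unix timestamp (in whole seconds)
        # of its hour bucket, so rebuild the earliest hour's timestamp by hand here.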
earliest = self.earliest_datetime
clean_earliest = earliest.replace(minute=0, second=0)
unix_epoch = datetime(1970, 1, 1, tzinfo=earliest.tzinfo)
seconds = (clean_earliest - unix_epoch).total_seconds()
key = int(seconds)
self.assertEqual(key, periods[0]['id'], msg=periods)
def test_period_sort_latest(self):
conversation = classes.Conversation(timeline=self.test_timeline, adapter=TextAdapter)
periods = conversation.data['periods']
last = self.latest_datetime
clean_latest = last.replace(minute=0, second=0)
unix_epoch = datetime(1970, 1, 1, tzinfo=last.tzinfo)
seconds = (clean_latest - unix_epoch).total_seconds()
key = int(seconds)
self.assertEqual(key, periods[6]['id'], msg=periods)
def test_load_timeline_json(self):
tests_path = os.path.dirname(__file__)
timeline_file_path = os.path.join(tests_path, 'json/timeline.json')
conversation = classes.Conversation(adapter=ParticipationAdapter)
conversation.load(timeline_file_path)
data = conversation.data
self.assertEqual(len(data['periods']), 5)
self.assertEqual(data['periods'][0]['subtitle'], 'Tuesday, February 23, 2010 8PM')
self.assertEqual(data['periods'][1]['subtitle'], 'Wednesday, February 24, 2010 5AM')
self.assertEqual(data['periods'][2]['subtitle'], 'Wednesday, February 24, 2010 6AM')
self.assertEqual(data['periods'][3]['subtitle'], 'Wednesday, February 24, 2010 7AM')
self.assertEqual(data['periods'][4]['subtitle'], 'Wednesday, February 24, 2010 8AM')
self.assertEqual(data['title'], 'Tick Tock')
self.assertEqual(data['nav'], ['1', '2', '3', '4', '5'])
self.assertTrue(isinstance(data['participation'], classes.Participation))
participation = data['participation']
self.assertEqual(participation.participants.keys(), {'test_author'}, msg=participation.participants)
participant = participation.participants['test_author']
        # It's six instead of five because one of the tweets is a reply
self.assertEqual(participant.exchange_count, 6)
def test_load_timeline_json_central(self):
tests_path = os.path.dirname(__file__)
timeline_file_path = os.path.join(tests_path, 'json/timeline_central.json')
conversation = classes.Conversation(adapter=ParticipationAdapter)
conversation.load(timeline_file_path)
data = conversation.data
self.assertEqual(len(data['periods']), 5)
self.assertEqual(data['periods'][0]['subtitle'], 'Tuesday, February 23, 2010 8PM')
self.assertEqual(data['periods'][1]['subtitle'], 'Wednesday, February 24, 2010 5AM')
self.assertEqual(data['periods'][2]['subtitle'], 'Wednesday, February 24, 2010 6AM')
self.assertEqual(data['periods'][3]['subtitle'], 'Wednesday, February 24, 2010 7AM')
self.assertEqual(data['periods'][4]['subtitle'], 'Wednesday, February 24, 2010 8AM')
self.assertEqual(data['title'], 'Tick Tock')
self.assertEqual(data['nav'], ['1', '2', '3', '4', '5'])
self.assertTrue(isinstance(data['participation'], classes.Participation))
participation = data['participation']
self.assertEqual(participation.participants.keys(), {'test_author'}, msg=participation.participants)
participant = participation.participants['test_author']
self.assertEqual(participant.exchange_count, 5)
class ParticipationTests(unittest.TestCase):
def test_ranked_profiles(self):
participation = classes.Participation()
busy_participant = classes.Participant('busy_participant', 'test.url.busy_participant')
busy_participant.exchange_count = 10
participation.participants[busy_participant.name] = busy_participant
quiet_participant = classes.Participant('quiet_participant', 'test.url.quiet_participant')
quiet_participant.exchange_count = 2
participation.participants[quiet_participant.name] = quiet_participant
inactive_participant = classes.Participant('inactive_participant', 'test.url.inactive_participant')
inactive_participant.exchange_count = 0
participation.participants[inactive_participant.name] = inactive_participant
ranked = participation.get_ranked_profiles()
self.assertEqual(ranked[0].name, 'busy_participant')
self.assertEqual(ranked[1].name, 'quiet_participant')
self.assertEqual(ranked[2].name, 'inactive_participant')
```
|
{
"source": "jgaff/cif-file-ingester",
"score": 3
}
|
#### File: cif-file-ingester/cif_file_ingester/converter.py
```python
import sys
from pypif import pif
from .parse_cif_pmg import parse_cif
def convert(files=[], **kwargs):
"""
Convert files into a pif
:param files: to convert
:param kwargs: any other arguments
:return: the pif produced by this conversion
"""
print('Converting {} CIFs'.format(len(files)))
systems = []
for f in files:
converted_pif = parse_cif(f)
if converted_pif:
systems.append(converted_pif)
return systems
if __name__ == '__main__':
with open(sys.argv[1].replace('.cif','-pif.json'), 'w') as output_file:
pif.dump(convert(files=[sys.argv[1]]), output_file, indent=4)
```
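A minimal usage sketch of `convert` for several CIFs at once; the file names below are placeholders, not files shipped with the repository:
```python
# Illustrative only: the CIF file names are hypothetical.
from pypif import pif
from cif_file_ingester.converter import convert

cif_files = ['quartz.cif', 'rutile.cif']        # placeholder input paths
systems = convert(files=cif_files)              # list of pif systems, one per parsable CIF
with open('combined-pif.json', 'w') as output_file:
    pif.dump(systems, output_file, indent=4)
```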
|
{
"source": "jgaff/pif-dft",
"score": 3
}
|
#### File: dfttopif/parsers/abinit.py
```python
from .base import DFTParser, InvalidIngesterException
import os
import glob
from ase.calculators.abinit import Abinit
from pypif.obj.common import Value, Property, Scalar
class AbinitParser(DFTParser):
'''
Parser for ABINIT calculations
'''
_label = None
def __init__(self, files):
# Check whether any file mentions ABINIT within its first two lines
super(AbinitParser, self).__init__(files)
is_abinit = False
for f in files:
try:
with open(f, 'r') as fp:
for line in [fp.readline(), fp.readline()]:
if "ABINIT" in line:
is_abinit = True
except:
continue
if not is_abinit:
raise InvalidIngesterException('No Abinit files found')
def get_name(self): return "ABINIT"
def _get_label(self):
'''Find the label for the output files
for this calculation
'''
if self._label is None:
foundfiles = False
for f in self._files:
if ".files" in f:
foundfiles = True
self._label = f.split(".")[0]
with open(self._label + '.files', 'r') as fp:
line = fp.readline().split()[0]
if line != self._label + ".in":
fp.close()
raise Exception('first line must be label.in')
line = fp.readline().split()[0]
if line != self._label + ".txt":
fp.close()
raise Exception('second line must be label.txt')
line = fp.readline().split()[0]
if line != self._label + "i":
fp.close()
raise Exception('third line must be labeli')
line = fp.readline().split()[0]
if line != self._label + "o":
fp.close()
raise Exception('fourth line must be labelo')
fp.close()
if foundfiles:
return self._label
else:
raise Exception('label.files not found')
#ASE format
# (self.prefix + '.in') # input
# (self.prefix + '.txt')# output
# (self.prefix + 'i') # input
# (self.prefix + 'o') # output
else:
return self._label
def get_cutoff_energy(self):
if not self._label:
self._get_label()
# Open up the label.out output file
fp = open(os.path.join(self._directory, self._label + '.out'), 'r')
foundecho = False
# Look for ecut after the occurrence of "echo values of preprocessed input variables"
for line in fp:
if "echo values of preprocessed input variables" in line:
foundecho = True
if "ecut" in line and foundecho:
words = line.split()
return Value(scalars=[Scalar(value=float(words[1]))], units=words[2])
# Error handling: ecut not found
raise Exception('ecut not found')
```
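A hedged usage sketch of the parser above; the calculation directory and the behaviour of the `DFTParser` base class (assumed to populate `self._files` and `self._directory`) are illustrative assumptions:
```python
# Illustrative only: 'abinit_run/' and its contents are hypothetical.
import glob
from dfttopif.parsers.abinit import AbinitParser
from dfttopif.parsers.base import InvalidIngesterException

files = glob.glob('abinit_run/*')
try:
    parser = AbinitParser(files)         # raises InvalidIngesterException if no ABINIT output is present
    print(parser.get_name())             # -> "ABINIT"
    ecut = parser.get_cutoff_energy()    # pypif Value holding a single Scalar plus its units
    print(ecut.scalars[0].value, ecut.units)
except InvalidIngesterException:
    print('Directory does not look like an ABINIT calculation')
```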
|
{
"source": "JGageWright/DataHandlers",
"score": 3
}
|
#### File: JGageWright/DataHandlers/importer_snippets.py
```python
import pandas as pd
import numpy as np
import tkinter as tk
from tkinter import filedialog
from data_structures import experiment
import os
Tk = tk.Tk()
Tk.withdraw()
def ask_path():
return filedialog.asksaveasfile(mode='w').name
def df_to_excel(df, sheet_name='Sheet1'):
'''
Uses pandas to save the given df, always producing a .xlsx file
Note: supplying a file extension in the save dialog can result in multiple files being saved
'''
where = filedialog.asksaveasfile(mode='wb', filetypes=[('Microsoft Excel Worksheet', '.xlsx')],
defaultextension='.xlsx')
save_name = where.name
if save_name[-5:] != '.xlsx':
save_name = str(save_name + '.xlsx')
with pd.ExcelWriter(save_name) as writer:
df.to_excel(writer, engine='openpyxl', sheet_name=sheet_name)
def cary630(filename):
'''
Given path, shapes .CSV data output by
Agilent's Cary 630 Spectrometer (managed by MicroLab)
to a usable dataframe with integer index
'''
df = pd.read_csv(filename,
header=4,
names=['Wavenumber', 'Absorbance'])
return df
def load_experiment(filetype: str = '.csv', csv_dirname: str = None) -> experiment:
'''
:param filetype: .xlsx or .csv
:return: experiment object
Creates an experiment object for a previously exported experiment.
If filetype = .xlsx, the excel file must have sheets named 'data' and 'params'
If filetype = .csv, two CSVs in the selected folder must be named 'data' and 'params', respectively
'''
if filetype == '.xlsx':
file = filedialog.askopenfilename(filetypes=[('Excel Worksheet', '.xlsx')])
x = pd.ExcelFile(file, engine='openpyxl')
sheets = {}
for sheet in x.sheet_names:
df = pd.read_excel(file, sheet_name=sheet, index_col=0)
sheets[str(sheet)] = df
data = sheets['data']
del sheets['data']
params = sheets['params']
del sheets['params']
opt = []
for sheet in sheets.keys():
opt.append(sheets[sheet])
exp = experiment(data, params, opt)
return exp
elif filetype == '.csv':
if csv_dirname is None:
dirname = filedialog.askdirectory(title='Select a folder of CSV files')
else:
dirname = csv_dirname
filenames = os.listdir(dirname)
data = pd.read_csv(dirname+'/data.csv', index_col=0)
params = pd.read_csv(dirname + '/params.csv', index_col=0)
filenames.remove('data.csv')
filenames.remove('params.csv')
opt = []
for file in filenames:
opt.append(
pd.read_csv(dirname+'/'+file, index_col=0)
)
exp = experiment(data, params, opt)
return exp
def biologic_mpt(path: str, technique: str=None, area: float=None) -> pd.DataFrame:
"""
Should work for all .mpt files, tested on CV, CVA, CA, PEIS, ZIR.
In principle, one function could be written with options to resort columns in various ways.
impedance.py includes an impedance importer, so could use that one
Args:
path (str): Path to .mpt file
technique (str, optional): Technique type. Defaults to None.
area (float, optional): Electrode area for normalization in cm2. Defaults to None.
Returns:
pd.DataFrame: dataframe of all data, sorted with relevant columns first when applicable
"""
# .mpt has a variable number of header lines, but it tells you how many on line 2
with open(path, 'r', encoding="latin-1") as input_file:
lines = input_file.readlines()
header_line = lines[1]
num_header_lines = int(header_line.split(":")[1])
headers = lines[num_header_lines-1].split('\t')[0:-1]
data_lines = lines[num_header_lines:]
# Make a dataframe with the data
df = pd.DataFrame(data={}, columns=headers)
for i in range(len(data_lines)):
df.loc[len(df)] = data_lines[i].split('\t')
# Convert text to numbers
for col in df.columns:
try:
df[col] = pd.to_numeric(df[col])
except:
pass
# Convert current to Amps (default mA) and compute current densities.
df['<I>/A'] = df['<I>/mA'] / 1000
if area is not None:
df['j/A.cm-2'] = df['<I>/A'] / area
df['j/mA.cm-2'] = df['j/A.cm-2'] * 1000
# Sort more relevant columns first
if technique in ['CVA', 'CV', 'CA']:
aux_cols = df.columns.drop('Ewe/V').drop('<I>/mA').drop('time/s').drop('j/mA.cm-2')
df = df.reindex(columns=['Ewe/V', '<I>/mA', 'j/mA.cm-2', 'time/s', *aux_cols])
df['cycle number'] = df['cycle number'].astype(np.int64) # Be extra sure cycle number is an integer
return df
def CHI_txt(path):
"""Converts CH Instuments .txt data into pandas Datarame
Args:
path (str): path to .txt file
Raises:
ValueError: if data cannot be converted to a numerical datatype
Returns:
pd.DataFrame: Dataframe containing all data after header
"""
header = []
with open(path, 'r') as ff:
lines = ff.readlines()
# date = lines[0]
technique = lines[1].replace('\n', '')
i = 1
for line in lines[1:]: # First line always date with ,
if ',' not in line:
header.append(line)
if line != '\n': # readlines counts blank lines, but pandas does not.
i += 1
else:
break
df = pd.read_csv(path, header=i)
df.columns = df.columns.str.replace(' ', '')
for col in df.columns:
try:
df[col] = pd.to_numeric(df[col])
except:
raise ValueError('Column could not be converted to numeric')
if technique == 'A.C. Impedance':
df.rename({r"Z'/ohm": 'Zre/ohm',
r'Z"/ohm': 'Zim/ohm',
r"Z/ohm": 'Zmag/ohm'}, axis=1, inplace=True)
df['Zcx/ohm'] = df['Zre/ohm'] + 1j*df['Zim/ohm']
df['Angular_Freq'] = df['Freq/Hz']*2*np.pi
return df
def CHI_txt_todict(path, dict):
"""Converts CH Instuments .txt data into pandas Datarame and appends to passed dictionary
Args:
path (str): path to .txt file
dict (dict): dictionary to append df
Raises:
ValueError: if data cannot be converted to a numerical datatype
Returns:
dict: Dictionary of dataframes keyed by file path. Dataframes contain all data after the header
"""
header = []
with open(path, 'r') as ff:
lines = ff.readlines()
# date = lines[0]
technique = lines[1].replace('\n', '')
i = 1
for line in lines[1:]: # First line always date with ,
if ',' not in line:
header.append(line)
if line != '\n': # readlines counts blank lines, but pandas does not.
i += 1
else:
break
df = pd.read_csv(path, header=i)
df.columns = df.columns.str.replace(' ', '')
for col in df.columns:
try:
df[col] = pd.to_numeric(df[col])
except:
raise ValueError('Column could not be converted to numeric')
if technique == 'A.C. Impedance':
df.rename({r"Z'/ohm": 'Zre/ohm',
r'Z"/ohm': 'Zim/ohm',
r"Z/ohm": 'Zmag/ohm'}, axis=1, inplace=True)
df['Zcx/ohm'] = df['Zre/ohm'] + 1j*df['Zim/ohm']
df['Angular_Freq'] = df['Freq/Hz']*2*np.pi
dict[path] = df
return dict
def Gamry_dta(file: str):
"""Converts Gamry's .DTA file to usable format.
------------Gamry's Notation-----------------
Gamry's Vf (filtered voltage) is the measured potential.
If you select IR Compensation, the actual applied voltage between current interruptions is the sum of the Vf and Vu data.
In Corrosion Potential measurements, or in the Open Circuit Voltage (OCV) measurement before an experiment, Vf is the filtered, measured, open-circuit voltage.
A digital filter has been applied to minimize pickup of the AC mains frequency (50 or 60 Hz).
Gamry's Vu is the uncompensated voltage, i.e. the voltage-drop across the solution resistance between the Working Electrode and the tip of the Reference Electrode.
If you select IR Compensation (current interrupt), Vu is the difference between the measured voltage with the current flowing, and the voltage during the period of the current interrupt. The actual applied voltage between current interruptions is the sum of the Vf and Vu data.
Gamry's Vm, where it appears, is included only for compatibility with older versions of the Gamry Framework Software.
---------------------------------------------
Args:
file (str): Path to .DTA file
Returns:
pd.DataFrame: data
"""
# Get technique. It sits in the third line of a .DTA file.
# The actual text that appears here may vary with the experimental setup; check it if you're getting errors.
with open(file, 'r') as opened_file:
lines = opened_file.readlines()
technique = lines[2].split('\t')[2]
# Import data
if technique == 'Chronopotentiometry Scan':
df = pd.read_csv(file,
delimiter='\t',
skiprows=58,
names=['empty', '', 'Time/sec', 'Potential/V', 'Current/A', 'Vu/V', 'Sig', 'Ach', 'IERange', 'Over'],
index_col=0,
usecols=lambda x: x != 'empty'
)
elif technique == 'Open Circuit Potential':
df = pd.read_csv(file,
delimiter='\t',
skiprows=47,
names=['empty', '', 'Time/sec', 'Potential/V', 'Vm/V', 'Ach', 'Over'],
index_col=0,
usecols=lambda x: x != 'empty'
)
del df['Over'] # drop the 'Over' column; its contents cannot be parsed into a useful dtype
return df
```
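A short usage sketch for the importers above; the paths, electrode area and module import path are placeholder assumptions (note that importing the module creates, and immediately hides, a Tk root window):
```python
# Hypothetical paths and values; only the function signatures come from importer_snippets above.
from importer_snippets import biologic_mpt, CHI_txt, Gamry_dta

cv = biologic_mpt('experiment_01_CVA.mpt', technique='CVA', area=0.196)  # area in cm^2 (placeholder)
print(cv[['Ewe/V', 'j/mA.cm-2', 'cycle number']].head())

eis = CHI_txt('cell_A_impedance.txt')  # A.C. Impedance data gains Zre/ohm, Zim/ohm, Zcx/ohm and Angular_Freq columns
ocp = Gamry_dta('cell_A_OCP.DTA')      # Open Circuit Potential file -> Time/sec, Potential/V, Vm/V, Ach columns
```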
#### File: JGageWright/DataHandlers/LinReg.py
```python
import numpy as np
from numpy.linalg import lstsq
import pandas as pd
import matplotlib.pyplot as plt
class PolyReg:
"""
Wraps numpy's least-squares polynomial fitting (np.polyfit), exposing the results as callable attributes.
No parameters can be fixed.
Polynomial coefficients in order of decreasing degree are in coef[i].
Note that ss_yy is commonly named ss_tot in other implementations. This is the total sum of squares.
coef = ndarray of fitting parameters in order of decreasing degree
ss_res = sum of squares of residuals.
std_err = ndarray of standard errors of the fitting parameters in order of decreasing degree. These are calculated as the square root of diagonal elements in the covariance matrix.
"""
def __init__(self, xdata, ydata, degree: int):
"""
:param xdata: Array of xdata
:param ydata: Array of ydata
:param degree: Degree of polynomial fit
"""
self.xdata = xdata
self.ydata = ydata
self.degree = degree
self.coef, self.cov = np.polyfit(xdata, ydata, degree, cov=True)
self.residuals = ydata - np.polyval(self.coef, xdata)
self.ss_res = np.sum(self.residuals**2)
self.ss_yy = np.sum((ydata - np.mean(ydata)) ** 2)
self.ss_xx = np.sum((xdata - np.mean(xdata)) ** 2)
self.ss_xy = np.sum((xdata - np.mean(xdata))*(ydata - np.mean(ydata)))
self.r_squared = 1 - (self.ss_res / self.ss_yy)
self.std_err = np.sqrt(np.diag(self.cov))
self.s_y = np.sqrt(self.ss_res / (len(ydata) - 1 - self.degree))
self.roots = np.roots(self.coef)
def report(self):
'''
Returns some quantities of interest
'''
params = {}
for i in range(len(self.coef)):
params['coef_deg' + str(len(self.coef) - i -1)] = self.coef[i]
params['std_err_deg' + str(len(self.coef) - i -1)] = self.std_err[i]
params['r_squared'] = self.r_squared
params['s_y'] = self.s_y
series = pd.Series(params)
return series
def eval(self, x):
return np.polyval(self.coef, x)
class LinFixB:
"""Linear regression class similar to PolyReg, but with degree = 0 and the y-intercept (b) fixed at 0
"""
def __init__(self, xdata, ydata) -> None:
self.xdata = xdata
self.ydata = ydata
x_col = xdata[:, np.newaxis] # x must be a column vector for np.lstsq
y_col = ydata[:, np.newaxis] # y must have same shape as x
self.coef, ss_res, rank, s = lstsq(x_col, y_col, rcond=False)
b = np.zeros([1]) # insert intercept manually
self.coef = np.append(self.coef, b)
self.cov = np.cov(xdata, ydata)
self.residuals = ydata - np.polyval(self.coef, xdata)
self.ss_res = np.sum(self.residuals**2)
self.ss_yy = np.sum((ydata) ** 2)
self.r_squared = 1 - (self.ss_res / self.ss_yy)
self.std_err = np.sqrt(np.diag(self.cov)) # Not sure if this is correct
self.s_y = np.sqrt(self.ss_res / (len(ydata) - 2)) # len(ydata) - 1 - degree of fit
self.roots = np.roots(self.coef)
def report(self):
'''
Returns some quantities of interest
'''
params = {}
for i in range(len(self.coef)):
params['coef_deg' + str(len(self.coef) - i -1)] = self.coef[i]
params['r_squared'] = self.r_squared
series = pd.Series(params)
return series
def eval(self, x):
return np.polyval(self.coef, x)
def linear_region(x, y, bounds: tuple=None, index=False):
"""Fits specified region to a line and plots it over the data
Args:
x (array): x data
y (array): y data
bounds (tuple, optional): Bounds of the region to fit, in units of x, or as array indices if index is True. Defaults to None.
index (bool, optional): Whether bounds should be read as array indices. Defaults to False.
Returns:
tuple: LinReg.PolyReg object containing linear fit, figure, axes
"""
if bounds != None:
if index is True:
bound_idx = bounds
elif index is False:
bound_idx = np.argmin(np.abs(x - bounds[0])), np.argmin(np.abs(x - bounds[1]))
elif bounds == None:
bound_idx = (0, len(x) - 1)
fit = PolyReg(x[bound_idx[0]:bound_idx[1]], y[bound_idx[0]:bound_idx[1]], 1)
fig, ax = plt.subplots()
ax.scatter(x, y, c='C0')
x_space = np.linspace(x[bound_idx[0]], x[bound_idx[1]], 1000)
ax.plot(x_space, fit.eval(x_space), c='C1')
return fit, fig, ax
```
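A brief usage sketch with synthetic data; the data themselves and the `LinReg` import path are assumptions for illustration:
```python
# Synthetic data only; demonstrates PolyReg and linear_region defined above.
import numpy as np
from LinReg import PolyReg, linear_region

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
y = 2.5 * x + 1.0 + rng.normal(scale=0.3, size=x.size)

fit = PolyReg(x, y, degree=1)
print(fit.report())   # coefficients, standard errors, r_squared, s_y
print(fit.eval(5.0))  # evaluate the fitted polynomial at x = 5

# Fit only the region 2 <= x <= 8 and overlay the line on a scatter plot
region_fit, fig, ax = linear_region(x, y, bounds=(2, 8))
```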
|
{
"source": "jgagneastro/banyan_sigma",
"score": 3
}
|
#### File: jgagneastro/banyan_sigma/banyan_sigma.py
```python
import numpy as np #Numpy maths
from scipy.special import erfc
import os #Access to environment variables
import pandas as pd #Pandas dataframes will be used to store BANYAN Sigma outputs
from astropy.table import Table #Reading astro-formatted tables
import warnings #Raise user-defined Python warnings
import pdb #Debugging
from scipy.stats import describe #Useful for debugging
from scipy.special import logsumexp #Useful to sum logarithms in a numerically stable way
#A more user-friendly way to set break points
stop = pdb.set_trace
#A very small number used for numerical stability
tiny_number = 1e-318
#The total number of stars in the Besancon model within 300 pc to translate FPR to NFP
total_besancon_objects = 7152397.0
#Initiate some global constants
#1 AU/yr to km/s divided by 1000
kappa = 0.004743717361
#Not using "from astropy import units as u; kappa=u.au.to(u.km)/u.year.to(u.s)" because astropy defines one year as exactly 365.25 days instead of 365 days
#J2000.0 Equatorial position of the Galactic North (b=90 degrees) from Carroll and Ostlie
ra_pol = 192.8595
dec_pol = 27.12825
#J2000.0 Galactic latitude gb of the Celestial North pole (dec=90 degrees) from Carroll and Ostlie
l_north = 122.932
#Galactic Coordinates matrix
TGAL = (np.array([[-0.0548755604, -0.8734370902, -0.4838350155],
[0.4941094279, -0.4448296300, 0.7469822445],
[-0.8676661490, -0.1980763734, 0.4559837762]]))
#Initiate some secondary variables
sin_dec_pol = np.sin(np.radians(dec_pol))
cos_dec_pol = np.cos(np.radians(dec_pol))
#Main BANYAN_SIGMA routine
def banyan_sigma(stars_data=None,column_names=None,hypotheses=None,ln_priors=None,ntargets_max=1e7,ra=None,dec=None,pmra=None,pmdec=None,epmra=None,epmdec=None,dist=None,edist=None,rv=None,erv=None,psira=None,psidec=None,epsira=None,epsidec=None,plx=None,eplx=None,constraint_dist_per_hyp=None,constraint_edist_per_hyp=None,unit_priors=False,lnp_only=False,no_xyz=False,use_rv=None,use_dist=None,use_plx=None,use_psi=None,custom_models=None):
#Automatically detect Astropy Tables and transform them to pandas dataframes
if stars_data is not None:
if isinstance(stars_data,Table):
#First remove multi-dimensional columns to avoid crash
for keys in stars_data.keys():
if stars_data[keys].ndim != 1:
stars_data.remove_column(keys)
#Now transform to pandas dataframe
stars_data = stars_data.to_pandas()
#Check input consistency
if stars_data is None and (ra is None or dec is None or pmra is None or pmdec is None or epmra is None or epmdec is None):
raise ValueError('Either an input structure (stars_data) or all of the ra,dec,pmra,pmdec,epmra and epmdec keywords must be specified !')
if constraint_dist_per_hyp is not None and constraint_edist_per_hyp is None:
raise ValueError('If constraint_dist_per_hyp is specified, constraint_edist_per_hyp must also be specified !')
#Default column names
default_column_names = {'RA':'RA','DEC':'DEC','PMRA':'PMRA','PMDEC':'PMDEC','EPMRA':'EPMRA','EPMDEC':'EPMDEC'}
if use_rv is True:
default_column_names['RV'] = 'RV'
default_column_names['ERV'] = 'ERV'
if use_plx is True:
default_column_names['PLX'] = 'PLX'
default_column_names['EPLX'] = 'EPLX'
if use_dist is True:
default_column_names['DIST'] = 'DIST'
default_column_names['EDIST'] = 'EDIST'
if use_psi is True:
default_column_names['PSIRA'] = 'PSIRA'
default_column_names['PSIDEC'] = 'PSIDEC'
default_column_names['EPSIRA'] = 'EPSIRA'
default_column_names['EPSIDEC'] = 'EPSIDEC'
#Merge user-issued column data with the default values (the user-issued values take predominance)
if column_names is not None:
column_names = {**default_column_names, **column_names}
else:
column_names = default_column_names
#Check if a column named PLX, DIST, RV, PSIRA, etc. exist in stars_data but not in column_names. If this is the case, issue a warning so that the user understands that some data are not being considered.
if stars_data is not None:
if 'PLX' in stars_data.keys() and 'PLX' not in column_names.keys() and use_plx is None:
warnings.warn('Parallaxes (PLX) were not read from the input data, because the PLX key was not included in the column_names keyword of banyan_sigma(). You can also call banyan_sigma() with the use_plx=True keyword to read them, or with use_plx=False to avoid this warning message.')
if 'DIST' in stars_data.keys() and 'DIST' not in column_names.keys() and use_dist is None:
warnings.warn('Distances (DIST) were not read from the input data, because the DIST key was not included in the column_names keyword of banyan_sigma(). You can also call banyan_sigma() with the use_dist=True keyword to read them, or with use_dist=False to avoid this warning message.')
if 'RV' in stars_data.keys() and 'RV' not in column_names.keys() and use_rv is None:
warnings.warn('Radial velocities (RV) were not read from the input data, because the RV key was not included in the column_names keyword of banyan_sigma(). You can also call banyan_sigma() with use_rv=True to read them, or with use_rv=False to avoid this warning message.')
if ('PSIRA' in stars_data.keys() and 'PSIRA' not in column_names.keys()) or ('PSIDEC' in stars_data.keys() and 'PSIDEC' not in column_names.keys()) and use_psi is None:
warnings.warn('The PSI parameters (PSIRA,PSIDEC) were not read from the input data, because the PSIRA and PSIDEC keys were not included in the column_data keyword of banyan_sigma(). You can also call banyan_sigma() with use_psi=True keyword to read them, or with use_psi=False to avoid this warning message.')
#Create a table of data for BANYAN SIGMA to use
if ra is not None:
nobj = np.size(ra)
zeros = np.zeros(nobj)
data_table = pd.DataFrame({'RA':ra,'DEC':dec,'PMRA':pmra,'PMDEC':pmdec,'EPMRA':epmra,'EPMDEC':epmdec,'PSIRA':zeros,'PSIDEC':zeros,'EPSIRA':zeros,'EPSIDEC':zeros})
if ra is None:
nobj = stars_data.shape[0]
zeros = np.zeros(nobj)
data_table = pd.DataFrame({'RA':stars_data[column_names['RA']],'DEC':stars_data[column_names['DEC']],'PMRA':stars_data[column_names['PMRA']],'PMDEC':stars_data[column_names['PMDEC']],'EPMRA':stars_data[column_names['EPMRA']],'EPMDEC':stars_data[column_names['EPMDEC']],'PSIRA':zeros,'PSIDEC':zeros,'EPSIRA':zeros,'EPSIDEC':zeros})
#Fill up the data table with stars_data if it is specified
if stars_data is not None:
for keys in column_names.keys():
#Skip special keys
if (keys == 'NAME') or (keys == 'PLX') or (keys == 'EPLX'):
continue
data_table[keys] = stars_data[column_names[keys]]
if 'PLX' in column_names.keys():
data_table['DIST'] = 1e3/stars_data[column_names['PLX']]
if 'PLX' in column_names.keys() and 'EPLX' in column_names.keys():
data_table['EDIST'] = 1e3/stars_data[column_names['PLX']]**2*stars_data[column_names['EPLX']]
#Transform parallaxes to distances directly in data_table
if 'PLX' in data_table.keys() and 'EPLX' in data_table.keys():
data_table['EDIST'] = 1e3/data_table['PLX']**2*data_table['EPLX']
data_table = data_table.drop('EPLX', 1)
if 'PLX' in data_table.keys():
data_table['DIST'] = 1e3/data_table['PLX']
data_table = data_table.drop('PLX', 1)
#If measurements are specified as keywords, put them in the data table
if ra is not None:
data_table['RA'] = ra
if dec is not None:
data_table['DEC'] = dec
if pmra is not None:
data_table['PMRA'] = pmra
if pmdec is not None:
data_table['PMDEC'] = pmdec
if epmra is not None:
data_table['EPMRA'] = epmra
if epmdec is not None:
data_table['EPMDEC'] = epmdec
if plx is not None:
data_table['DIST'] = 1e3/plx
if plx is not None and eplx is not None:
data_table['EDIST'] = 1e3/plx**2*eplx
if dist is not None:
data_table['DIST'] = dist
if edist is not None:
data_table['EDIST'] = edist
if rv is not None:
data_table['RV'] = rv
if erv is not None:
data_table['ERV'] = erv
if psira is not None:
data_table['PSIRA'] = psira
if psidec is not None:
data_table['PSIDEC'] = psidec
if epsira is not None:
data_table['EPSIRA'] = epsira
if epsidec is not None:
data_table['EPSIDEC'] = epsidec
#Check for unphysical data
if np.max((data_table['RA'] < 0.) | (data_table['RA'] >= 360.)) != 0:
raise ValueError('Some RA values are unphysical')
if np.max((data_table['DEC'] < -90.) | (data_table['DEC'] > 90.)) != 0:
raise ValueError('Some DEC values are unphysical')
if np.max((data_table['EPMRA'] < 0.) | (data_table['EPMDEC'] < 0.)) != 0:
raise ValueError('Some EPMRA or EPMDEC values are unphysical')
if np.max((np.isnan(data_table['RA']) | (np.isnan(data_table['DEC'])) | (np.isnan(data_table['PMRA'])) | (np.isnan(data_table['PMDEC'])) | (np.isnan(data_table['EPMRA'])) | (np.isnan(data_table['EPMDEC'])))) != 0:
raise ValueError('The observables ra,dec,pmra,pmdec,epmra and epmdec must be specified (and finite) for each object !')
if 'RV' in data_table.keys() and 'ERV' not in data_table.keys():
raise ValueError('RV is defined in the data table but not ERV')
if 'DIST' in data_table.keys() and 'EDIST' not in data_table.keys():
raise ValueError('DIST is defined in the data table but not EDIST')
if 'ERV' in data_table.keys():
if np.max(data_table['ERV'] <= 0.):
raise ValueError('Some ERV values are unphysical')
if 'RV' in data_table.keys() and 'ERV' in data_table.keys():
if np.max(np.isfinite(data_table['RV']) & np.isnan(data_table['ERV'])):
raise ValueError('Some RV values are specified without ERV')
if 'DIST' in data_table.keys() and 'EDIST' in data_table.keys():
if np.max((data_table['DIST'] < 0.) | (data_table['EDIST'] <= 0.)):
raise ValueError('Some DIST or EDIST values are unphysical')
if np.max(np.isfinite(data_table['DIST']) & np.isnan(data_table['EDIST'])):
raise ValueError('Some DIST values are specified without EDIST')
if np.max(((data_table['PSIRA'] != 0.) | (data_table['PSIDEC'] != 0.)) & ((data_table['EPSIRA'] == 0.) | (data_table['EPSIDEC'] == 0.)) | (data_table['EPSIRA'] < 0.) | (data_table['EPSIDEC'] < 0.)):
raise ValueError('Some EPSIRA or EPSIDEC values are unphysical')
#Fill the data table with empty RVs and distances if they were not specified
if 'RV' not in data_table.keys():
data_table['RV'] = np.nan
if 'ERV' not in data_table.keys():
data_table['ERV'] = np.nan
if 'DIST' not in data_table.keys():
data_table['DIST'] = np.nan
if 'EDIST' not in data_table.keys():
data_table['EDIST'] = np.nan
if custom_models is not None:
parameters_str = custom_models
else:
#Data file containing the parameters of Bayesian hypotheses
parameters_file = os.path.dirname(__file__)+os.sep+'data'+os.sep+'banyan_sigma_parameters.fits'
#Check if the file exists
if not os.path.isfile(parameters_file):
raise ValueError('The multivariate Gaussian parameters file could not be found ! Please make sure that you did not move "'+os.sep+'data'+os.sep+'banyan_sigma_parameters.fits" from the same path as the Python file banyan_sigma.py !')
#Read the parameters of Bayesian hypotheses
parameters_str = Table.read(parameters_file,format='fits')
#Remove white spaces in names
parameters_str['NAME'] = np.chararray.strip(np.array(parameters_str['NAME']))
#Index the table by hypothesis name
parameters_str.add_index('NAME')
npar = np.size(parameters_str)
#Build a unique list of Bayesian hypotheses
if hypotheses is None:
hypotheses = np.array(parameters_str['NAME'])
indexes = np.unique(hypotheses,return_index=True)[1]
hypotheses = hypotheses[sorted(indexes)]
#Make sure that hypotheses are all upper case
#Also make sure that all hypotheses are not in bytes format
hypotheses = np.array([hyp.upper().decode('UTF-8') for hyp in hypotheses.tolist()])
nhyp = hypotheses.size
#If constraint_dist_per_hyp is set, check that all hypotheses are included
if constraint_dist_per_hyp is not None:
if sorted(constraint_dist_per_hyp.keys()) != sorted(constraint_edist_per_hyp.keys()):
raise ValueError('The tag names of constraint_dist_per_hyp and constraint_edist_per_hyp are different')
if sorted(constraint_dist_per_hyp.keys()) != sorted(hypotheses.tolist()):
raise ValueError('The tag names of constraint_dist_per_hyp and the list of Bayesian hypotheses are different')
#Build constraint_dist_per_hyp into an array
dist_per_hyp_arr = np.empty((nobj,nhyp))*np.nan
edist_per_hyp_arr = np.empty((nobj,nhyp))*np.nan
#Read the distance constraints for each Bayesian hypothesis
for i in range(nhyp):
dist_per_hyp_arr[:,i] = constraint_dist_per_hyp[hypotheses[i]]
edist_per_hyp_arr[:,i] = constraint_edist_per_hyp[hypotheses[i]]
#Verify that all distance constraints are physical
if np.max((dist_per_hyp_arr < 0.) | (edist_per_hyp_arr <= 0.)):
raise ValueError('Some of the specified constraint_dist_per_hyp or constraint_edist_per_hyp values are unphysical')
if np.max(np.isfinite(dist_per_hyp_arr) & np.isnan(edist_per_hyp_arr)):
raise ValueError('Some of the specified constraint_edist_per_hyp are not finite where constraint_dist_per_hyp are finite')
#Check that either all or none of the distance constraints are finite for a given object
if np.max(np.isfinite(np.nansum(dist_per_hyp_arr,axis=1)) & np.isnan(np.sum(dist_per_hyp_arr,axis=1))):
raise ValueError('The constraint_dist_per_hyp and constraint_edist_per_hyp values must be all finite or all non-finite for a given star')
#Override priors to unity if the keyword unit_priors is set
if unit_priors is True:
parameters_str['LN_PRIOR'] = 0.
#Determine whether a trigonometric distance or a per-hypothesis distance constraint was set
if constraint_dist_per_hyp is not None:
distance_is_set = (np.isfinite(data_table['DIST']) | np.isfinite(np.nansum(dist_per_hyp_arr,axis=1)))
else:
distance_is_set = np.isfinite(data_table['DIST'])
#Assign the correct Bayesian priors to each star
g_pm = (np.where(np.isnan(data_table['RV']) & (~distance_is_set)))[0]
g_pm_rv = (np.where(np.isfinite(data_table['RV']) & (~distance_is_set)))[0]
g_pm_dist = (np.where(np.isnan(data_table['RV']) & distance_is_set))[0]
g_pm_rv_dist = (np.where(np.isfinite(data_table['RV']) & distance_is_set))[0]
ln_priors_nd = np.zeros((nobj,nhyp))
ln_priors_nd_manual = np.zeros((nobj,nhyp))
for i in range(nhyp):
#Skip the field hypotheses as they do not have a Bayesian prior
if hypotheses[i].find('FIELD') != -1:
continue
#Read the parameters structure to identify the 4 priors associated with a given young association
ln_priors_i = parameters_str.loc[hypotheses[i]]['LN_PRIOR']
#In the cases where only one prior is designated, assign it to all stars
if ln_priors_i.size == 1:
ln_priors_nd[:,i] = ln_priors_i[0]
else:
#Otherwise assign them properly as a function of available observables
ln_priors_nd[g_pm,i] = ln_priors_i[0]
ln_priors_nd[g_pm_rv,i] = ln_priors_i[1]
ln_priors_nd[g_pm_dist,i] = ln_priors_i[2]
ln_priors_nd[g_pm_rv_dist,i] = ln_priors_i[3]
#Include manual priors if they are specified as an input structure
if ln_priors is not None:
for i in range(nhyp):
#The field hypotheses *can* have manual priors
if hypotheses[i] not in ln_priors.keys():
warnings.warn('The prior for hypothesis '+hypotheses[i]+' was left to its default value as it was not specified manually')
continue
ln_priors_nd_manual[:,i] = ln_priors[hypotheses[i]]
#Normalize manual priors with the field hypothesis (because they get applied only on young associations)
gnorm = np.where(['FIELD' in hyp for hyp in hypotheses.tolist()])
norm_priors_1d = logsumexp(ln_priors_nd_manual[:,gnorm[0]],axis=1)
ln_priors_nd_manual -= np.tile(norm_priors_1d,(nhyp,1)).transpose()
#Apply the manual priors on top of the default priors
ln_priors_nd += ln_priors_nd_manual
#If both trigonometric distances and per-hypothesis distance constraints are set, transform the per-hypothesis distance constraints into priors
both_distances_set = []
if constraint_dist_per_hyp is not None:
both_distances_set = np.where(np.isfinite(data_table['DIST']) & np.isfinite(np.nansum(dist_per_hyp_arr,axis=1)))
if np.size(both_distances_set) != 0:
xdist_measured = np.tile(data_table['DIST'].iloc[both_distances_set[0]],(nhyp,1)).transpose()
xedist_measured = np.tile(data_table['EDIST'].iloc[both_distances_set[0]],(nhyp,1)).transpose()
ln_prob_dist_differences = -(xdist_measured-dist_per_hyp_arr[both_distances_set[0],:])**2/(2.0*(xedist_measured**2+edist_per_hyp_arr[both_distances_set[0],:]**2))
#Treat these values as priors so normalize them with the field hypotheses (because they get applied only on young associations)
gnorm = np.where(['FIELD' in hyp for hyp in hypotheses.tolist()])
norm_priors_1d = logsumexp(ln_prob_dist_differences[:,gnorm[0]],axis=1)
ln_prob_dist_differences -= np.tile(norm_priors_1d,(nhyp,1)).transpose()
#Apply these values on the priors
ln_priors_nd[both_distances_set[0],:] += ln_prob_dist_differences
#Remove the per-hypothesis distance constraints on these particular objects and just keep the trigonometric distances
dist_per_hyp_arr[both_distances_set[0],:] = np.nan
edist_per_hyp_arr[both_distances_set[0],:] = np.nan
#Initiate an array that will contain the ln probabilities if those are the only required outputs
if lnp_only is True:
all_lnprobs = np.empty((nobj,nhyp))*np.nan
#Loop on hypotheses to run BANYAN Sigma on
output_str_allhyps_list = []
for i in range(nhyp):
#print("HYP "+str(i))
#If constraint_dist_per_hyp is set, determine which distance constraint must be used now
dist_for_this_hypothesis = data_table['DIST'].values
edist_for_this_hypothesis = data_table['EDIST'].values
if constraint_dist_per_hyp is not None:
gdist_per_hyp = np.where(np.isfinite(dist_per_hyp_arr[:,i]))
dist_for_this_hypothesis[gdist_per_hyp[0]] = dist_per_hyp_arr[gdist_per_hyp[0],i]
edist_for_this_hypothesis[gdist_per_hyp[0]] = edist_per_hyp_arr[gdist_per_hyp[0],i]
#Loop over individual multivariate Gaussians if the model is a mixture
ngauss = np.size(parameters_str.loc[hypotheses[i]])
output_str_multimodel_list = []
if lnp_only is True:
all_lnprobs_hypi = np.zeros((nobj,ngauss))
for gaussi in range(ngauss):
#Somehow we cannot access the Gaussian index without the table breaking when there is just one Gaussian component, so here we grab the right table row
if ngauss == 1:
parameters_str_row = parameters_str.loc[hypotheses[i]]
else:
parameters_str_row = parameters_str.loc[hypotheses[i]][gaussi]
#Determine how many batches will be needed to avoid saturating the RAM
nbatches = np.int(np.ceil(nobj/ntargets_max))
output_str_list = []
for ci in range(nbatches):
#Determine the indices of the stars to be selected
ind_from = np.int(np.round(ci*ntargets_max))
ind_to = np.int(ind_from + np.round(ntargets_max))
ind_to = np.minimum(ind_to,np.int(nobj))
#Create a sub-structure of input data
data_table_ci = data_table[ind_from:ind_to]
dist_for_this_hypothesis_ci = dist_for_this_hypothesis[ind_from:ind_to]
edist_for_this_hypothesis_ci = edist_for_this_hypothesis[ind_from:ind_to]
nobj_ci = np.size(data_table_ci)
#Solve the BANYAN Sigma integrals for this hypothesis and this batch of targets
output_str_ci = banyan_sigma_solve_multivar(data_table_ci['RA'].values,data_table_ci['DEC'].values,data_table_ci['PMRA'].values,data_table_ci['PMDEC'].values,data_table_ci['EPMRA'].values,data_table_ci['EPMDEC'].values,rv_measured=data_table_ci['RV'].values,rv_error=data_table_ci['ERV'].values,dist_measured=dist_for_this_hypothesis_ci,dist_error=edist_for_this_hypothesis_ci,psira=data_table_ci['PSIRA'].values,psidec=data_table_ci['PSIDEC'].values,psira_error=data_table_ci['EPSIRA'].values,psidec_error=data_table_ci['EPSIDEC'].values,precision_matrix=parameters_str_row['PRECISION_MATRIX'],center_vec=parameters_str_row['CENTER_VEC'],precision_matrix_determinant=parameters_str_row['PRECISION_DETERM'])
#Store the log of probabilities if those are the only required output
if lnp_only is True:
all_lnprobs_hypi[ind_from:ind_to,gaussi] = output_str_ci['LN_P']
continue
#Append the dataframe in the Python list
output_str_list.append(output_str_ci)
#Contatenate the list of Dataframes
output_str = pd.concat(output_str_list,ignore_index=True)
#Reformat the output structure if this hypothesis is a multivariate Gaussian mixture
if ngauss != 1:
#Use column multi-indexing to add a second title to the columns, which corresponds to the ID of the Gaussian mixture component
dataframe_column_names = output_str.columns
output_str.columns = [np.array(dataframe_column_names),np.array(np.tile('Gauss'+str(gaussi),dataframe_column_names.size))]
output_str_multimodel_list.append(output_str)
#If only log probs are required, compile them in the main array
if lnp_only is True:
if ngauss == 1:
all_lnprobs[:,i] = all_lnprobs_hypi
else:
weights = parameters_str.loc[hypotheses[i]]['COEFFICIENT']
weights /= np.sum(weights)
all_lnprobs[:,i] = logsumexp(np.tile(np.log(weights),(nobj,1))+all_lnprobs_hypi,axis=1)
continue
#Reformat the output structure if there is more than one multivariate gaussian
if ngauss != 1:
#Concatenate the list of pandas dataframes into a single dataframe
output_str_multimodel = pd.concat(output_str_multimodel_list,axis=1)
#Create a 2D array of weights to combine the Gaussian mixture components
weights = parameters_str.loc[hypotheses[i]]['COEFFICIENT']
weights /= np.sum(weights)
logweights_2d = np.tile(np.log(weights),(nobj,1))
#Combine each column of the dataframe with a weighted average
output_str = pd.DataFrame()
#Had to add a .values here
for coli in output_str_multimodel.columns.get_level_values(0):
output_str[coli] = logsumexp(logweights_2d+output_str_multimodel[coli].values,axis=1)
#Use column multi-indexing to add a second title to the columns, which corresponds to the name of the Bayesian hypothesis
dataframe_column_names = output_str.columns
output_str.columns = [np.array(dataframe_column_names),np.array(np.tile(hypotheses[i],dataframe_column_names.size))]
#Add the dataframe to the per-hypothesis list of dataframes
output_str_allhyps_list.append(output_str)
#Concatenate the list of pandas dataframes into a single dataframe
output_str_all = pd.concat(output_str_allhyps_list,axis=1)
#Fetch all log probabilities (if lnp_only is set, this variable already exists)
if lnp_only is False:
all_lnprobs = output_str_all['LN_P'].values
#Normalize probabilities directly in log space
ln_norm_output = all_lnprobs - np.tile(logsumexp(all_lnprobs,axis=1),(nhyp,1)).transpose()
#Compute [0,1] probabilities
norm_output = np.exp(ln_norm_output)
#Identify hypotheses that correspond to moving groups or associations
yind = (np.where(np.array([hypothesis.find('FIELD') == -1 for hypothesis in hypotheses])))[0]
#Create an array of normalized YMG probabilities (no field)
ln_norm_output_only_ymg = all_lnprobs[:,yind] - np.tile(logsumexp(all_lnprobs[:,yind],axis=1),(yind.size,1)).transpose()
#Calculate the weighted YMG prior
ln_prior_moving_groups = logsumexp(ln_priors_nd[:,yind]+ln_norm_output_only_ymg,axis=1)
#Identify hypotheses that correspond to the field
ffind = (np.where(np.array([hypothesis.find('FIELD') != -1 for hypothesis in hypotheses])))[0]
#Weight the priors w/r/t the Bayesian probabilities and project these priors onto the field. This is a way to avoid having the priors change the relative moving group probabilities, as their goal is strictly to maximize young association vs FIELD classification performance
#Normalize probabilities directly in log space, projecting the inverse young association prior on the field probability
ln_P_with_prior = all_lnprobs
ln_P_with_prior[:,ffind] -= np.tile(ln_prior_moving_groups,(ffind.size,1)).transpose()
#Renormalize
ln_norm_output_prior = ln_P_with_prior - np.tile(logsumexp(ln_P_with_prior,axis=1),(nhyp,1)).transpose()
#Return log probabilities if this is the only required output
if lnp_only is True:
return ln_norm_output_prior
#Compute [0,1] probabilities
norm_output_prior = np.exp(ln_norm_output_prior)
#Data file containing the parameters of Bayesian hypotheses
metrics_computed = False
metrics_file = os.path.dirname(__file__)+os.sep+'data'+os.sep+'banyan_sigma_metrics.fits'
#Check if the file exists
if not os.path.isfile(metrics_file):
warnings.warn('The performance metrics file could not be found ! Performance metrics will not be calculated. Please make sure that you did not move "'+os.sep+'data'+os.sep+'banyan_sigma_metrics.fits" from the same path as the Python file banyan_sigma.py !')
#Avoid computing biased metrics if the unit_priors keyword was set
if os.path.isfile(metrics_file) and unit_priors is False:
metrics_str = Table.read(metrics_file,format='fits')
#Remove white spaces in association names
metrics_str['NAME'] = np.chararray.strip(np.array(metrics_str['NAME']))
#Index the table by hypothesis name
metrics_str.add_index('NAME')
#Loop on young associations to determine their individual metrics
tpr = np.empty((nobj,yind.size))*np.nan
fpr = np.empty((nobj,yind.size))*np.nan
ppv = np.empty((nobj,yind.size))*np.nan
for yindi in range(yind.size):
#Calculate the individual normalized probabilities for a given young association
probs_yindi = np.exp(ln_norm_output_prior[:,yindi] - logsumexp(ln_norm_output_prior[:,[yindi,ffind[0]]],axis=1))
#Store the interpolated values depending on observables
if g_pm.size != 0:
mode_index = 0
tpr[g_pm,yindi] = np.interp(probs_yindi[g_pm],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['TPR'][mode_index,:])
fpr[g_pm,yindi] = np.interp(probs_yindi[g_pm],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['FPR'][mode_index,:])
ppv[g_pm,yindi] = np.interp(probs_yindi[g_pm],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['PPV'][mode_index,:])
if g_pm_rv.size != 0:
mode_index = 1
tpr[g_pm_rv,yindi] = np.interp(probs_yindi[g_pm_rv],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['TPR'][mode_index,:])
fpr[g_pm_rv,yindi] = np.interp(probs_yindi[g_pm_rv],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['FPR'][mode_index,:])
ppv[g_pm_rv,yindi] = np.interp(probs_yindi[g_pm_rv],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['PPV'][mode_index,:])
if g_pm_dist.size != 0:
mode_index = 2
tpr[g_pm_dist,yindi] = np.interp(probs_yindi[g_pm_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['TPR'][mode_index,:])
fpr[g_pm_dist,yindi] = np.interp(probs_yindi[g_pm_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['FPR'][mode_index,:])
ppv[g_pm_dist,yindi] = np.interp(probs_yindi[g_pm_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['PPV'][mode_index,:])
if g_pm_rv_dist.size != 0:
mode_index = 3
tpr[g_pm_rv_dist,yindi] = np.interp(probs_yindi[g_pm_rv_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['TPR'][mode_index,:])
fpr[g_pm_rv_dist,yindi] = np.interp(probs_yindi[g_pm_rv_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['FPR'][mode_index,:])
ppv[g_pm_rv_dist,yindi] = np.interp(probs_yindi[g_pm_rv_dist],metrics_str.loc[hypotheses[yind[yindi]]]['PROBS'],metrics_str.loc[hypotheses[yind[yindi]]]['PPV'][mode_index,:])
#Build the combination weights
ln_weights = np.copy(ln_norm_output_only_ymg)
#Any group with less than 1% probability is ignored to avoid propagating potential NaNs
ln_weights[np.where(ln_weights < np.log(1e-2))] = np.log(tiny_number)
#Re-normalize weights
ln_weights -= np.tile(logsumexp(ln_weights,axis=1),(yind.size,1)).transpose()
#Calculate the weighted metrics
tpr_weighted = np.exp(logsumexp(np.log(np.maximum(tpr,tiny_number))+ln_weights,axis=1))
fpr_weighted = np.exp(logsumexp(np.log(np.maximum(fpr,tiny_number))+ln_weights,axis=1))
ppv_weighted = np.exp(logsumexp(np.log(np.maximum(ppv,tiny_number))+ln_weights,axis=1))
metrics_computed = True
#Determine the most probable hypothesis
most_probable_index = np.nanargmax(norm_output_prior,axis=1)
#Loop on objects to determine lists of good hypotheses
hyp_lists = []
best_ya = []
norm_output_only_ymg = np.exp(ln_norm_output_only_ymg)
for obji in range(nobj):
#Identify all young associations with relative P>5%
ind_obji = (np.where(norm_output_only_ymg[obji,:] > .05))[0]
if len(ind_obji) == 0:
hyp_lists.append('FIELD')
best_ya.append('FIELD')
continue
#Find the most probable moving group
best_ya_ind = np.nanargmax(norm_output_only_ymg[obji,:])
best_ya.append(hypotheses[yind][best_ya_ind])
#Sort by decreasing P
ind_obji = ind_obji[np.flip(np.argsort(norm_output_only_ymg[obji,ind_obji]),axis=0)]
#Build a list of associations
if len(ind_obji) > 1:
hyp_lists.append(';'.join([x+y for x,y in zip(hypotheses[yind][ind_obji].tolist(),['('+str(x)+')' for x in np.round(norm_output_only_ymg[obji,ind_obji]*1e2).astype(int).tolist()])]))
if len(ind_obji) == 1:
hyp_lists.append(hypotheses[yind][best_ya_ind])
#Build a final output dataframe
output_final = pd.DataFrame()
#Store the star names if they are given
if 'NAME' in data_table.keys():
output_final['NAME'] = data_table['NAME']
#Store global results
output_final['YA_PROB'] = np.nansum(norm_output_prior[:,yind],axis=1)
output_final['LIST_PROB_YAS'] = hyp_lists
output_final['BEST_HYP'] = hypotheses[most_probable_index]
output_final['BEST_YA'] = best_ya
#Add a second column title "General"
dataframe_column_names = output_final.columns
output_final.columns = [np.array(dataframe_column_names),np.array(np.tile('Global',dataframe_column_names.size))]
if metrics_computed is True:
output_final['TPR','Metrics'] = tpr_weighted
output_final['FPR','Metrics'] = fpr_weighted
output_final['PPV','Metrics'] = ppv_weighted
output_final['NFP','Metrics'] = fpr_weighted*total_besancon_objects
#Create a Dataframe with all probabilities
probs_frame = pd.DataFrame(norm_output_prior,columns=[np.array(np.tile('ALL',nhyp)),hypotheses])
#Add the per-group stuff
if metrics_computed is True:
output_final = pd.concat([output_str_all.swaplevel(axis=1),probs_frame,output_final.swaplevel(axis=1)[['Metrics']],output_final.swaplevel(axis=1)[['Global']].swaplevel(axis=1)],axis=1)
else:
output_final = pd.concat([output_str_all.swaplevel(axis=1),probs_frame,output_final.swaplevel(axis=1)[['Global']].swaplevel(axis=1)],axis=1)
#Add star names if they were provided
if 'NAME' in data_table.keys():
output_final.index = data_table['NAME']
#Return the final structure
return output_final
def banyan_sigma_solve_multivar(ra,dec,pmra,pmdec,pmra_error,pmdec_error,precision_matrix=None,center_vec=None,rv_measured=None,rv_error=None,dist_measured=None,dist_error=None,psira=None,psidec=None,psira_error=None,psidec_error=None,lnP_only=False,precision_matrix_determinant=None,debug=False):
#PROBLEM: PSIRA_ERROR AND PSIDEC_ERROR ARE NOT USED ?
"""
Solve the radial velocity and distance marginalization integrals (if needed) and compute log(probability) with Bayes theorem for an array of stars and a single multivariate Gaussian XYZUVW model. This is a subroutine of banyan_sigma.
Temporary note: multivar_model is IDL's "association_structure"
params (ra,dec): Sky position (degrees)
params (pmra,pmdec): Proper motion (mas/yr). pmra must include the cos(delta) term
params (pmra_error,pmdec_error): Measurement errors on proper motion (mas/yr)
param precision_matrix: Inverse of the covariance matrix [XYZUVW] of the multivariate Gaussian model (mixed units of pc and km/s)
param precision_matrix_determinant: [X]
param center_vec: Central XYZUVW position of the multivariate Gaussian model (mixed units of pc and km/s)
params (rv_measured,rv_error): Radial velocity measurement and error (km/s) - Optional inputs
params (dist_measured,dist_error): Distance measurement and error (pc) - Optional inputs
params (psira,psidec): Psi vector (described in Gagne et al., in preparation) describing the parallax motion of the star. This can be used to model the effect of parallax motion when a proper motion was measured from only two epochs ([mas/yr]) - Optional inputs
params (psira_error,psidec_error): Measurement errors of the psi vector ([mas/yr]) - Optional inputs
keyword full_statistical_errors: Compute [full statistical errors]
keyword lnP_only: Only return the ln(probability)
"""
#Check for parameter consistency
num_stars = np.size(ra)
if np.size(dec) != num_stars or np.size(pmra) != num_stars or np.size(pmdec) != num_stars or np.size(pmra_error) != num_stars or np.size(pmdec_error) != num_stars:
raise ValueError('The dimensions ra, dec, pmra, pmdec, pmra_error and pmdec_error do not agree. They must all be numpy arrays of the same length.')
#Check for radial velocity keyword consistencies
if rv_measured is not None or rv_error is not None:
if np.size(rv_measured) != num_stars or np.size(rv_error) != num_stars:
raise ValueError('The dimensions of rv_measured or rv_error do not agree with those of ra, etc. They must all be numpy arrays of the same length.')
#Check for distance keyword consistencies
if dist_measured is not None or dist_error is not None:
if np.size(dist_measured) != num_stars or np.size(dist_error) != num_stars:
raise ValueError('The dimensions of dist_measured or dist_error do not agree with those of ra, etc. They must all be numpy arrays of the same length.')
#Check for psi keyword consistencies
if psira is not None or psidec is not None or psira_error is not None or psidec_error is not None:
if np.size(psira) != num_stars or np.size(psidec) != num_stars or np.size(psira_error) != num_stars or np.size(psidec_error) != num_stars:
raise ValueError('The dimensions of psira, psidec, psira_error or psidec_error do not agree with those of ra, etc. They must all be numpy arrays of the same length.')
#Check that center_vec is a 6-elements array
if np.shape(center_vec) != (6,):
raise ValueError('center_vec must be a 6-elements numpy array.')
#Check that precision_matrix is a 6x6 matrix
if np.shape(precision_matrix) != (6, 6):
raise ValueError('precision_matrix must be a 6x6-elements numpy array.')
#Compute Galactic coordinates
(gl,gb) = equatorial_galactic(ra,dec)
#lambda is defined in Gagne et al. (2017, ApJS, X, Y, equation 7)
cos_gl = np.cos(np.radians(gl))
cos_gb = np.cos(np.radians(gb))
sin_gl = np.sin(np.radians(gl))
sin_gb = np.sin(np.radians(gb))
lambda_vector = np.array([cos_gb*cos_gl,cos_gb*sin_gl,sin_gb]).transpose()
#Build matrices A and B to convert sky quantities in the Galactic coordinates frame. The A matrix is defined in Gagne et al. (2017, ApJS, X, Y, equation 7)
A_matrix = np.zeros((num_stars,3,3))
cos_ra = np.cos(np.radians(ra))
cos_dec = np.cos(np.radians(dec))
sin_ra = np.sin(np.radians(ra))
sin_dec = np.sin(np.radians(dec))
A_matrix[:,0,0] = cos_ra * cos_dec
A_matrix[:,1,0] = sin_ra * cos_dec
A_matrix[:,2,0] = sin_dec
A_matrix[:,0,1] = -sin_ra
A_matrix[:,1,1] = cos_ra
A_matrix[:,0,2] = -cos_ra * sin_dec
A_matrix[:,1,2] = -sin_ra * sin_dec
A_matrix[:,2,2] = cos_dec
#The B matrix is not directly referenced in the BANYAN Sigma paper.
B_matrix = matrix_set_product_A_single(TGAL,A_matrix)
#The M vector is defined in Gagne et al. (2017, ApJS, X, Y, equation 7)
M_vector = matrix_vector_set_product_v_single(B_matrix,np.array([1.0,0.0,0.0]))
#The N vector is defined in Gagne et al. (2017, ApJS, X, Y, equation 7)
N_vector_sub = np.array([np.zeros(num_stars), np.array(kappa*pmra), np.array(kappa*pmdec)]).transpose()
N_vector = matrix_vector_set_product(B_matrix,N_vector_sub)
#The varphi vector is defined in Gagne et al. (2017, ApJS, X, Y, equation 20)
if psira is not None:
varphi_vector_sub = np.array([np.zeros(num_stars),np.array(kappa*psira), np.array(kappa*psidec)]).transpose()
varphi_vector = matrix_vector_set_product(B_matrix,varphi_vector_sub)
#OMEGA is defined in Gagne et al. (2017, ApJS, X, Y, equation 6)
zero_vector = np.zeros([num_stars,3])
OMEGA_vector = np.concatenate((zero_vector,M_vector),axis=1)
#GAMMA is defined in Gagne et al. (2017, ApJS, X, Y, equation 6)
GAMMA_vector = np.concatenate((lambda_vector,N_vector),axis=1)
#PHI is defined in Gagne et al. (2017, ApJS, X, Y, equation 20)
if psira is not None:
PHI_vector = np.concatenate((zero_vector,varphi_vector),axis=1)
#tau is defined in Gagne et al. (2017, ApJS, X, Y, equation 5)
TAU_vector = np.repeat(center_vec.reshape(1,6),num_stars,axis=0)
if psira is not None:
TAU_vector += PHI_vector
#Take scalar products in multivariate space
OMEGA_OMEGA = scalar_set_product_multivariate(OMEGA_vector,OMEGA_vector,precision_matrix)
GAMMA_GAMMA = scalar_set_product_multivariate(GAMMA_vector,GAMMA_vector,precision_matrix)
OMEGA_GAMMA = scalar_set_product_multivariate(OMEGA_vector,GAMMA_vector,precision_matrix)
OMEGA_TAU = scalar_set_product_multivariate(OMEGA_vector,TAU_vector,precision_matrix)
GAMMA_TAU = scalar_set_product_multivariate(GAMMA_vector,TAU_vector,precision_matrix)
TAU_TAU = scalar_set_product_multivariate(TAU_vector,TAU_vector,precision_matrix)
#If radial velocity or distance measurements are given, propagate them to the relevant scalar products
if dist_measured is not None and dist_error is not None:
#Find where measured distances are finite
finite_ind = np.where(np.isfinite(dist_measured) & np.isfinite(dist_error))
if np.size(finite_ind) != 0:
norm = np.maximum(dist_error[finite_ind],1e-3)**2
GAMMA_GAMMA[finite_ind] += 1.0/norm
GAMMA_TAU[finite_ind] += dist_measured[finite_ind]/norm
TAU_TAU[finite_ind] += dist_measured[finite_ind]**2/norm
if rv_measured is not None and rv_error is not None:
#Find where measured RVs are finite
finite_ind = np.where(np.isfinite(rv_measured) & np.isfinite(rv_error))
if np.size(finite_ind) != 0:
norm = np.maximum(rv_error[finite_ind],1e-3)**2
OMEGA_OMEGA[finite_ind] += 1.0/norm
OMEGA_TAU[finite_ind] += rv_measured[finite_ind]/norm
TAU_TAU[finite_ind] += rv_measured[finite_ind]**2/norm
#Calculate the determinant of the precision matrix unless it is given as a parameter
if precision_matrix_determinant is None:
precision_matrix_determinant = np.linalg.det(precision_matrix)
if precision_matrix_determinant <= 0:
raise ValueError('The determinant of the precision matrix must be positive and non-zero !')
#Calculate optimal distance and radial velocity
beta = (GAMMA_GAMMA - OMEGA_GAMMA**2/OMEGA_OMEGA)/2.0
if np.nanmin(beta) < 0:
raise ValueError('beta has an ill-defined value !')
gamma = OMEGA_GAMMA*OMEGA_TAU/OMEGA_OMEGA - GAMMA_TAU
dist_optimal = (np.sqrt(gamma**2+32.0*beta) - gamma) / (4.0*beta)
rv_optimal = (4.0 - GAMMA_GAMMA*dist_optimal**2 + GAMMA_TAU*dist_optimal)/(OMEGA_GAMMA*dist_optimal)
#Create arrays that contain the measured RV and distance if available, or the optimal values otherwise
dist_optimal_or_measured = dist_optimal
if dist_measured is not None and dist_error is not None:
finite_ind = np.where(np.isfinite(dist_measured) & np.isfinite(dist_error))
if np.size(finite_ind) != 0:
dist_optimal_or_measured[finite_ind] = dist_measured[finite_ind]
rv_optimal_or_measured = rv_optimal
if rv_measured is not None and rv_error is not None:
finite_ind = np.where(np.isfinite(rv_measured) & np.isfinite(rv_error))
if np.size(finite_ind) != 0:
rv_optimal_or_measured[finite_ind] = rv_measured[finite_ind]
#Propagate proper motion measurement errors
EX = np.zeros(num_stars)
EY = np.zeros(num_stars)
EZ = np.zeros(num_stars)
(U, V, W, EU, EV, EW) = equatorial_UVW(ra,dec,pmra,pmdec,rv_optimal_or_measured,dist_optimal_or_measured,pmra_error=pmra_error,pmdec_error=pmdec_error)
#Determine by how much the diagonal of the covariance matrix must be inflated to account for the measurement errors
covariance_matrix = np.linalg.inv(precision_matrix)
covariance_diagonal = np.diag(covariance_matrix)
inflation_array = np.array([EX,EY,EZ,EU,EV,EW]).transpose()
inflation_factors = 1.0 + inflation_array**2/np.repeat(covariance_diagonal.reshape(1,6),num_stars,axis=0)
#Calculate how much the determinant of the covariance matrices must be inflated
inflation_covariance_determinant = np.exp(np.sum(np.log(inflation_factors),axis=1))
#Make sure that no matrix becomes unphysical
if np.nanmin(inflation_covariance_determinant) <= 0:
raise ValueError('At least one covariance matrix has a negative or null determinant as a consequence of the measurement errors !')
#Calculate new determinants for the precision matrices
precision_matrix_inflated_determinant = precision_matrix_determinant/inflation_covariance_determinant
#Apply this to the precision matrices
precision_matrix_inflated = matrix_set_inflation(precision_matrix, 1.0/np.sqrt(inflation_factors))
#Recalculate the scalar products with new precision matrices
OMEGA_OMEGA = scalar_set_product_multivariate_variablemetric(OMEGA_vector,OMEGA_vector,precision_matrix_inflated)
GAMMA_GAMMA = scalar_set_product_multivariate_variablemetric(GAMMA_vector,GAMMA_vector,precision_matrix_inflated)
OMEGA_GAMMA = scalar_set_product_multivariate_variablemetric(OMEGA_vector,GAMMA_vector,precision_matrix_inflated)
OMEGA_TAU = scalar_set_product_multivariate_variablemetric(OMEGA_vector,TAU_vector,precision_matrix_inflated)
GAMMA_TAU = scalar_set_product_multivariate_variablemetric(GAMMA_vector,TAU_vector,precision_matrix_inflated)
TAU_TAU = scalar_set_product_multivariate_variablemetric(TAU_vector,TAU_vector,precision_matrix_inflated)
#If radial velocity or distance measurements are given, propagate them to the relevant scalar products
if dist_measured is not None and dist_error is not None:
#Find where measured distances are finite
finite_ind = np.where(np.isfinite(dist_measured) & np.isfinite(dist_error))
if np.size(finite_ind) != 0:
norm = np.maximum(dist_error[finite_ind],1e-3)**2
GAMMA_GAMMA[finite_ind] += 1.0/norm
GAMMA_TAU[finite_ind] += dist_measured[finite_ind]/norm
TAU_TAU[finite_ind] += dist_measured[finite_ind]**2/norm
if rv_measured is not None and rv_error is not None:
#Find where measured RVs are finite
finite_ind = np.where(np.isfinite(rv_measured) & np.isfinite(rv_error))
if np.size(finite_ind) != 0:
norm = np.maximum(rv_error[finite_ind],1e-3)**2
OMEGA_OMEGA[finite_ind] += 1.0/norm
OMEGA_TAU[finite_ind] += rv_measured[finite_ind]/norm
TAU_TAU[finite_ind] += rv_measured[finite_ind]**2/norm
#Update optimal distance and radial velocity
beta = (GAMMA_GAMMA - OMEGA_GAMMA**2/OMEGA_OMEGA)/2.0
if np.nanmin(beta) < 0:
raise ValueError('beta has an ill-defined value !')
gamma = OMEGA_GAMMA*OMEGA_TAU/OMEGA_OMEGA - GAMMA_TAU
dist_optimal = (np.sqrt(gamma**2+32.0*beta) - gamma) / (4.0*beta)
rv_optimal = (4.0 - GAMMA_GAMMA*dist_optimal**2 + GAMMA_TAU*dist_optimal)/(OMEGA_GAMMA*dist_optimal)
#Calculate error bars on the optimal distance and radial velocity
edist_optimal = 1.0/np.sqrt(GAMMA_GAMMA)
erv_optimal = 1.0/np.sqrt(OMEGA_OMEGA)
#Calculate final quantities for ln probability
zeta = (TAU_TAU - OMEGA_TAU**2/OMEGA_OMEGA)/2.0
xarg = gamma/np.sqrt(2.0*beta)
lnP_coeff = -0.5*np.log(OMEGA_OMEGA) - 2.5*np.log(beta) + 0.5*np.log(precision_matrix_inflated_determinant)
lnP_part1 = xarg**2/2.0 - zeta
lnP_part2 = np.log(np.maximum(parabolic_cylinder_f5_mod(xarg),tiny_number))
lnP = lnP_coeff + lnP_part1 + lnP_part2
#Return ln_P if only this is required
if lnP_only:
return lnP
#Create arrays that contain the measured RV and distance if available, or the optimal values otherwise
dist_optimal_or_measured = dist_optimal
edist_optimal_or_measured = edist_optimal
if dist_measured is not None and dist_error is not None:
finite_ind = np.where(np.isfinite(dist_measured) & np.isfinite(dist_error))
if np.size(finite_ind) != 0:
dist_optimal_or_measured[finite_ind] = dist_measured[finite_ind]
edist_optimal_or_measured[finite_ind] = dist_error[finite_ind]
rv_optimal_or_measured = rv_optimal
erv_optimal_or_measured = erv_optimal
if rv_measured is not None and rv_error is not None:
finite_ind = np.where(np.isfinite(rv_measured) & np.isfinite(rv_error))
if np.size(finite_ind) != 0:
rv_optimal_or_measured[finite_ind] = rv_measured[finite_ind]
erv_optimal_or_measured[finite_ind] = rv_error[finite_ind]
#Calculate XYZ and UVW positions at the optimal (or measured) RV and distance
(X, Y, Z, EX, EY, EZ) = equatorial_XYZ(ra,dec,dist_optimal_or_measured,dist_error=edist_optimal_or_measured)
(U, V, W, EU, EV, EW) = equatorial_UVW(ra,dec,pmra,pmdec,rv_optimal_or_measured,dist_optimal_or_measured,pmra_error=pmra_error,pmdec_error=pmdec_error,rv_error=erv_optimal_or_measured,dist_error=edist_optimal_or_measured)
XYZUVW = np.array([X,Y,Z,U,V,W]).transpose()
EXYZUVW = np.array([EX,EY,EZ,EU,EV,EW]).transpose()
#Calculate the Mahalanobis distance from the optimal position to the Gaussian model
vec = XYZUVW - TAU_vector
mahalanobis = np.sqrt(scalar_set_product_multivariate_variablemetric(vec,vec,precision_matrix_inflated))
#Calculate the XYZ (pc) and UVW (km/s) separations from the optimal position to the center of the Gaussian model
XYZ_sep = np.sqrt(np.sum((XYZUVW[:,0:3]-TAU_vector[:,0:3])**2,axis=1))
UVW_sep = np.sqrt(np.sum((XYZUVW[:,3:6]-TAU_vector[:,3:6])**2,axis=1))
#Calculate the 3D N-sigma distances from the optimal position to the center of the Gaussian models
XYZ_sig = np.sqrt(scalar_set_product_multivariate_variablemetric(vec[:,0:3],vec[:,0:3],precision_matrix_inflated[:,0:3,0:3]))
UVW_sig = np.sqrt(scalar_set_product_multivariate_variablemetric(vec[:,3:6],vec[:,3:6],precision_matrix_inflated[:,3:6,3:6]))
#Store the data in a pandas dataframe
output_structure = pd.DataFrame(np.array([lnP,dist_optimal,rv_optimal,edist_optimal,erv_optimal,X,Y,Z,U,V,W,EX,EY,EZ,EU,EV,EW,XYZ_sep,UVW_sep,XYZ_sig,UVW_sig,mahalanobis]).transpose(),columns=['LN_P','D_OPT','RV_OPT','ED_OPT','ERV_OPT','X','Y','Z','U','V','W','EX','EY','EZ','EU','EV','EW','XYZ_SEP','UVW_SEP','XYZ_SIG','UVW_SIG','MAHALANOBIS'])
#Return the output table
return output_structure
def parabolic_cylinder_f5_mod(x):
"""
Calculates the real part of the "modified" Parabolic Cylinder Function D of index v=-5.
The regular function D(-5,x) is equivalent to the real part of:
from scipy.special import pbdv
return pbdv(-5,x)
And is equivalent to the mathematical expression:
exp(x^2/4)/24 * (sqrt(pi/2)*(x^4+6*x^2+3)*erfc(x/sqrt(2)) - exp(-x^2/2)*(x^3+5*x))
The modified parabolic cylinder does away with the exp(x^2/4) term to improve numerical stability, and instead returns:
(sqrt(pi/2)*(x^4+6*x^2+3)*erfc(x/sqrt(2)) - exp(-x^2/2)*(x^3+5*x))/24
"""
#Define shortcuts for efficiency
sqrt2 = np.sqrt(2.)
sqrt_halfpi = np.sqrt(np.pi)/sqrt2
x_over_sqrt2 = x / sqrt2
erfc_x_over_sqrt2 = erfc(x_over_sqrt2)
epsilon = np.exp(-x**2/2.0)
#Calculate the output
y = 1/24.0*(sqrt_halfpi*(x**4+6.*x**2+3.)*erfc_x_over_sqrt2 - epsilon*(x**3+5.*x))
return y
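# Illustrative cross-check (not part of the original module): the docstring above relates this
# "modified" function to scipy's pbdv through D(-5,x) = exp(x**2/4) * parabolic_cylinder_f5_mod(x).
# A minimal sketch of that relation, assuming scipy is available; it is never called in the module.
def _demo_parabolic_cylinder_f5_mod(x=1.3):
    from scipy.special import pbdv
    d_minus5, _ = pbdv(-5, x)  # real parabolic cylinder function D(-5, x)
    return np.isclose(d_minus5, np.exp(x**2 / 4.0) * parabolic_cylinder_f5_mod(x))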
def equatorial_galactic(ra,dec):
"""Transforms equatorial coordinates (ra,dec) to Galactic coordinates (gl,gb). All inputs must be numpy arrays of the same dimension
param ra: Right ascension (degrees)
param dec: Declination (degrees)
output (gl,gb): Tuple containing Galactic longitude and latitude (degrees)
"""
#Check for parameter consistency
num_stars = np.size(ra)
if np.size(dec) != num_stars:
raise ValueError('The dimensions ra and dec do not agree. They must all be numpy arrays of the same length.')
#Compute intermediate quantities
ra_m_ra_pol = ra - ra_pol
sin_ra = np.sin(np.radians(ra_m_ra_pol))
cos_ra = np.cos(np.radians(ra_m_ra_pol))
sin_dec = np.sin(np.radians(dec))
cos_dec = np.cos(np.radians(dec))
#Compute Galactic latitude
gamma = sin_dec_pol*sin_dec + cos_dec_pol*cos_dec*cos_ra
gb = np.degrees(np.arcsin(gamma))
#Compute Galactic longitude
x1 = cos_dec * sin_ra
x2 = (sin_dec - sin_dec_pol*gamma)/cos_dec_pol
gl = l_north - np.degrees(np.arctan2(x1,x2))
gl = (gl+360.)%(360.)
#gl = np.mod(gl,360.0) might be better
#Return Galactic coordinates tuple
return (gl, gb)
def matrix_set_product_A_single(A,B):
"""Performs matrix multiplication A#B where B is a set of N matrices. This function is more performant than looping over the N matrices if N is much larger than the matrix dimension D. A and the individual Bs must be square. The columns of A are multiplied by the rows of Bs. In IDL this function is called matrix_multiply_square_act.
param A: DxD numpy array
param B: NxDxD numpy array
"""
#Verify matrix dimensions
matrix_dim = A.shape[0]
set_size = B.shape[0]
if A.shape[1] != matrix_dim or B.shape[1] != matrix_dim or B.shape[2] != matrix_dim:
raise ValueError('The dimensions D of matrices A and B do not agree - A must have dimension DxD and B must have dimension NxDxD')
if np.size(A.shape) != 2 or np.size(B.shape) != 3:
raise ValueError('The number of dimensions of matrices A and B are not valid - A must have dimension DxD and B must have dimension NxDxD')
#Initiate resulting matrix C and perform by-element matrix multiplication
C = np.zeros([set_size,matrix_dim,matrix_dim])
for i in range(0,matrix_dim):
for j in range(0,matrix_dim):
for k in range(0,matrix_dim):
C[:,i,j] += A[i,k] * B[:,k,j]
#Return the resulting matrix
return C
def matrix_vector_set_product_v_single(A,v):
"""Performs matrix-vector multiplication A#v where A is a set of matrices and v is a single vector. This function is more performant than looping over the N sets if N is much larger than the matrix-vector dimension D. A must be square. Each column of A is multiplied by the vector v in a scalar product. In IDL this function is called matrix_vector_product_vct.
param A: NxDxD numpy array
param v: D numpy array
"""
#Verify matrix dimensions
matrix_dim = A.shape[1]
set_size = A.shape[0]
if A.shape[2] != matrix_dim or v.shape[0] != matrix_dim:
raise ValueError('The dimensions D of matrix A and vector v do not agree - A must have dimension NxDxD and v must have dimension D')
if np.size(A.shape) != 3 or np.size(v.shape) != 1:
raise ValueError('The number of dimensions of matrix A and vector v are not valid - A must have dimension NxDxD and v must have dimension D')
#Initiate resulting vector w and perform by-element matrix-vector multiplication
w = np.zeros([set_size,matrix_dim])
for i in range(0,matrix_dim):
for k in range(0,matrix_dim):
w[:,i] += A[:,i,k] * v[k]
#Return the resulting vector
return w
def matrix_vector_set_product(A,v):
"""
Performs matrix-vector multiplication A#v where both A and v are sets of N matrices and N vectors. This function is more performant than looping over the N sets if N is much larger than the matrix-vector dimension D. A must be square. Each column of A is multiplied by the vector v in a scalar product. In IDL this function is called matrix_vector_product.
param A: NxDxD numpy array
param v: NxD numpy array
"""
#Verify matrix dimensions
matrix_dim = A.shape[1]
set_size = A.shape[0]
if A.shape[2] != matrix_dim or v.shape[1] != matrix_dim:
raise ValueError('The dimensions D of matrix A and vector v do not agree - A must have dimension NxDxD and v must have dimension NxD')
if np.size(A.shape) != 3 or np.size(v.shape) != 2:
raise ValueError('The number of dimensions of matrix A and vector v are not valid - A must have dimension NxDxD and v must have dimension NxD')
#Initiate resulting vector w and perform by-element matrix-vector multiplication
w = np.zeros([set_size,matrix_dim])
for i in range(0,matrix_dim):
for k in range(0,matrix_dim):
w[:,i] += A[:,i,k] * v[:,k]
#Return the resulting vector
return w
def scalar_set_product_multivariate(u,v,metric):
"""
Performs scalar multiplication in a non-Euclidian metric u#(metric)#v. Both u and v are sets of N vectors. This function is more performant than looping over the N vectors if N is much larger than the vector dimension D. In IDL this function is called inner_product_multi.
param u: NxD numpy array
param v: NxD numpy array
param metric: DxD numpy array
"""
#Verify matrix dimensions
matrix_dim = u.shape[1]
set_size = u.shape[0]
if v.shape[0] != set_size or v.shape[1] != matrix_dim:
raise ValueError('The dimensions of vectors u and v do not agree - both must have dimension NxD')
if metric.shape[0] != matrix_dim or metric.shape[1] != matrix_dim:
raise ValueError('The dimensions of the metric are incompatible with vectors u and v - It must have dimension DxD where u and v have dimensions NxD')
if np.size(u.shape) != 2 or np.size(v.shape) != 2 or np.size(metric.shape) != 2:
raise ValueError('The number of dimensions of vectors u, v and metric matrix are not valid - u and v must have dimension NxD and metric must have dimension DxD')
#Initiate resulting scalar w and perform by-element matrix multiplication
w = np.zeros(set_size)
for i in range(0,matrix_dim):
for j in range(0,matrix_dim):
w += u[:,i] * v[:,j] * metric[i,j]
#Return the resulting scalar
return w
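# Illustrative note (not part of the original module): for a fixed DxD metric the loops above
# compute w[n] = sum_ij u[n,i]*metric[i,j]*v[n,j], i.e. the same contraction as a single einsum.
# A hedged equivalence sketch, never called in the module:
def _demo_scalar_set_product_multivariate():
    rng = np.random.default_rng(0)
    u = rng.normal(size=(4, 6))
    v = rng.normal(size=(4, 6))
    metric = np.eye(6)
    return np.allclose(scalar_set_product_multivariate(u, v, metric),
                       np.einsum('ni,ij,nj->n', u, metric, v))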
def scalar_set_product_multivariate_variablemetric(u,v,metric):
"""
Performs scalar multiplication in a non-Euclidian metric u#(metric)#v. Both u and v are sets of N vectors, and "metric" is a set of matrices. This function is more performant than looping over the N vectors if N is much larger than the vector dimension D. In IDL this function is called inner_product_multi.
param u: NxD numpy array
param v: NxD numpy array
param metric: NxDxD numpy array
"""
#Verify matrix dimensions
matrix_dim = u.shape[1]
set_size = u.shape[0]
if v.shape[0] != set_size or v.shape[1] != matrix_dim:
raise ValueError('The dimensions of vectors u and v do not agree - both must have dimension NxD')
if metric.shape[0] != set_size or metric.shape[1] != matrix_dim or metric.shape[2] != matrix_dim:
raise ValueError('The dimensions of the metric are incompatible with vectors u and v - It must have dimension NxDxD where u and v have dimensions NxD')
if np.size(u.shape) != 2 or np.size(v.shape) != 2 or np.size(metric.shape) != 3:
raise ValueError('The number of dimensions of vectors u, v and metric matrix are not valid - u and v must have dimension NxD and metric must have dimension NxDxD')
#Initiate resulting scalar w and perform by-element matrix multiplication
w = np.zeros(set_size)
for i in range(0,matrix_dim):
for j in range(0,matrix_dim):
w += u[:,i] * v[:,j] * metric[:,i,j]
#Return the resulting scalar
return w
def matrix_set_inflation(A,v):
"""
Performs the inflation of the diagonal of a single matrix A with set of factors v. This is the equivalent of v#A#v.
param A: DxD numpy array
param v: NxD numpy array
"""
#Verify matrix dimensions
matrix_dim = A.shape[0]
set_size = v.shape[0]
if A.shape[1] != matrix_dim or v.shape[1] != matrix_dim:
raise ValueError('The dimensions of matrix A and vector v do not agree - A must have dimension DxD and v must have dimension NxD')
if np.size(A.shape) != 2 or np.size(v.shape) != 2:
raise ValueError('The number of dimensions of matrix A or vector v are not valid - A must have dimension DxD and v must have dimension NxD')
#Calculate B = A#v
B = np.empty((set_size,matrix_dim,matrix_dim))
for i in range(0,matrix_dim):
for j in range(0,matrix_dim):
B[:,i,j] = A[i,j] * v[:,j]
#Calculate C = v#B = v#A#v
C = np.empty((set_size,matrix_dim,matrix_dim))
for i in range(0,matrix_dim):
for j in range(0,matrix_dim):
C[:,i,j] = v[:,i] * B[:,i,j]
#Return the resulting set of matrices
return C
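# Illustrative note (not part of the original module): the double loop above builds
# C[n,i,j] = v[n,i]*A[i,j]*v[n,j], which can be spot-checked against an equivalent einsum call.
def _demo_matrix_set_inflation():
    rng = np.random.default_rng(1)
    A = np.diag([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    v = 1.0 + rng.uniform(size=(3, 6))
    return np.allclose(matrix_set_inflation(A, v), np.einsum('ni,ij,nj->nij', v, A, v))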
def equatorial_XYZ(ra,dec,dist,dist_error=None):
"""
Transforms equatorial coordinates (ra,dec) and distance to Galactic position XYZ. All inputs must be numpy arrays of the same dimension.
param ra: Right ascension (degrees)
param dec: Declination (degrees)
param dist: Distance (parsec)
param dist_error: Error on distance (parsec)
output (X,Y,Z): Tuple containing Galactic position XYZ (parsec)
output (X,Y,Z,EX,EY,EZ): Tuple containing Galactic position XYZ and their measurement errors, used if any measurement errors are given as inputs (parsec)
"""
#Verify keywords
num_stars = np.size(ra)
if np.size(dec) != num_stars or np.size(dist) != num_stars:
raise ValueError('ra, dec and distance must all be numpy arrays of the same size !')
if dist_error is not None and np.size(dist_error) != num_stars:
raise ValueError('dist_error must be a numpy array of the same size as ra !')
#Compute Galactic coordinates
(gl, gb) = equatorial_galactic(ra,dec)
cos_gl = np.cos(np.radians(gl))
cos_gb = np.cos(np.radians(gb))
sin_gl = np.sin(np.radians(gl))
sin_gb = np.sin(np.radians(gb))
X = cos_gb * cos_gl * dist
Y = cos_gb * sin_gl * dist
Z = sin_gb * dist
if dist_error is not None:
#X_gb = sin_gb * cos_gl * dist * np.pi/180.
#X_gl = cos_gb * sin_gl * dist * np.pi/180.
X_dist = cos_gb * cos_gl
EX = np.abs(X_dist * dist_error)
Y_dist = cos_gb * sin_gl
EY = np.abs(Y_dist * dist_error)
Z_dist = sin_gb
EZ = np.abs(Z_dist * dist_error)
return (X, Y, Z, EX, EY, EZ)
else:
return (X, Y, Z)
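# Illustrative check (not part of the original module): (X, Y, Z) is just the input distance
# projected on Galactic unit vectors, so its Euclidean norm should recover the distance for any
# sky position. A minimal sketch, never called in the module:
def _demo_equatorial_XYZ_norm():
    ra = np.array([10.0, 150.0, 300.0])
    dec = np.array([-5.0, 30.0, 60.0])
    dist = np.array([10.0, 25.0, 50.0])
    (X, Y, Z) = equatorial_XYZ(ra, dec, dist)
    return np.allclose(np.sqrt(X**2 + Y**2 + Z**2), dist)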
def equatorial_UVW(ra,dec,pmra,pmdec,rv,dist,pmra_error=None,pmdec_error=None,rv_error=None,dist_error=None):
"""
Transforms equatorial coordinates (ra,dec), proper motion (pmra,pmdec), radial velocity and distance to space velocities UVW. All inputs must be numpy arrays of the same dimension.
param ra: Right ascension (degrees)
param dec: Declination (degrees)
param pmra: Proper motion in right ascension (milliarcsecond per year). Must include the cos(delta) term
param pmdec: Proper motion in declination (milliarcsecond per year)
param rv: Radial velocity (kilometers per second)
param dist: Distance (parsec)
param ra_error: Error on right ascension (degrees)
param dec_error: Error on declination (degrees)
param pmra_error: Error on proper motion in right ascension (milliarcsecond per year)
param pmdec_error: Error on proper motion in declination (milliarcsecond per year)
param rv_error: Error on radial velocity (kilometers per second)
param dist_error: Error on distance (parsec)
output (U,V,W): Tuple containing Space velocities UVW (kilometers per second)
output (U,V,W,EU,EV,EW): Tuple containing Space velocities UVW and their measurement errors, used if any measurement errors are given as inputs (kilometers per second)
"""
#Verify keywords
num_stars = np.size(ra)
if np.size(dec) != num_stars or np.size(pmra) != num_stars or np.size(pmdec) != num_stars or np.size(rv) != num_stars or np.size(dist) != num_stars:
raise ValueError('ra, dec, pmra, pmdec, rv and distance must all be numpy arrays of the same size !')
if pmra_error is not None and np.size(pmra_error) != num_stars:
raise ValueError('pmra_error must be a numpy array of the same size as ra !')
if pmdec_error is not None and np.size(pmdec_error) != num_stars:
raise ValueError('pmdec_error must be a numpy array of the same size as ra !')
if rv_error is not None and np.size(rv_error) != num_stars:
raise ValueError('rv_error must be a numpy array of the same size as ra !')
if dist_error is not None and np.size(dist_error) != num_stars:
raise ValueError('dist_error must be a numpy array of the same size as ra !')
#Compute elements of the T matrix
cos_ra = np.cos(np.radians(ra))
cos_dec = np.cos(np.radians(dec))
sin_ra = np.sin(np.radians(ra))
sin_dec = np.sin(np.radians(dec))
T1 = TGAL[0,0]*cos_ra*cos_dec + TGAL[0,1]*sin_ra*cos_dec + TGAL[0,2]*sin_dec
T2 = -TGAL[0,0]*sin_ra + TGAL[0,1]*cos_ra
T3 = -TGAL[0,0]*cos_ra*sin_dec - TGAL[0,1]*sin_ra*sin_dec + TGAL[0,2]*cos_dec
T4 = TGAL[1,0]*cos_ra*cos_dec + TGAL[1,1]*sin_ra*cos_dec + TGAL[1,2]*sin_dec
T5 = -TGAL[1,0]*sin_ra + TGAL[1,1]*cos_ra
T6 = -TGAL[1,0]*cos_ra*sin_dec - TGAL[1,1]*sin_ra*sin_dec + TGAL[1,2]*cos_dec
T7 = TGAL[2,0]*cos_ra*cos_dec + TGAL[2,1]*sin_ra*cos_dec + TGAL[2,2]*sin_dec
T8 = -TGAL[2,0]*sin_ra + TGAL[2,1]*cos_ra
T9 = -TGAL[2,0]*cos_ra*sin_dec - TGAL[2,1]*sin_ra*sin_dec + TGAL[2,2]*cos_dec
#Calculate UVW
reduced_dist = kappa*dist
U = T1*rv + T2*pmra*reduced_dist + T3*pmdec*reduced_dist
V = T4*rv + T5*pmra*reduced_dist + T6*pmdec*reduced_dist
W = T7*rv + T8*pmra*reduced_dist + T9*pmdec*reduced_dist
#Return only (U, V, W) tuple if no errors are set
if pmra_error is None and pmdec_error is None and rv_error is None and dist_error is None:
return (U, V, W)
#Propagate errors if they are specified
if pmra_error is None:
pmra_error = np.zeros(num_stars)
if pmdec_error is None:
pmdec_error = np.zeros(num_stars)
if rv_error is None:
rv_error = np.zeros(num_stars)
if dist_error is None:
dist_error = np.zeros(num_stars)
reduced_dist_error = kappa*dist_error
#Calculate derivatives
T23_pm = np.sqrt((T2*pmra)**2+(T3*pmdec)**2)
T23_pm_error = np.sqrt((T2*pmra_error)**2+(T3*pmdec_error)**2)
EU_rv = T1 * rv_error
EU_pm = T23_pm_error * reduced_dist
EU_dist = T23_pm * reduced_dist_error
EU_dist_pm = T23_pm_error * reduced_dist_error
T56_pm = np.sqrt((T5*pmra)**2+(T6*pmdec)**2)
T56_pm_error = np.sqrt((T5*pmra_error)**2+(T6*pmdec_error)**2)
EV_rv = T4 * rv_error
EV_pm = T56_pm_error * reduced_dist
EV_dist = T56_pm * reduced_dist_error
EV_dist_pm = T56_pm_error * reduced_dist_error
T89_pm = np.sqrt((T8*pmra)**2+(T9*pmdec)**2)
T89_pm_error = np.sqrt((T8*pmra_error)**2+(T9*pmdec_error)**2)
EW_rv = T7 * rv_error
EW_pm = T89_pm_error * reduced_dist
EW_dist = T89_pm * reduced_dist_error
EW_dist_pm = T89_pm_error * reduced_dist_error
#Calculate error bars
EU = np.sqrt(EU_rv**2 + EU_pm**2 + EU_dist**2 + EU_dist_pm**2)
EV = np.sqrt(EV_rv**2 + EV_pm**2 + EV_dist**2 + EV_dist_pm**2)
EW = np.sqrt(EW_rv**2 + EW_pm**2 + EW_dist**2 + EW_dist_pm**2)
#Return measurements and error bars
return (U, V, W, EU, EV, EW)
```
|
{
"source": "jga/goldfinchsong",
"score": 3
}
|
#### File: goldfinchsong/goldfinchsong/cli.py
```python
from collections import OrderedDict
import configparser
from logging import config as log_config
from logging import getLogger
import click
from tinydb import TinyDB
from .classes import Manager
logger = getLogger(__name__)
LOGGER_CONFIG = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'simple',
'filename': 'goldfinchsong.log',
'encoding': 'utf-8'
}
},
'loggers': {
'goldfinchsong': {
'handlers': ['file', 'console'],
'propagate': True,
'level': 'INFO',
},
}
}
def get_image_directory(command_line_input, active_configuration):
"""
Provides path to image directory.
Arguments:
command_line_input (str | ``None``): A path that may optionally be submitted by user. A string
or ``None`` are expected types.
active_configuration (dict): Active configuration options.
Returns:
str: A path to the image directory. Default: ``images``
"""
if command_line_input is not None:
return command_line_input
elif 'image_directory' in active_configuration:
return active_configuration['image_directory']
return 'images'
def parse_configuration(config_parser):
"""
Extracts credential and text conversion information.
Args:
config_parser: A ``ConfigParser`` from the standard library
loaded with local configuration file.
Returns:
dict: The returned dict contains twitter credentials, any text conversions, and
any image and/or log configuration information made available.
"""
active_configuration = dict()
active_configuration['credentials'] = config_parser['goldfinchsong']
if config_parser.has_section('goldfinchsong.log'):
if 'log_level' in config_parser['goldfinchsong.log']:
active_configuration['log_level'] = config_parser['goldfinchsong.log']['log_level']
LOGGER_CONFIG['loggers']['goldfinchsong']['level'] = active_configuration['log_level']
if 'log_location' in config_parser['goldfinchsong.log']:
active_configuration['log_location'] = config_parser['goldfinchsong.log']['log_location']
LOGGER_CONFIG['handlers']['file']['filename'] = active_configuration['log_location']
log_config.dictConfig(LOGGER_CONFIG)
active_configuration['text_conversions'] = None
if config_parser.has_section('goldfinchsong.conversions'):
pairings = config_parser['goldfinchsong.conversions']
text_conversions = OrderedDict()
for abbreviated, original in pairings.items():
text_conversions[original] = abbreviated
active_configuration['text_conversions'] = text_conversions
if config_parser.has_section('goldfinchsong.images'):
images_configuration = config_parser['goldfinchsong.images']
if 'image_directory' in images_configuration:
active_configuration['image_directory'] = images_configuration['image_directory']
if config_parser.has_section('goldfinchsong.db'):
db_configuration = config_parser['goldfinchsong.db']
if 'db_location' in db_configuration:
active_configuration['db_location'] = db_configuration['db_location']
return active_configuration
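# Illustrative sketch (not taken from the project documentation): an INI file accepted by
# parse_configuration could look roughly like the block below. Section and option names come
# from the parsing logic above; the values, and the credential option names in particular,
# are assumptions (they depend on the Manager class, which is not shown here).
#
#   [goldfinchsong]
#   consumer_key = ...
#   consumer_secret = ...
#   access_token = ...
#   access_token_secret = ...
#
#   [goldfinchsong.log]
#   log_level = INFO
#   log_location = goldfinchsong.log
#
#   [goldfinchsong.conversions]
#   gf = goldfinchsong
#
#   [goldfinchsong.images]
#   image_directory = images
#
#   [goldfinchsong.db]
#   db_location = db.json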
@click.command()
@click.option('--action', default='post')
@click.option('--conf', default='goldfinchsong.ini')
@click.option('--images', default=None)
def run(action, conf, images):
"""
Uploads an image tweet.
The :class:`~.goldfinchsong.classes.Manager` class does the actual
work; this function contributes logic for extracting configuration
settings and creating a ``TinyDB`` instance.
Arguments:
action (str): An action name.
conf (str): File path for a configuration file. By default, this
function looks for ``goldfinchsong.ini`` under the directory from
which the user executes the function.
images (str): File path to a directory with images that will be uploaded by tweets.
"""
config_parser = configparser.ConfigParser()
# make parsing of config option names case-sensitive
config_parser.optionxform = str
config_parser.read(conf)
if config_parser.has_section('goldfinchsong'):
active_configuration = parse_configuration(config_parser)
if action == 'post':
logger.info('POST requested.')
image_directory = get_image_directory(images, active_configuration)
db = TinyDB(active_configuration['db_location'])
manager = Manager(active_configuration['credentials'],
db,
image_directory,
active_configuration['text_conversions'])
content = manager.post_tweet()
logger.info('Sent POST with image {0} and text {1}'.format(content[0], content[1]))
else:
logger.error('The "{0}" action is not supported.'.format(action))
else:
logger.error('Twitter credentials and DB settings must be placed within ini file.')
```
#### File: goldfinchsong/tests/test_utils.py
```python
from collections import OrderedDict
from datetime import datetime, timezone
import unittest
from os.path import join
from tinydb import TinyDB, storages
from goldfinchsong import utils
IMAGE_NAMES = ['goldfinch1.jpg', 'goldfinch2.jpg', 'goldfinch3.jpg',
'goldfinch4.jpg', 'goldfinch5.jpg']
TEST_TEXT1 = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
TEST_TEXT2 = 'This is a test of the goldfinchsong project. Tests ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
class LoadContentTests(unittest.TestCase):
def test_basic_load(self):
image_directory = 'tests/images/'
db = TinyDB(storage=storages.MemoryStorage)
content = utils.load_content(db, image_directory)
full_image_path = content[0]
image_file = full_image_path.replace(image_directory, '')
status_text = content[1]
self.assertTrue(image_file in IMAGE_NAMES)
self.assertEqual(image_file.replace('.jpg', ''), status_text)
def test_storage_in_db(self):
image_directory = 'tests/images/'
# let's load a list of tweets into the db
db = TinyDB(storage=storages.MemoryStorage)
image_names = [
'goldfinch1.jpg',
'goldfinch2.jpg',
'goldfinch3.jpg',
'goldfinch4.jpg'
]
for image_name in image_names:
delivery_timestamp = datetime.now(tz=timezone.utc).isoformat()
tweet = {'image': image_name, 'delivered_on': delivery_timestamp}
db.insert(tweet)
content = utils.load_content(db, image_directory)
self.assertEqual(content[2], 'goldfinch5.jpg')
tweets = db.all()
self.assertEqual(len(tweets), 4, msg=tweets)
class UtilitiesTests(unittest.TestCase):
def test_apply_abbreviations(self):
text_conversions = {
'abbreviations': 'abbr',
'goldfinchsong': 'gf',
'important': 'impt'
}
# exhausts all conversions before reaching limit
new_text1 = utils.apply_abbreviations(TEST_TEXT1, text_conversions)
expected_text1 = 'This is a test of the gf project. This test checks ' \
'abbr, vowel elision, length checking, and other logic. ' \
'Tests are impt!'
self.assertEqual(expected_text1, new_text1)
new_text2 = utils.apply_abbreviations(TEST_TEXT2, text_conversions)
self.assertTrue(len(new_text2) <= 117)
def test_apply_vowel_elision(self):
result_text = utils.apply_vowel_elision(TEST_TEXT1)
expected_text = 'This is a tst of the gldfnchsng prjct. Ths tst chcks ' \
'abbrvtns, vwl elsn, lngth chckng, and othr lgc. Tsts ' \
'are imprtnt!'
self.assertEqual(expected_text, result_text)
def test_assemble_elided_status(self):
complete_words = ['test', 'a', 'is', 'This']
elided_words = ['systm', 'gldfnch', 'the', 'of']
result = utils.assemble_elided_status(complete_words, elided_words)
self.assertEqual('This is a test of the gldfnch systm', result)
def test_chop_words(self):
result_text = utils.chop_words(TEST_TEXT1)
expected_text = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and'
self.assertEqual(expected_text, result_text)
def test_is_image(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertTrue(utils.is_image_file(image_file))
def test_is_not_image(self):
image_files = [
'image.docx',
'image.pdf',
'image.md',
'image.html',
'image.css',
'image.odt',
'image.sh',
'image.xlsx',
'image.txt',
'image.c',
'image.py',
'image'
]
for image_file in image_files:
self.assertFalse(utils.is_image_file(image_file))
def test_trim_file_extensions(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertEqual(utils.trim_file_extension(image_file), 'image')
def test_to_compact_text(self):
text_conversions = {
'abbreviations': 'abbrs',
'goldfinchsong': 'gfnch',
'important': 'importnt'
}
candidate_text1 = utils.to_compact_text(TEST_TEXT1, 100, text_conversions)
expected_text1 = 'Ths is a tst of the gfnch prjct. Ths tst chcks abbrs, ' \
'vwl elsn, lngth chckng, and othr lgc. Tsts are'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.to_compact_text(TEST_TEXT1, 50, text_conversions)
expected_text2 = 'Ths is a tst of the gfnch prjct. Ths tst chcks'
self.assertEqual(expected_text2, candidate_text2)
candidate_text3 = utils.to_compact_text(TEST_TEXT1, 20, text_conversions)
expected_text3 = 'Ths is a tst of the'
self.assertEqual(expected_text3, candidate_text3)
def test_extract_status_text(self):
conversion_data = (
('abbreviations', 'abbrs'),
('goldfinchsong', 'gfnch'),
('important', 'importnt'),
)
text_conversions = OrderedDict(conversion_data)
file = 'Some_goldfinchsong_image-file_with_a_very_long_set_of_' \
'characters_and_abbreviations_that_conveys_important_info.png'
candidate_text1 = utils.extract_status_text(file, text_conversions, maximum_length=100,)
expected_text1 = 'Some gfnch image-file with a very long set of characters and abbrs that conveys important info'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.extract_status_text(file, text_conversions, maximum_length=70,)
expected_text2 = 'Sme gfnch imge-fle wth a vry lng st of chrctrs and abbrs tht cnvys'
self.assertEqual(expected_text2, candidate_text2)
def test_get_unused_files(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,52):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 49)
self.assertEqual(unused_files[0], 'image52.png')
self.assertEqual(unused_files[5], 'image57.png')
self.assertEqual(unused_files[10], 'image62.png')
self.assertEqual(unused_files[15], 'image67.png')
self.assertEqual(unused_files[20], 'image72.png')
self.assertEqual(unused_files[33], 'image85.png')
self.assertEqual(unused_files[48], 'image100.png')
def test_db_purge_when_all_posted(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,106):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
self.assertEqual(len(db.all()), 105)
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 100)
self.assertEqual(unused_files[0], 'image1.png')
self.assertEqual(unused_files[5], 'image6.png')
self.assertEqual(unused_files[10], 'image11.png')
self.assertEqual(unused_files[33], 'image34.png')
self.assertEqual(unused_files[50], 'image51.png')
```
|
{
"source": "jgailbreath/pycardano",
"score": 3
}
|
#### File: integration-test/test/test_all.py
```python
import os
import pathlib
import tempfile
import time
import cbor2
from retry import retry
from pycardano import *
@retry(tries=10, delay=6)
def check_chain_context(chain_context):
print(f"Current chain tip: {chain_context.last_block_slot}")
class TestAll:
# Define chain context
NETWORK = Network.TESTNET
OGMIOS_WS = "ws://localhost:1337"
chain_context = OgmiosChainContext(OGMIOS_WS, Network.TESTNET)
check_chain_context(chain_context)
payment_key_path = os.environ.get("PAYMENT_KEY")
extended_key_path = os.environ.get("EXTENDED_PAYMENT_KEY")
if not payment_key_path or not extended_key_path:
raise Exception(
"Cannot find payment key. Please specify environment variable PAYMENT_KEY and extended_key_path"
)
payment_skey = PaymentSigningKey.load(payment_key_path)
payment_vkey = PaymentVerificationKey.from_signing_key(payment_skey)
extended_payment_skey = PaymentExtendedSigningKey.load(extended_key_path)
extended_payment_vkey = PaymentExtendedVerificationKey.from_signing_key(
extended_payment_skey
)
@retry(tries=2, delay=6)
def test_mint(self):
address = Address(self.payment_vkey.hash(), network=self.NETWORK)
# Load payment keys or create them if they don't exist
def load_or_create_key_pair(base_dir, base_name):
skey_path = base_dir / f"{base_name}.skey"
vkey_path = base_dir / f"{base_name}.vkey"
if skey_path.exists():
skey = PaymentSigningKey.load(str(skey_path))
vkey = PaymentVerificationKey.from_signing_key(skey)
else:
key_pair = PaymentKeyPair.generate()
key_pair.signing_key.save(str(skey_path))
key_pair.verification_key.save(str(vkey_path))
skey = key_pair.signing_key
vkey = key_pair.verification_key
return skey, vkey
tempdir = tempfile.TemporaryDirectory()
PROJECT_ROOT = tempdir.name
root = pathlib.Path(PROJECT_ROOT)
# Create the directory if it doesn't exist
root.mkdir(parents=True, exist_ok=True)
"""Generate keys"""
key_dir = root / "keys"
key_dir.mkdir(exist_ok=True)
# Generate policy keys, which will be used when minting NFT
policy_skey, policy_vkey = load_or_create_key_pair(key_dir, "policy")
"""Create policy"""
# A policy that requires a signature from the policy key we generated above
pub_key_policy_1 = ScriptPubkey(policy_vkey.hash())
# A policy that requires a signature from the extended payment key
pub_key_policy_2 = ScriptPubkey(self.extended_payment_vkey.hash())
# A time policy that disallows token minting after 10000 seconds from last block
must_before_slot = InvalidHereAfter(self.chain_context.last_block_slot + 10000)
# Combine two policies using ScriptAll policy
policy = ScriptAll([pub_key_policy_1, pub_key_policy_2, must_before_slot])
# Calculate policy ID, which is the hash of the policy
policy_id = policy.hash()
"""Define NFT"""
my_nft = MultiAsset.from_primitive(
{
policy_id.payload: {
b"MY_NFT_1": 1, # Name of our NFT1 # Quantity of this NFT
b"MY_NFT_2": 1, # Name of our NFT2 # Quantity of this NFT
}
}
)
native_scripts = [policy]
"""Create metadata"""
# We need to create a metadata for our NFTs, so they could be displayed correctly by blockchain explorer
metadata = {
721: { # 721 refers to the metadata label registered for NFT standard here:
# https://github.com/cardano-foundation/CIPs/blob/master/CIP-0010/registry.json#L14-L17
policy_id.payload.hex(): {
"MY_NFT_1": {
"description": "This is my first NFT thanks to PyCardano",
"name": "PyCardano NFT example token 1",
"id": 1,
"image": "ipfs://QmRhTTbUrPYEw3mJGGhQqQST9k86v1DPBiTTWJGKDJsVFw",
},
"MY_NFT_2": {
"description": "This is my second NFT thanks to PyCardano",
"name": "PyCardano NFT example token 2",
"id": 2,
"image": "ipfs://QmRhTTbUrPYEw3mJ<KEY>",
},
}
}
}
# Place metadata in AuxiliaryData, the format acceptable by a transaction.
auxiliary_data = AuxiliaryData(AlonzoMetadata(metadata=Metadata(metadata)))
"""Build transaction"""
# Create a transaction builder
builder = TransactionBuilder(self.chain_context)
# Add our own address as the input address
builder.add_input_address(address)
# Since an InvalidHereAfter rule is included in the policy, we must specify time to live (ttl) for this transaction
builder.ttl = must_before_slot.after
# Set nft we want to mint
builder.mint = my_nft
# Set native script
builder.native_scripts = native_scripts
# Set transaction metadata
builder.auxiliary_data = auxiliary_data
# Calculate the minimum amount of lovelace that need to hold the NFT we are going to mint
min_val = min_lovelace(Value(0, my_nft), self.chain_context)
# Send the NFT to our own address
nft_output = TransactionOutput(address, Value(min_val, my_nft))
builder.add_output(nft_output)
# Build and sign transaction
signed_tx = builder.build_and_sign(
[self.payment_skey, self.extended_payment_skey, policy_skey], address
)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
# Submit signed transaction to the network
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
time.sleep(3)
utxos = self.chain_context.utxos(str(address))
found_nft = False
for utxo in utxos:
output = utxo.output
if output == nft_output:
found_nft = True
assert found_nft, f"Cannot find target NFT in address: {address}"
nft_to_send = TransactionOutput(
address,
Value(
20000000,
MultiAsset.from_primitive({policy_id.payload: {b"MY_NFT_1": 1}}),
),
)
builder = TransactionBuilder(self.chain_context)
builder.add_input_address(address)
builder.add_output(nft_to_send)
# Create final signed transaction
signed_tx = builder.build_and_sign([self.payment_skey], address)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
# Submit signed transaction to the network
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
time.sleep(3)
utxos = self.chain_context.utxos(str(address))
found_nft = False
for utxo in utxos:
output = utxo.output
if output == nft_to_send:
found_nft = True
assert found_nft, f"Cannot find target NFT in address: {address}"
@retry(tries=2, delay=6)
def test_plutus(self):
# ----------- Giver give ---------------
with open("plutus_scripts/fortytwo.plutus", "r") as f:
script_hex = f.read()
forty_two_script = cbor2.loads(bytes.fromhex(script_hex))
script_hash = plutus_script_hash(forty_two_script)
script_address = Address(script_hash, network=self.NETWORK)
giver_address = Address(self.payment_vkey.hash(), network=self.NETWORK)
builder = TransactionBuilder(self.chain_context)
builder.add_input_address(giver_address)
datum = PlutusData() # A Unit type "()" in Haskell
builder.add_output(
TransactionOutput(script_address, 50000000, datum_hash=datum_hash(datum))
)
signed_tx = builder.build_and_sign([self.payment_skey], giver_address)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
time.sleep(3)
# ----------- Fund taker a collateral UTxO ---------------
taker_address = Address(self.extended_payment_vkey.hash(), network=self.NETWORK)
builder = TransactionBuilder(self.chain_context)
builder.add_input_address(giver_address)
builder.add_output(TransactionOutput(taker_address, 5000000))
signed_tx = builder.build_and_sign([self.payment_skey], giver_address)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
time.sleep(3)
# ----------- Taker take ---------------
redeemer = Redeemer(
RedeemerTag.SPEND, 42, ExecutionUnits(mem=10000000, steps=10000000000)
)
utxo_to_spend = self.chain_context.utxos(str(script_address))[0]
taker_address = Address(self.extended_payment_vkey.hash(), network=self.NETWORK)
builder = TransactionBuilder(self.chain_context)
builder.add_script_input(utxo_to_spend, forty_two_script, datum, redeemer)
take_output = TransactionOutput(taker_address, 25123456)
builder.add_output(take_output)
non_nft_utxo = None
for utxo in self.chain_context.utxos(str(taker_address)):
if isinstance(utxo.output.amount, int):
non_nft_utxo = utxo
break
builder.collaterals.append(non_nft_utxo)
signed_tx = builder.build_and_sign([self.extended_payment_skey], taker_address)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
time.sleep(3)
utxos = self.chain_context.utxos(str(taker_address))
found = False
for utxo in utxos:
output = utxo.output
if output == take_output:
found = True
assert found, f"Cannot find target UTxO in address: {taker_address}"
```
|
{
"source": "jgaines13/looker-to-powerpoint",
"score": 3
}
|
#### File: looker-to-powerpoint/extensions/folder_to_word.py
```python
import datetime
from docx import Document
from docx.shared import Inches
from fastapi import BackgroundTasks
from main import app
from core import get_sdk_all_access, send_email, get_temp_file_name, get_output_file_name
slug = 'folder_to_word'
def write_docx_from_folder(folder_id: int):
sdk = get_sdk_all_access()
folder = sdk.folder(folder_id)
looks = folder.looks
timestamp = datetime.datetime.now()
document = Document()
document.sections[0].header.paragraphs[0].text = folder.name
document.sections[0].footer.paragraphs[0].text = f'Created {timestamp}'
titles = []
pages = len(looks) - 1
for idx, look in enumerate(looks):
image = sdk.run_look(look.id, 'png')
image_file = get_temp_file_name(
slug,
'.'.join(['look', str(look.id), 'png'])
)
with open(image_file, 'wb') as file:
file.write(image)
document.add_heading(look.title, 0)
document.add_paragraph(look.description)
document.add_picture(image_file, width=Inches(5))
if idx < pages:
document.add_page_break()
word_file_name = get_output_file_name(
slug,
'.'.join([folder.name, 'docx']),
timestamp=True
)
document.save(word_file_name)
@app.get('/extensions/%s/folder/{folder_id}' % slug, status_code=202)
def endpoint(folder_id: int, background_tasks: BackgroundTasks):
"""Creates Word document from all looks in a given folder"""
background_tasks.add_task(write_docx_from_folder, folder_id)
return {'message': 'Generating DOCX in background'}
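# Illustrative usage (host and port are assumptions, e.g. a local uvicorn default): the route
# above could be triggered with
#   curl http://localhost:8000/extensions/folder_to_word/folder/42
# which returns 202 immediately while the document is written by the background task.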
@app.get(f'/extensions/{slug}/folders', status_code=200)
def folders():
"""Folders endpoint for Folder to Word action: returns list of all folders. Does not include root
folders i.e. Shared, Users and LookML."""
sdk = get_sdk_all_access()
all_folders = sdk.all_folders()
folders = []
for folder in all_folders:
if folder.name not in ['Shared', 'Users', 'lookml']:
folders.append({
folder.id: folder.name
})
return {'folders': folders}
```
|
{
"source": "jgalar/reml",
"score": 2
}
|
#### File: reml/reml/babeltrace2.py
```python
import logging
import re
from reml.project import Project, Version
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Babeltrace2Project(Project):
def __init__(self) -> None:
self._name = "Babeltrace2"
self._changelog_project_name = "Babeltrace"
super().__init__()
@staticmethod
def _is_release_series_valid(series: str) -> bool:
try:
tokenized_version = series.split(".")
if len(tokenized_version) != 2:
return False
if int(tokenized_version[0]) != 2:
return False
return True
except:
# Any error is the result of an unexpected release series format anyhow.
return False
def _ci_release_job_name(self, version):
series = "{}.{}".format(version.major, version.minor)
return "babeltrace_v{}_release".format(series)
def _update_version(self, new_version: Version) -> None:
with open(self._repo_base_path + "/configure.ac", "r") as original:
original_contents = original.read()
new_contents = re.sub(
r"^m4_define\(\[bt_version_major\],.*\)$",
"m4_define([bt_version_major], [{}])".format(new_version.major),
original_contents,
flags=re.MULTILINE,
)
new_contents = re.sub(
r"^m4_define\(\[bt_version_minor\],.*\)$",
"m4_define([bt_version_minor], [{}])".format(new_version.minor),
new_contents,
flags=re.MULTILINE,
)
new_contents = re.sub(
r"^m4_define\(\[bt_version_patch\],.*\)$",
"m4_define([bt_version_patch], [{}])".format(new_version.patch),
new_contents,
flags=re.MULTILINE,
)
with open(self._repo_base_path + "/configure.ac", "w") as new:
new.write(new_contents)
def _get_release_name(self) -> str:
with open(self._repo_base_path + "/configure.ac", "r") as original:
contents = original.read()
return re.search(
r"^m4_define\(\[bt_version_name\], \[(.*)\]\)*$",
contents,
flags=re.MULTILINE,
).group(1)
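# Illustrative sketch (not copied from the Babeltrace tree): the substitutions in
# _update_version and the lookup in _get_release_name assume configure.ac carries version
# macros shaped like the lines below; only the bracketed values are read or rewritten.
# The numbers and the release name here are made-up examples.
#
#   m4_define([bt_version_major], [2])
#   m4_define([bt_version_minor], [0])
#   m4_define([bt_version_patch], [5])
#   m4_define([bt_version_name], [Example Name])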
def _commit_and_tag(self, new_version: Version) -> None:
release_name = self._get_release_name()
commit_msg = 'Release: Babeltrace {}.{}.{} "{}"'.format(
new_version.major, new_version.minor, new_version.patch, release_name
)
self._repo.git.add("ChangeLog")
self._repo.git.commit("-s", "-m" + commit_msg)
self._repo.git.tag(
"-s",
"v{}".format(str(new_version)),
"-m Version {}".format(str(new_version)),
)
new_version = Version(
new_version.major, new_version.minor, new_version.patch + 1
)
commit_msg = "Update working version to Babeltrace v{}".format(str(new_version))
self._update_version(new_version)
self._repo.git.add("configure.ac")
self._repo.git.commit("-s", "-m" + commit_msg)
```
#### File: reml/reml/lttngtools.py
```python
import logging
import re
from reml.project import Project, Version
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LTTngToolsProject(Project):
def __init__(self) -> None:
self._name = "LTTng-tools"
self._changelog_project_name = "lttng-tools"
super().__init__()
@staticmethod
def _is_release_series_valid(series: str) -> bool:
try:
tokenized_version = series.split(".")
if len(tokenized_version) != 2:
return False
if int(tokenized_version[0]) != 2:
return False
return True
except:
# Any error is the result of an unexpected release series format anyhow.
return False
def _update_version(self, new_version: Version) -> None:
with open(self._repo_base_path + "/configure.ac", "r") as original:
contents = original.read()
exp = re.compile(r"AC_INIT.*")
span = exp.search(contents).span()
with open(self._repo_base_path + "/configure.ac", "w") as new:
new.write(contents[0 : span[0]])
new.write(
"AC_INIT([lttng-tools],[{}],[<EMAIL>],[],[https://lttng.org])".format(
str(new_version)
)
)
new.write(contents[span[1] :])
def _commit_and_tag(self, new_version: Version) -> None:
self._update_version(new_version)
self._repo.git.add("ChangeLog", "configure.ac")
commit_msg = "Update version to v{}".format(str(new_version))
self._repo.git.commit("-s", "-m" + commit_msg)
self._repo.git.tag(
"-s",
"v{}".format(str(new_version)),
"-m Version {}".format(str(new_version)),
)
```
#### File: reml/reml/project.py
```python
import re
import git
import glob
import tempfile
import shutil
import subprocess
import jenkinsapi
import requests
import time
import hashlib
import os
from enum import Enum
from click import style, echo, confirm, progressbar
from datetime import date
from typing import Optional
import reml.config
class ReleaseType(Enum):
STABLE = 1
RELEASE_CANDIDATE = 2
class InvalidReleaseSeriesError(Exception):
def __init__(self) -> None:
super().__init__()
class InvalidReleaseTypeError(Exception):
def __init__(self) -> None:
super().__init__()
class InvalidReleaseRebuildOptionError(Exception):
def __init__(self) -> None:
super().__init__()
class UnexpectedTagNameError(Exception):
def __init__(self) -> None:
super().__init__()
class AbortedRelease(Exception):
def __init__(self) -> None:
super().__init__()
class Version:
def __init__(
self, major: int, minor: int, patch: int, rc: Optional[int] = None
) -> None:
self._major = major
self._minor = minor
self._patch = patch
self._rc = rc
def __str__(self) -> str:
version_string = "{}.{}.{}".format(self._major, self._minor, self._patch)
if self._rc:
version_string = version_string + "-rc" + str(self._rc)
return version_string
@property
def major(self) -> int:
return self._major
@property
def minor(self) -> int:
return self._minor
@property
def patch(self) -> int:
return self._patch
@property
def rc(self) -> Optional[int]:
return self._rc
class ReleaseDescriptor:
def __init__(self, project_name: str, version: Version, path: str) -> None:
self._project_name = project_name
self._version = version
self._path = path
@property
def name(self) -> str:
return "{} {}".format(self._project_name, str(self._version))
@property
def version(self) -> Version:
return self._version
@property
def path(self) -> str:
return self._path
class ReleaseArtifact:
def __init__(self, name: str, url: str) -> None:
self._name = name
self._url = url
self._dir = tempfile.mkdtemp()
echo(
style("Fetching ")
+ style(self._name, fg="white", bold=True)
+ style("..."),
nl=False,
)
remote = requests.get(self._url)
artifact_path = os.path.join(self._dir, self._name)
with open(artifact_path, "wb") as new_file:
new_file.write(remote.content)
echo(style("✓", fg="green", bold=True))
echo(
style("Hashing ") + style(self._name, fg="white", bold=True) + style("..."),
nl=False,
)
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
with open(artifact_path, "rb") as tarball:
content = tarball.read()
md5.update(content)
sha1.update(content)
sha256.update(content)
with open(artifact_path + ".md5", "w") as md5file:
md5file.write("{} {}\n".format(md5.hexdigest(), self._name))
with open(artifact_path + ".sha1", "w") as sha1file:
sha1file.write("{} {}\n".format(sha1.hexdigest(), self._name))
with open(artifact_path + ".sha256", "w") as sha256file:
sha256file.write("{} {}\n".format(sha256.hexdigest(), self._name))
echo(style("✓", fg="green", bold=True))
echo(
style("Signing ") + style(self._name, fg="white", bold=True) + style("..."),
nl=False,
)
subprocess.call(["gpg", "--armor", "-b", artifact_path])
echo(style("✓", fg="green", bold=True))
def upload(self, location: str) -> None:
echo(
style("Uploading artifacts... "),
nl=False,
)
for filename in os.listdir(self._dir):
if not filename.startswith(self._name):
continue
path = os.path.join(self._dir, filename)
subprocess.call(["rsync", path, location + "/"])
echo(style("✓", fg="green", bold=True))
class Project:
def __init__(self) -> None:
self._repo = None
self._workdir = tempfile.mkdtemp()
self._repo_base_path = None
self._config = reml.config.get_project_config(self.name)
try:
self._git_urls = self._config["git_urls"]
if isinstance(self._git_urls, str):
self._git_urls = [self._git_urls]
self._ci_url = self._config["ci_url"]
self._ci_user = self._config["ci_user"]
self._ci_token = self._config["ci_token"]
self._upload_location = self._config["upload_location"]
except KeyError as e:
raise reml.config.MissingConfigurationAttributeError(self.name, e.args[0])
@property
def name(self) -> str:
return self._name
@property
def changelog_project_name(self) -> str:
return getattr(self, "_changelog_project_name", self.name)
@staticmethod
def _is_release_series_valid(series: str) -> bool:
raise NotImplementedError()
@staticmethod
def _branch_name_from_series(series: str) -> str:
return "stable-" + series
@staticmethod
def _version_from_tag(tag_name: str) -> Version:
exp = re.compile(r"v(\d*)\.(\d*)\.(\d*)$")
exp_rc = re.compile(r"v(\d*)\.(\d*)\.(\d*)-rc(\d*)")
rc = None
if exp.match(tag_name):
major, minor, patch = exp.match(tag_name).groups()
else:
if exp_rc.match(tag_name) is None:
raise UnexpectedTagNameError()
else:
major, minor, patch, rc = exp_rc.match(tag_name).groups()
major = int(major)
minor = int(minor)
patch = int(patch)
rc = None if rc is None else int(rc)
return Version(major, minor, patch, rc)
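# Illustrative examples (not part of reml): the two patterns above accept plain and
# release-candidate tags, e.g.
#   Project._version_from_tag("v2.13.1")     -> Version(2, 13, 1)
#   Project._version_from_tag("v2.13.0-rc2") -> Version(2, 13, 0, rc=2)
# Anything else, such as a tag without the leading "v", raises UnexpectedTagNameError.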
@staticmethod
def _version_from_series(series: str) -> Version:
exp = re.compile(r"(\d*)\.(\d*)")
if exp.match(series):
major, minor = exp.match(series).groups()
else:
raise InvalidReleaseSeriesError()
return Version(int(major), int(minor), 0, None)
@staticmethod
def _tag_from_version(version: Version) -> str:
return "v" + str(version)
@staticmethod
def _is_build_running(build: jenkinsapi.build) -> bool:
build_status_update_try_count = 20
while build_status_update_try_count >= 0:
try:
is_running = build.is_running()
return is_running
except requests.exceptions.ConnectionError:
build_status_update_try_count = build_status_update_try_count - 1
if build_status_update_try_count == 0:
raise
def _ci_release_job_name(self, version):
series = "{}.{}".format(version.major, version.minor)
return "{}_v{}_release".format(self.name.lower(), series)
def _update_version(self, new_version: Version) -> None:
raise NotImplementedError()
def _commit_and_tag(self, new_version: Version) -> None:
raise NotImplementedError()
def _clone_repo(self) -> None:
echo("Cloning upstream {} repository... ".format(self.name), nl=False)
git.Git(self._workdir).clone(self._git_urls[0])
self._repo_base_path = glob.glob(self._workdir + "/*/")[0]
self._repo = git.Repo(self._repo_base_path)
echo(style("✓", fg="green", bold=True))
for git_url in self._git_urls[1:]:
self._repo.git.remote("set-url", "--add", "origin", git_url)
def _branch_exists(self, branch_name: str) -> bool:
for ref in self._repo.refs:
if ref.name == ("origin/" + branch_name):
return True
return False
def _set_current_branch(self, branch_name: str, create_branch: bool) -> None:
if create_branch:
echo("Switching to new branch " + branch_name)
self._repo.git.checkout("-b", branch_name)
else:
echo("Switching to branch " + branch_name)
self._repo.git.checkout(branch_name)
def _latest_tag_name(self) -> str:
return self._repo.git.describe("--abbrev=0")
def _update_changelog(self, new_version: Version, tagline: str):
echo("Updating ChangeLog... ".format(self.name), nl=False)
latest_tag_name = self._latest_tag_name()
for ref in self._repo.refs:
if ref.name == latest_tag_name:
latest_tag_sha = ref.commit.hexsha
break
today = date.today()
title = "{}-{:02d}-{:02d} {} {}".format(
today.year,
today.month,
today.day,
self.changelog_project_name,
str(new_version),
)
if tagline:
title = title + " ({})".format(tagline)
title = title + "\n"
changelog_new_section = [title]
for commit in self._repo.iter_commits():
if commit.hexsha == latest_tag_sha:
break
entry = "\t* {}\n".format(commit.summary)
changelog_new_section.append(entry)
with open(self._repo_base_path + "/ChangeLog", "r") as original:
contents = original.read()
with open(self._repo_base_path + "/ChangeLog", "w") as modified:
for entry in changelog_new_section:
modified.write(entry)
modified.write("\n")
modified.write(contents)
echo(style("✓", fg="green", bold=True))
def _publish(self, branch_name: str) -> None:
echo("Pushing new release... ".format(self.name), nl=False)
self._repo.git.push("origin", branch_name + ":" + branch_name, "--tags")
echo(style("✓", fg="green", bold=True))
def _generate_artifact(self, version: Version) -> str:
job_name = self._ci_release_job_name(version)
echo(
style("Launching build job ")
+ style(job_name, fg="white", bold=True)
+ style("... "),
nl=False,
)
server = jenkinsapi.jenkins.Jenkins(self._ci_url, self._ci_user, self._ci_token)
# jenkinsapi 0.3.11 does not handle timeouts nor does it allow
# retries. This may be changed in 0.3.12.
# See: https://github.com/pycontribs/jenkinsapi/issues/767
#
# Meanwhile, simply retry and hope for the best.
create_job_try_count = 20
while create_job_try_count >= 0:
try:
job = server.create_job(job_name, None)
break
except requests.exceptions.ConnectionError:
create_job_try_count = create_job_try_count - 1
if create_job_try_count == 0:
raise
queue_item = job.invoke()
echo(style("✓", fg="green", bold=True))
echo(
style("Waiting for job ")
+ style(job_name, fg="white", bold=True)
+ style(" to be scheduled... "),
nl=False,
)
while True:
try:
queue_item.poll()
build = queue_item.get_build()
break
except jenkinsapi.custom_exceptions.NotBuiltYet:
time.sleep(1)
continue
echo(style("✓", fg="green", bold=True))
estimated_duration_secs = int(build.get_estimated_duration())
delay_secs = 1
with progressbar(
length=estimated_duration_secs,
show_eta=True,
label="Building on " + build.get_slave(),
) as progress:
last_update_time = time.monotonic()
while self._is_build_running(build):
time.sleep(delay_secs)
now = time.monotonic()
progress.update(now - last_update_time)
last_update_time = now
build_status = build.poll()
if build_status["result"] != "SUCCESS":
echo(style("Build failed 🤯", fg="red", bold=True))
raise AbortedRelease()
if len(build.get_artifact_dict()) != 1:
echo(
style(
"Unexpected artifacts generated by the release job 🤯",
fg="red",
bold=True,
)
)
echo("Artifacts: " + str(build.get_artifact_dict()))
raise AbortedRelease()
echo(
style("Getting artifact URL... "),
nl=False,
)
artifact = next(iter(build.get_artifacts()))
echo(style(artifact.url, fg="white", bold=True))
return ReleaseArtifact(artifact.filename, artifact.url)
def release(
self,
series: str,
tagline: str,
dry: bool,
rebuild: bool,
release_type: ReleaseType,
    ) -> "ReleaseDescriptor":
if not self._is_release_series_valid(series):
raise InvalidReleaseSeriesError()
self._clone_repo()
branch_name = self._branch_name_from_series(series)
branch_exists = self._branch_exists(branch_name)
if rebuild:
if not branch_exists:
raise InvalidReleaseRebuildOptionError()
self._set_current_branch(branch_name, False)
latest_tag_name = self._latest_tag_name()
release_version = self._version_from_tag(latest_tag_name)
echo(
style("Rebuilding artifact of version ")
+ style(str(release_version), fg="white", bold=True)
)
elif not branch_exists:
echo(
"Branch "
+ style(branch_name, fg="white", bold=True)
+ " does not exist"
)
if release_type != ReleaseType.RELEASE_CANDIDATE:
raise InvalidReleaseTypeError()
release_version = self._version_from_series(series)
release_version = Version(
release_version.major, release_version.minor, 0, 1
)
else:
echo(
"Branch "
+ style(branch_name, fg="white", bold=True)
+ " already exists"
)
self._set_current_branch(branch_name, False)
latest_tag_name = self._latest_tag_name()
latest_version = self._version_from_tag(latest_tag_name)
major = latest_version.major
if release_type is ReleaseType.RELEASE_CANDIDATE:
minor = latest_version.minor
patch = 0
rc = latest_version.rc + 1
else:
if latest_version.rc is not None:
patch = 0
else:
patch = latest_version.patch + 1
minor = latest_version.minor
rc = None
release_version = Version(major, minor, patch, rc)
echo(
style("Updating version from ")
+ style(str(latest_version), fg="white", bold=True)
+ style(" to ")
+ style(str(release_version), fg="white", bold=True)
)
if not rebuild:
self._update_changelog(release_version, tagline)
self._commit_and_tag(release_version)
if not branch_exists:
self._set_current_branch(branch_name, True)
if (
confirm(
style("Publish tree at ")
+ style(self._repo_base_path, fg="white", bold=True)
+ style(" ?")
)
and not dry
):
self._publish(branch_name)
else:
raise AbortedRelease()
artifact = self._generate_artifact(release_version)
artifact.upload(self._upload_location)
return ReleaseDescriptor(self.name, release_version, self._repo_base_path)
```
|
{
"source": "jgalar/vlttng",
"score": 2
}
|
#### File: vlttng/vlttng/profile.py
```python
import yaml
import copy
class UnknownSourceFormat(Exception):
def __init__(self, source):
self._source = source
@property
def source(self):
return self._source
class InvalidProfile(Exception):
pass
class ParseError(Exception):
pass
class InvalidOverride(Exception):
pass
class GitSource:
def __init__(self, clone_url, checkout):
self._clone_url = clone_url
self._checkout = checkout
@property
def clone_url(self):
return self._clone_url
@property
def checkout(self):
return self._checkout
class HttpFtpSource:
def __init__(self, url):
self._url = url
@property
def url(self):
return self._url
class Project:
def __init__(self, name, source, configure, build_env):
self._name = name
self._source = source
self._configure = configure
self._build_env = build_env
@property
def name(self):
return self._name
@property
def source(self):
return self._source
@property
def configure(self):
return self._configure
@configure.setter
def configure(self, value):
self._configure = value
@property
def build_env(self):
return self._build_env
class Profile:
def __init__(self, virt_env, build_env, projects):
self._virt_env = virt_env
self._build_env = build_env
self._projects = projects
@property
def virt_env(self):
return self._virt_env
@property
def build_env(self):
return self._build_env
@property
def projects(self):
return self._projects
class Override:
OP_REPLACE = 'replace'
OP_APPEND = 'append'
OP_REMOVE = 'remove'
def __init__(self, path, op, rep):
        if len(path) == 0:
raise InvalidOverride('Empty override path')
self._path = path
self._op = op
self._rep = rep
@property
def path(self):
return self._path
@property
def op(self):
return self._op
@property
def rep(self):
return self._rep
def apply(self, node):
# find the root node in which to override a property and the
# remaining path from where the existing property is found
common_node = node
remaining_path = self._path
for index, key in enumerate(self._path[:-1]):
if key in common_node:
common_node = common_node[key]
if not isinstance(common_node, dict):
fmt = 'Cannot override a non-associative array property ("{}") with an associative array'
raise InvalidOverride(fmt.format(key))
remaining_path = self._path[index + 1:]
continue
else:
break
# create new property from remaining path
cur_node = common_node
prop_key = remaining_path[-1]
if self._op in (Override.OP_REPLACE, Override.OP_APPEND):
if remaining_path:
for key in remaining_path[:-1]:
cur_node[key] = {}
cur_node = cur_node[key]
if prop_key not in cur_node:
cur_node[prop_key] = ''
# apply
if self._op == Override.OP_REPLACE:
cur_node[prop_key] = self._rep
elif self._op == Override.OP_APPEND:
if prop_key == 'configure':
cur_node[prop_key] += ' '
cur_node[prop_key] += self._rep
elif self._op == Override.OP_REMOVE:
del cur_node[prop_key]
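    # Illustrative sketch only (the project name and configure flag below are made up,
    # not taken from a real profile):
    #   ov = Override(['projects', 'lttng-tools', 'configure'],
    #                 Override.OP_APPEND, '--disable-man-pages')
    #   ov.apply(root_node)  # appends to root_node['projects']['lttng-tools']['configure']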
def _source_from_project_node(project_node):
source = project_node['source']
if source.startswith('git://') or source.endswith('.git') or 'checkout' in project_node:
checkout = 'master'
if 'checkout' in project_node:
checkout_node = project_node['checkout']
if checkout_node is not None:
checkout = checkout_node
return GitSource(source, checkout)
if source.startswith('http://') or source.startswith('https://') or source.startswith('ftp://'):
return HttpFtpSource(source)
raise UnknownSourceFormat(source)
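# Rough mapping sketch (illustrative values, not from a real profile):
#   {'source': 'https://git.example.com/lttng-ust.git', 'checkout': 'v2.12'} -> GitSource
#   {'source': 'https://example.com/popt-1.16.tar.gz'} -> HttpFtpSource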
def _merge_envs(enva, envb):
env = copy.deepcopy(enva)
env.update(envb)
return env
def _project_from_project_node(name, project_node, base_build_env):
source = _source_from_project_node(project_node)
configure = ''
build_env = {}
if 'configure' in project_node:
configure_node = project_node['configure']
if configure_node is not None:
configure = str(configure_node)
if 'build-env' in project_node:
build_env_node = project_node['build-env']
if build_env_node is not None:
build_env = _merge_envs(base_build_env, build_env_node)
else:
build_env = copy.deepcopy(base_build_env)
return Project(name, source, configure, build_env)
def _validate_projects(projects):
valid_project_names = (
'babeltrace',
'babeltrace2',
'elfutils',
'glib',
'libxml2',
'lttng-analyses',
'lttng-modules',
'lttng-scope',
'lttng-tools',
'lttng-ust',
'popt',
'tracecompass',
'urcu',
)
for name in projects:
if name not in valid_project_names:
raise InvalidProfile('Unknown project name: "{}"'.format(name))
def _merge_nodes(base, patch):
if isinstance(base, dict) and isinstance(patch, dict):
for k, v in patch.items():
if isinstance(v, dict) and k in base:
_merge_nodes(base[k], v)
else:
if k == 'configure' and type(v) is str:
if k not in base:
base[k] = ''
base[k] += ' {}'.format(v)
else:
base[k] = v
def _from_yaml_files(paths, ignored_projects, overrides, verbose):
root_node = {}
for path in paths:
with open(path) as f:
patch_root_node = yaml.load(f, Loader=yaml.FullLoader)
_merge_nodes(root_node, patch_root_node)
for override in overrides:
override.apply(root_node)
if verbose:
print('Effective profile:')
print()
print(yaml.dump(root_node, explicit_start=True, explicit_end=True,
indent=2, default_flow_style=False))
build_env = root_node.get('build-env', {})
virt_env = root_node.get('virt-env', {})
projects = {}
for name, project_node in root_node['projects'].items():
if name in ignored_projects:
continue
if project_node is None:
continue
project = _project_from_project_node(name, project_node, build_env)
projects[name] = project
_validate_projects(projects)
return Profile(virt_env, build_env, projects)
def from_yaml_files(paths, ignored_projects, overrides, verbose):
try:
return _from_yaml_files(paths, ignored_projects, overrides, verbose)
except (UnknownSourceFormat, InvalidProfile):
raise
except Exception as e:
raise ParseError() from e
```
|
{
"source": "JGalego/XrayPy",
"score": 3
}
|
#### File: src/xraypy/xrayapi.py
```python
import os
import re
import base64
import logging
import json
import requests
from requests.auth import HTTPBasicAuth
from xraypy import xrayutils
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
LOGGER = logging.getLogger(__name__)
JIRA_REST_API = "/rest/api/2"
XRAY_REST_API = "/rest/raven/1.0"
class XrayApiClient(object):
"""Wrapper for Xray's REST API Client"""
def __init__(self, xray_properties):
"""XrayApiClient constructor
:param xray_properties (str) path to the Xray properties JSON file
"""
# Initialize LOGGER
xrayutils.setup_logging(logging.INFO)
# Load Xray properties
with open(xray_properties) as file_props:
self.properties = json.load(file_props)
def get_project_info(self, project):
"""Returns the project info
:param project (str) project key or ID
"""
# Create URL
url = self.properties["host"] + JIRA_REST_API + "/project/" + project
# Make request
LOGGER.info("GET %s", url)
response = requests.get(url, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to get project info", response.status_code)
return json.loads(response.text)
def get_project_id(self, project_key):
"""Returns the project ID
:param project_key (str) project key
"""
project_info = self.get_project_info(project_key)
return project_info["id"]
def get_issue_info(self, issue):
"""Returns information about a JIRA issue
:param issue (str) issue key or id
"""
# Create URL
url = self.properties["host"] + JIRA_REST_API + "/issue/" + issue
# Make request
LOGGER.info("GET %s", url)
if "username" in self.properties and "password" in self.properties:
response = requests.get(url, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
else:
response = requests.get(url)
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to get issue info", response.status_code)
return json.loads(response.text)
def save_test_run(self, test_execution_key, test_run_info):
"""Saves a test run in the context of a test execution
:param test_execution_key (str) test execution key
:param test_run (dict) test run info
"""
# Create URL
url = self.properties["host"] + XRAY_REST_API + "/import/execution"
# Create test run payload
test_run = {'testExecutionKey': test_execution_key, 'tests': [test_run_info]}
# Set up headers
headers = {'content-type': 'application/json'}
# Make request
LOGGER.info("POST %s", url)
response = requests.post(url, \
data=json.dumps(test_run), \
headers=headers, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 201:
LOGGER.error("%d: Failed to save test run", response.status_code)
return json.loads(response.text)
def get_test_run_info(self, test_execution_key, test_key):
"""Returns information about a test run
:param test_execution_key (str) test execution key
:param test_key (str) test key
"""
# Create URL
url = self.properties["host"] + XRAY_REST_API + \
"/api/testrun?testExecIssueKey=" + test_execution_key + \
"&testIssueKey=" + test_key
# Make request
LOGGER.info("GET %s", url)
response = requests.get(url, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to get test run info", response.status_code)
return json.loads(response.text)
def create_issue(self, issue):
"""Create an issue in JIRA
:param issue (JSON) issue payload
"""
# Create URL
url = self.properties["host"] + JIRA_REST_API + "/issue"
# Set up headers
headers = {'content-type': 'application/json'}
# Make request
LOGGER.info("POST %s", url)
response = requests.post(url, \
data=json.dumps(issue), \
headers=headers, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 201:
LOGGER.error("%d: Failed to create issue", response.status_code)
return json.loads(response.text)
def create_manual_test(self, project_key, summary, description):
"""Creates a manual Test issue in JIRA
:param project_key (str) project key
:param summary (str) issue summary
:param description (str) issue description
"""
# Get project ID
project_id = self.get_project_id(project_key)
# Create test issue payload
issue = {'fields': {'project': {'id': project_id}, 'summary': summary, \
'description': description, 'issuetype': {'name': 'Test'}}}
# Make request
response = self.create_issue(issue)
return response
def create_test_set(self, project_key, summary, description):
"""Creates a Test Set issue in JIRA
:param project_key (str) project key
:param summary (str) issue summary
:param description (str) issue description
"""
# Get project ID
project_id = self.get_project_id(project_key)
# Create test set issue payload
issue = {'fields': {'project': {'id': project_id}, 'summary': summary, \
'description': description, 'issuetype': {'name': 'Test Set'}}}
# Make request
response = self.create_issue(issue)
return response
def create_test_plan(self, project_key, summary, description):
"""Creates a Test Plan issue in JIRA
:param project_key (str) project key
:param summary (str) issue summary
:param description (str) issue description
"""
# Get project ID
project_id = self.get_project_id(project_key)
# Create test plan issue payload
issue = {'fields': {'project': {'id': project_id}, 'summary': summary, \
'description': description, 'issuetype': {'name': 'Test Plan'}}}
# Make request
response = self.create_issue(issue)
return response
def create_test_execution(self, project_key, summary, description):
"""Creates a Test Execution issue in JIRA
:param project_key (str) project key
:param summary (str) issue summary
:param description (str) issue description
"""
# Get project ID
project_id = self.get_project_id(project_key)
# Create test execution issue payload
issue = {'fields': {'project': {'id': project_id}, 'summary': summary, \
'description': description, 'issuetype': {'name': 'Test Execution'}}}
# Make request
response = self.create_issue(issue)
return response
def add_evidence_to_test_run(self, test_run_id, evidence, content_type):
"""Uploads evidence files to a test run
:param test_run_id (str) test run ID
:param evidence (str) path to the evidence file
:param content_type (str) content type of the evidence file
"""
# Create URL
url = self.properties["host"] + XRAY_REST_API + \
"/api/testrun/" + test_run_id + "/attachment"
# Encode the file contents with Base64
encoded_data = ""
with open(evidence, "rb") as evidence_file:
            encoded_data = base64.b64encode(evidence_file.read()).decode('ascii')  # decode so the payload stays JSON-serializable
# Get file name
filename = os.path.basename(evidence)
# Create attachment payload
attachment = {'data': encoded_data, 'filename': filename, 'contentType': content_type}
# Set up headers
headers = {'content-type': 'application/json'}
# Make request
LOGGER.info("POST %s", url)
response = requests.post(url, \
data=json.dumps(attachment), \
headers=headers, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 201:
LOGGER.error("%d: Failed to upload test run evidence", response.status_code)
return json.loads(response.text)
def export_results(self, **kwargs):
"""Exports test run results from JIRA by
1. Test Execution Key + Test Key
2. Test Execution Key
3. Test Plan Key
4. Saved Filter ID
Example:
export_results(test_execution_key="...", test_key="...")
export_results(test_execution_key="...")
export_results(test_plan_key="...")
export_results(saved_filter_id="...")
"""
# Create base URL
url = self.properties["host"] + XRAY_REST_API + "/testruns?"
# Add URL suffix
if "test_execution_key" in kwargs and "test_key" in kwargs:
url += "testExecKey=" + kwargs["test_execution_key"] + \
"&testKey=" + kwargs["test_key"]
elif "test_plan_Key" in kwargs:
url += "testPlanKey=" + kwargs["test_plan_key"]
elif "test_execution_key" in kwargs:
url += "testExecKey=" + kwargs["test_execution_key"]
elif "saved_filter_id" in kwargs:
url += "savedFilterId=" + kwargs["saved_filter_id"]
# Make request
LOGGER.info("GET %s", url)
response = requests.get(url, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to export results from JIRA", response.status_code)
return json.loads(response.text)
def import_json_results(self, json_results):
"""Import JSON results to JIRA
:param json_results (str) path to the JSON results
"""
# Create base URL
url = self.properties["host"] + XRAY_REST_API + "/import/execution"
# Make request
LOGGER.info("POST %s", url)
response = requests.post(url, \
data=json.dumps(json_results), \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]), \
verify=False)
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to import JSON results to JIRA", response.status_code)
return json.loads(response.text)
def import_junit_results(self, junit_xml_report, project_key, **kwargs):
"""Import JUnit results to JIRA
:param junit_xml_report (str) path to the JUnit XML report
:param project_key (str) project key
"""
# Create base URL
url = self.properties["host"] + XRAY_REST_API + \
"/import/execution/junit?projectKey=%s" % project_key
# Add Test Plan key
if "test_plan_key" in kwargs:
if kwargs["test_plan_key"]:
url += "&testPlanKey=%s" % kwargs["test_plan_key"]
# Add Test environments
if "test_environments" in kwargs:
if kwargs["test_environments"]:
url += "&testEnvironments=%s" % kwargs["test_environments"]
# Add revision
if "revision" in kwargs:
if kwargs["revision"]:
url += "&revision=%s" % kwargs["revision"]
# Add fix version
if "fix_version" in kwargs:
if kwargs["fix_version"]:
url += "&fixVersion=%s" % kwargs["fix_version"]
# Create multipart form
files = {'file': (os.path.basename(junit_xml_report), open(junit_xml_report, 'rb'), \
'application/xml', {'Expires': '0'})}
# Make request
LOGGER.info("POST %s", url)
response = requests.post(url, \
files=files, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]), \
verify=False)
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to import JUnit results to JIRA", response.status_code)
return json.loads(response.text)
def run_jql_query(self, jql_query):
"""Runs a JQL query
:param jql_query (str) JQL query
"""
# Encode unsupported characters in the JQL query so it can be used in the URI
# For more information, please check RFC 3986
jql_query = re.sub(r"\s+", '%20', jql_query)
jql_query = re.sub(r"=", '%3D', jql_query)
# Create URL
url = self.properties["host"] + JIRA_REST_API + "/search?jql=" + jql_query
# Make request
LOGGER.info("GET %s", url)
response = requests.get(url, \
auth=HTTPBasicAuth(self.properties["username"], \
self.properties["password"]))
# Check status code
if response.status_code != 200:
LOGGER.error("%d: Failed to run JQL query", response.status_code)
return json.loads(response.text)
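# Hypothetical usage sketch (the file name, project key and issue values below are made up):
#   client = XrayApiClient("xray_properties.json")
#   client.create_manual_test("DEMO", "Login test", "Checks the login flow")
#   client.import_junit_results("target/junit-report.xml", "DEMO", test_plan_key="DEMO-10")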
```
|
{
"source": "jgalgarra/introprogpython",
"score": 4
}
|
#### File: introprogpython/codigo/adivina_el_numero.py
```python
import random  # This package is needed to generate random numbers
print("Hola, vamos a jugar a adivinar un número secreto entre 1 y 100")
# Randomly choose an integer between 1 and 100
numero_secreto = random.randrange(1,101)
print("Ya lo he pensado, podemos empezar")
intentos = 1  # Attempt counter. Starts at one
# This function asks the user for a number. If it equals the secret number it returns True: the user
# guessed it. Otherwise, it gives a hint indicating whether the secret number is greater or
# smaller than the one the player typed.
def adivinalo(numintentos, numsec):
numero_humano = int(input("¿Qué numero has pensado?: "))
if (numero_humano == numsec):
print("¡Enhorabuena, has acertado!. Número de intentos:",numintentos)
acierto = True
else:
acierto = False
if (numero_humano < numsec):
print("El número que pensé es mayor que el tuyo")
else:
print("El número que pensé es menor que el tuyo")
print("Has utilizado",intentos,"intentos")
return(acierto)
# Main loop of the program. While the user does not guess the number, we repeat
# the round and increment the number of attempts
while not(adivinalo(intentos, numero_secreto)):
intentos = intentos + 1
```
#### File: introprogpython/codigo/animacion.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random
PI = 3.1416
fig, ax = plt.subplots()
x = np.arange(0, 4*PI, 0.01)
line, = ax.plot(x, np.sin(x))
# Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate_sin_wave(i):
line.set_ydata(np.sin(x+ i/10.0)) # update the data
return line,
def animate_square_wave(i):
line.set_ydata(0.5*((4/PI)*np.sin(x+ i/10.0)+(4/(3*PI))*np.sin(3*(x+ i/10.0))+(4/(5*PI))*np.sin(5*(x+ i/10.0)))) # update the data
return line,
def animate_noisy_wave(i):
line.set_ydata(0.5*(np.sin(x+ i/10.0)+0.4*np.random.random(size=len(x)))) # update the data
return line,
speed = 20
frames = 200
# Uncomment the wave function to plot: sine, sine with Gaussian noise, or 'square'
#funcion = animate_sin_wave
#funcion = animate_noisy_wave
funcion = animate_square_wave
ani = animation.FuncAnimation(fig, funcion, np.arange(1, frames), init_func=init,
interval=speed, blit=True)
plt.show()
funcion = animate_sin_wave
```
#### File: introprogpython/codigo/funciones.py
```python
print("Vamos a calcular la suma de los números 1 al 10")
suma = 0
for i in range(1,11): # When you give two numbers, range generates from the minimum up to the maximum minus 1
suma = suma + i
print("La suma de los 10 primeros números enteros vale",suma)
# Pause
dummy = input()
# Imagine that you now want to compute the sum of the first 8 integers. We could repeat
# the code and change the limit
print("Vamos a calcular la suma de los números 1 al 8")
suma = 0
for i in range(1,9): # When you give two numbers, range generates from the minimum up to the maximum minus 1
suma = suma + i
print("La suma de los 8 primeros números enteros vale",suma)
# This looks quite ugly; there is a better way to do it, with functions
def suma_primeros_enteros(hasta_que_numero): # hasta_que_numero is an input parameter
suma = 0
    for i in range(1,hasta_que_numero+1): # When you give two numbers, range generates a list of numbers from the minimum up to the maximum minus 1
suma = suma + i
    return(suma) # Value returned by the function
print()
print("La suma de los primeros 5 números enteros vale",suma_primeros_enteros(5))
print("La suma de los primeros 12 números enteros vale",suma_primeros_enteros(12))
# Pause
dummy = input()
# Let's play by putting together everything we already know.
# What do you think will happen with this piece of code?
valor_suma = 0
while (valor_suma < 500):
num_enteros = int(input("¿Cuántos de los primeros enteros quieres que sume? "))
valor_suma = suma_primeros_enteros(num_enteros)
print("El valor de la suma de los",num_enteros,"primeros enteros es",valor_suma)
print()
```
|
{
"source": "jgalgarra/pythonDAMP",
"score": 4
}
|
#### File: pythonDAMP/codigo/objetos_clases.py
```python
class Person:
def __init__(self, name):
self.name = name
def saluda(self):
print('Hola, me llamo', self.name)
def cambia_nombre(self, nuevo_nombre):
self.name = nuevo_nombre
p = Person('Pepe')
p.saluda()
# We change the name attribute
p.cambia_nombre('Juan')
print("Ahora me llamo",p.name)
print()
dummy=input("Ahora vamos a ver como crear listas de nombres")
# We can create lists of names
chico = Person('Adán')
chica = Person('Eva')
lista_obj = [chico,chica]
print(lista_obj)
dummy=input("Esto es la lista, con dos objetos")
dummy=input("Ahora vamos a jugar con el contenido de la lista"
)
print(lista_obj[0].name,"y",lista_obj[1].name,"se encontraron en la calle y se miraron...")
dummy=input("")
class Pareja:
def __init__(self):
self.el = ''
self.ella = ''
self.apellido = ''
def flechazo(self,el,ella):
self.el = el
self.ella = ella
print('Flechazo total entre', self.el, "y", self.ella)
def matrimonio(self,apellido):
self.apellido = apellido
self.senyoresde = self.el+" y "+self.ella+" son ahora los "+apellido
print(self.senyoresde)
par = Pareja()
par.flechazo(lista_obj[0].name,lista_obj[1].name)
par.matrimonio("Pérez")
dummy=input()
```
|
{
"source": "jgalle29/deep_learning",
"score": 3
}
|
#### File: assignment_1/extra_files/pnp_calculations.py
```python
import numpy as np
def relu(x):
return np.maximum(0,x)
def der_relu(x):
return 1.0 * (x>0)
def loss(yout, ygt):
N = yout.shape[-1]
return np.linalg.norm(yout - ygt)**2/(2.0*N)
LEARNING_RATE = 0.5
EPOCHS = 1
X = np.array([[0.75, 0.2, -0.75, 0.2],[0.8, 0.05, 0.8, -0.05]])
Ygt = np.array([[1, 1, -1, -1]])
N = Ygt.shape[-1]
W1 = np.array([[0.6, 0.01],[0.7, 0.43],[0, 0.88]])
W2 = np.array([[0.02, 0.03, 0.09]])
def forward(X,W1,W2):
s1 = np.dot(W1,X)
z1 = relu(s1)
s2 = np.dot(W2, z1)
z2 = s2
return s1, z1, s2, z2
for epoch in range(EPOCHS):
s1, z1, s2, z2 = forward(X,W1,W2)
print(s1)
print(z1)
print(s2)
print(z2)
L = loss(z2, Ygt)
print("Loss", L)
delta2 = (z2 - Ygt)/N
grad_W2 = delta2.dot(z1.T)
delta1 = np.dot(W2.T,delta2) * der_relu(s1)
grad_W1 = delta1.dot(X.T)
W1 -= LEARNING_RATE * grad_W1
W2 -= LEARNING_RATE * grad_W2
print(delta1)
print(grad_W1)
print(delta2)
print(grad_W2)
print("Final params")
print(W1)
print(W2)
s1, z1, s2, z2 = forward(X,W1,W2)
L = loss(z2, Ygt)
print("Loss", L)
# print(s1)
# print(z1)
# print(s2)
# print(z2)
# print(delta2)
# print(W2.T)
# print(np.dot(W2.T,delta2))
```
#### File: deep_learning/assignment_1/mlp_numpy.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class MLP(object):
"""
This class implements a Multi-layer Perceptron in NumPy.
It handles the different layers and parameters of the model.
Once initialized an MLP object can perform inference, training and it
can also be used for evaluating prediction performance.
"""
def __init__(self, n_hidden, n_classes, weight_decay=0.0,
weight_scale=0.0001, input_dim=3 * 32 * 32, learning_rate= 2e-3):
"""
Constructor for an MLP object. Default values should be used as hints for
the usage of each parameter. Weights of the linear layers should be initialized
using normal distribution with mean = 0 and std = weight_scale. Biases should be
initialized with constant 0. All activation functions are ReLUs.
Args:
n_hidden: list of ints, specifies the number of units
in each hidden layer. If the list is empty, the MLP
will not have any hidden units, and the model
will simply perform a multinomial logistic regression.
n_classes: int, number of classes of the classification problem.
This number is required in order to specify the
output dimensions of the MLP.
weight_decay: L2 regularization parameter for the weights of linear layers.
weight_scale: scale of normal distribution to initialize weights.
"""
self.n_hidden = n_hidden
self.n_classes = n_classes
self.weight_decay = weight_decay
self.weight_scale = weight_scale
self.input_dim = input_dim
self.learning_rate = learning_rate
# Set weight and bias shapes given specified sizes
# W(k) has shape dim(k+1) x dim(k)
# b(k) has shape dim(k+1) x 1
# Convention: the input has shape input_dim x batch_size
# h(k) = W(k) * h(k-1) + b(k)
layer_sizes = [self.input_dim] + self.n_hidden + [self.n_classes]
weight_shapes = [(layer_sizes[i+1], layer_sizes[i]) for i in range(len(layer_sizes) - 1)]
bias_shapes = [(shape[0], 1) for shape in weight_shapes]
# Initialize weights and biases with default initializers
weights = [self._weight_initializer(shape, self.weight_scale)
for shape in weight_shapes]
biases = [self._bias_initializer(shape) for shape in bias_shapes]
# Define activation function and its derivative per layer
activations = [self._relu] * len(self.n_hidden) + [self._linear]
act_derivatives = [self._relu_der] * len(self.n_hidden) + [self._linear_der]
# Use FCLayer wrappers to setup network
self.layers = [FCLayer(W = weights[i], b = biases[i], act_fn = activations[i],
act_der = act_derivatives[i], name = "fc" + str(i), model = self)
for i in range(len(weight_shapes))]
# Activation functions and their derivatives
def _relu(self, x):
return x * (x>0)
def _linear(self, x):
return x
def _relu_der(self, x):
return 1.0 * (x>0)
def _linear_der(self, x):
return np.ones(shape = x.shape)
# Weight and bias initializers
def _weight_initializer(self, weight_shape, weight_scale):
return np.random.normal(scale = weight_scale, size = weight_shape)
def _bias_initializer(self, bias_shape):
return np.zeros(shape = bias_shape)
def inference(self, x):
"""
Performs inference given an input array. This is the central portion
of the network. Here an input array is transformed through application
of several hidden layer transformations (as defined in the constructor).
We recommend you to iterate through the list self.n_hidden in order to
perform the sequential transformations in the MLP. Do not forget to
add a linear output layer (without non-linearity) as the last transformation.
It can be useful to save some intermediate results for easier computation of
gradients for backpropagation during training.
Args:
x: 2D float array of size [batch_size, input_dim]
Returns:
logits: 2D float array of size [batch_size, self.n_classes]. Returns
the logits outputs (before softmax transformation) of the
network. These logits can then be used with loss and accuracy
to evaluate the model.
"""
########################
# PUT YOUR CODE HERE #
#######################
# Make sure convention for input shape holds
if x.shape[0] != self.input_dim:
data_tensor = x.T
else:
data_tensor = x
# Forward data tensor through the network
for layer in self.layers:
layer.forward(data_tensor)
data_tensor = layer.act
# Loss function expects logits in transposed shape
logits = data_tensor.T
assert np.isfinite(logits).all(), "Numerical instability in logits"
########################
# END OF YOUR CODE #
#######################
return logits
def _log_sum_exp(self, logits):
"""
Computes the log-sum-exp trick on logits. Assumes logits is a matrix of
dimensions batch_size x n_classes.
Reference: https://en.wikipedia.org/wiki/LogSumExp
"""
# Extract maximum score per row
row_max = np.max(logits, axis = 1, keepdims = True)
logits_minus_max = logits - row_max
# Apply LSE trick
lse = row_max + np.expand_dims(np.log(np.einsum('ij->i',np.exp(logits_minus_max))), axis = 1)
return lse
def _cross_entropy_loss(self, logits, labels):
lse = self._log_sum_exp(logits)
log_prob = logits - lse
return - np.einsum('ij,ij->', labels, log_prob), np.exp(log_prob)
def _reg_loss(self):
return np.sum([layer.complexity_penalty() for layer in self.layers])
def loss(self, logits, labels):
"""
Computes the multiclass cross-entropy loss from the logits predictions and
the ground truth labels. The function will also add the regularization
        loss from network weights to the total loss that is returned.
It can be useful to compute gradients of the loss for an easier computation of
gradients for backpropagation during training.
Args:
logits: 2D float array of size [batch_size, self.n_classes].
The predictions returned through self.inference.
labels: 2D int array of size [batch_size, self.n_classes]
with one-hot encoding. Ground truth labels for each
sample in the batch.
Returns:
loss: scalar float, full loss = cross_entropy + reg_loss
"""
########################
# PUT YOUR CODE HERE #
#######################
batch_size = logits.shape[0]
ce_loss, pred_probs = self._cross_entropy_loss(logits, labels)
ce_loss /= batch_size
reg_loss = self._reg_loss()
loss = ce_loss + self.weight_decay * reg_loss
assert np.isfinite(loss), "Numerical instability in logits"
self.delta_out = (pred_probs - labels).T / batch_size
########################
# END OF YOUR CODE #
#######################
return loss
def train_step(self, loss= None, flags = None):
"""
Implements a training step using a parameters in flags.
Use mini-batch Stochastic Gradient Descent to update the parameters of the MLP.
Args:
loss: scalar float.
flags: contains necessary parameters for optimization.
Returns:
"""
########################
# PUT YOUR CODE HERE #
#######################
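        # Backprop recursion used below (noted for clarity):
        #   delta_k = (W_{k+1}^T delta_{k+1}) * act'(s_k),
        # starting from self.delta_out, which loss() set to the gradient of the
        # softmax/cross-entropy loss with respect to the output pre-activations.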
deltas = [self.delta_out]
layer_index = list(range(1,len(self.layers)))
layer_index.reverse()
for k in layer_index:
current_layer = self.layers[k-1]
following_layer = self.layers[k]
current_delta = following_layer.W.T.dot(deltas[0]) * current_layer.act_der(current_layer.preact)
deltas = [current_delta] + deltas
for k in range(len(self.layers)):
#print(k,self.layers[k].name)
self.layers[k].backward(deltas[k])
self.delta_out = None
########################
# END OF YOUR CODE #
#######################
def accuracy(self, logits, labels):
"""
Computes the prediction accuracy, i.e. the average of correct predictions
of the network.
Args:
logits: 2D float array of size [batch_size, self.n_classes].
The predictions returned through self.inference.
labels: 2D int array of size [batch_size, self.n_classes]
with one-hot encoding. Ground truth labels for
each sample in the batch.
Returns:
accuracy: scalar float, the accuracy of predictions,
i.e. the average correct predictions over the whole batch.
"""
########################
# PUT YOUR CODE HERE #
#######################
batch_size = logits.shape[0]
accuracy = np.sum(np.argmax(logits, axis = 1) == np.argmax(labels, axis = 1))
accuracy /= batch_size
########################
# END OF YOUR CODE #
#######################
return accuracy
class FCLayer(object):
"""
Wrapper for fully-connected layers in the MLP architecture
"""
def __init__(self, W, b, act_fn, act_der, name, model):
self.W = W
self.b = b
self.act_fn = act_fn
self.act_der = act_der
self.name = name
self.model = model
self.input = None
self.preact = None
self.act = None
def forward(self, input):
assert input.shape[0] == self.W.shape[1], "Dimension mismatch in layer \
{} with weight size {} and input size {}".format(self.name, self.W.shape, input.shape)
self.input = input
self.preact = np.dot(self.W, input) + self.b
self.act = self.act_fn(self.preact)
def backward(self, delta):
grad_W = delta.dot(self.input.T) + self.model.weight_decay * self.W
grad_b = delta.sum(axis=1, keepdims=True)
self.W -= self.model.learning_rate * grad_W
self.b -= self.model.learning_rate * grad_b
def complexity_penalty(self):
return 0.5 * np.linalg.norm(self.W)**2
```
#### File: assignment_2/part1/utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import numpy as np
import tensorflow as tf
def generate_palindrome(length):
# Generates a single, random palindrome number of 'length' digits.
left = [np.random.randint(0, 10) for _ in range(math.ceil(length/2))]
left = np.asarray(left, dtype=np.int32)
right = np.flip(left, 0) if length % 2 == 0 else np.flip(left[:-1], 0)
return np.concatenate((left, right))
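# Illustrative example (output is random; the digits below are made up):
#   generate_palindrome(5) might return array([4, 8, 2, 8, 4], dtype=int32)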
def generate_palindrome_batch(batch_size, length):
# Generates a batch of random palindrome numbers.
batch = [generate_palindrome(length) for _ in range(batch_size)]
return np.asarray(batch, np.int32)
def init_summary_writer(sess, save_path):
# Optional to use.
if not os.path.exists(save_path):
os.makedirs(save_path)
return tf.summary.FileWriter(save_path, sess.graph)
```
#### File: assignment_2/part2/dataset.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import numpy as np
class TextDataset(object):
def __init__(self, filename):
assert os.path.splitext(filename)[1] == ".txt"
self._data = open(filename, 'r').read()
self._data = self.process_str(self._data)
self._chars = sorted(list(set(self._data)))
self._data_size, self._vocab_size = len(self._data), len(self._chars)
print("Initialize dataset with {} characters, {} unique.".format(
self._data_size, self._vocab_size))
self._char_to_ix = {ch:i for i, ch in enumerate(self._chars)}
self._ix_to_char = {i:ch for i, ch in enumerate(self._chars)}
self._offset = 0
def example(self, seq_length):
offset = np.random.randint(0, len(self._data)-seq_length-2)
inputs = [self._char_to_ix[ch] for ch in self._data[offset:offset+seq_length]]
targets = [self._char_to_ix[ch] for ch in self._data[offset+1:offset+seq_length+1]]
return inputs, targets
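    # Note on example() above: the targets are the inputs shifted by one character,
    # so the model is trained to predict the next character at every position.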
def batch(self, batch_size, seq_length):
batch_inputs = np.zeros((batch_size, seq_length), np.int32)
batch_targets = np.zeros((batch_size, seq_length), np.int32)
for i in range(batch_size):
batch_inputs[i], batch_targets[i] = self.example(seq_length)
return batch_inputs, batch_targets
def convert_to_string(self, char_ix):
return ''.join(self._ix_to_char[ix] for ix in char_ix)
@property
def vocab_size(self):
return self._vocab_size
def process_str(self, string):
string = re.sub(r"[^A-Za-z0-9(),.!¿?:;ÁÉÍÓÚáéíóúñÑü\-\'\`]", " ", string)
string = re.sub(r"\s{2,}", " ", string)
string = re.sub(r"\\n", " ", string)
return string.strip()
```
#### File: assignment_2/part2/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from datetime import datetime
import argparse
import numpy as np
import tensorflow as tf
from dataset import TextDataset
from model import TextGenerationModel
def _check_path(path):
"""
Makes sure path for log and model saving exists
"""
if not tf.gfile.Exists(path):
tf.gfile.MakeDirs(path)
def train(config):
# Initialize the text dataset
dataset = TextDataset(config.txt_file)
# Initialize the model
model = TextGenerationModel(
batch_size=config.batch_size,
seq_length=config.seq_length,
vocabulary_size=dataset.vocab_size,
lstm_num_hidden=config.lstm_num_hidden,
lstm_num_layers=config.lstm_num_layers,
dropout_keep_prob=config.dropout_keep_prob,
prediction_mode=config.prediction_mode
)
###########################################################################
# Implement code here.
###########################################################################
# Placeholders for model sampling
init_sample_char = tf.placeholder(dtype=tf.int32, shape=(config.num_samples))
seq_samples = model._sample(init_input=init_sample_char,
num_samples=config.num_samples,
sample_length=config.sample_length,
init_state=None)
init_sentence = tf.placeholder(dtype=tf.int32, shape=(None, 1))
completed_sentence = model._complete_sentence(init_sentence, config.sample_length)
gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=config.gpu_mem_frac, allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts))
# Setup global step
global_step = tf.Variable(0, trainable=False, name='global_step')
# Define the optimizer
if config.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=config.learning_rate, decay=config.learning_rate_decay)
elif config.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(config.learning_rate)
# Compute the gradients for each variable
grads_and_vars = optimizer.compute_gradients(model._loss)
#train_op = optimizer.apply_gradients(grads_and_vars, global_step)
grads, variables = zip(*grads_and_vars)
grads_clipped, _ = tf.clip_by_global_norm(grads, clip_norm=config.max_norm_gradient)
apply_gradients_op = optimizer.apply_gradients(zip(grads_clipped, variables), global_step=global_step)
# Saver
saver = tf.train.Saver(max_to_keep=50)
save_path = os.path.join(config.save_path, '{}/model.ckpt'.format(config.name))
_check_path(save_path)
# Initialization
init_op = tf.global_variables_initializer()
local_init_op = tf.local_variables_initializer()
sess.run(fetches=[init_op, local_init_op])
# Define summary operation
summary_op = tf.summary.merge_all()
# Logs
train_log_path = os.path.join(config.summary_path, '{}'.format(config.name))
_check_path(train_log_path)
train_log_writer = tf.summary.FileWriter(train_log_path, graph=sess.graph)
###########################################################################
# Implement code here.
###########################################################################
print(" ******* DICTIONARY ******* ")
print(dataset._ix_to_char)
for train_step in range(int(config.train_steps)):
# Only for time measurement of step through network
t1 = time.time()
#######################################################################
# Implement code here.
#######################################################################
x, y = dataset.batch(batch_size=config.batch_size, seq_length=config.seq_length)
tr_feed = {model._inputs: x, model._targets: y}
fetches = [apply_gradients_op, model._loss]
if train_step % config.print_every == 0:
fetches += [summary_op]
_, train_loss, summary = sess.run(feed_dict=tr_feed, fetches=fetches)
train_log_writer.add_summary(summary, train_step)
else:
_, train_loss = sess.run(feed_dict=tr_feed, fetches=fetches)
# Only for time measurement of step through network
t2 = time.time()
examples_per_second = config.batch_size/float(t2-t1)
# Output the training progress
if train_step % config.print_every == 0:
print("[{}] Train Step {:04d}/{:04d}, Batch Size = {}, Examples/Sec = {:.2f}, Loss = {:.4f}".format(
datetime.now().strftime("%Y-%m-%d %H:%M"), train_step,
int(config.train_steps), config.batch_size,
examples_per_second, train_loss))
# Sample sentences from the model
if train_step % config.sample_every == 0:
# Random initial character
init_chars = np.random.choice(a=dataset.vocab_size, size=(config.num_samples))
sampled_seq = sess.run(fetches=[seq_samples], feed_dict={init_sample_char: init_chars})[0]
sampled_seq = np.array(sampled_seq).T
print("\n ******* Random Initial Character *******")
for i in range(config.num_samples):
print('{} - {}|{}'.format(i, dataset._ix_to_char[init_chars[i]], dataset.convert_to_string(sampled_seq[i, :])))
#Custom sentences
custom_inits = ['To be, or not to be, that is the question: Whether ',
'History will be kind to me for I intend to ',
'Hansel and Gr',
'Democracy is ',
'Let T be a bounded linear operator in V, a vector space.',
'Mas vale pajaro en mano que ver un ciento v']
print("\n ******* Sentence Completion *******")
for init_seq in custom_inits:
init_vec = np.array([dataset._char_to_ix[x] for x in init_seq if x in dataset._char_to_ix]).reshape((-1, 1))
sampled_seq = sess.run(fetches=[completed_sentence], feed_dict={init_sentence: init_vec})[0]
print('{}|{}'.format(init_seq, dataset.convert_to_string(sampled_seq.squeeze().tolist())))
print("\n")
# Save checkpoint
if train_step % config.save_every == 0 and train_step > 1:
saver.save(sess, save_path=save_path)
train_log_writer.close()
if __name__ == "__main__":
# Parse training configuration
parser = argparse.ArgumentParser()
# Model params
parser.add_argument('--txt_file', type=str, required=True, help="Path to a .txt file to train on")
parser.add_argument('--seq_length', type=int, default=30, help='Length of an input sequence')
parser.add_argument('--lstm_num_hidden', type=int, default=128, help='Number of hidden units in the LSTM')
parser.add_argument('--lstm_num_layers', type=int, default=2, help='Number of LSTM layers in the model')
# Training params
parser.add_argument('--batch_size', type=int, default=64, help='Number of examples to process in a batch')
parser.add_argument('--learning_rate', type=float, default=2e-3, help='Learning rate')
parser.add_argument('--learning_rate_decay', type=float, default=0.96, help='Learning rate decay fraction')
parser.add_argument('--learning_rate_step', type=int, default=5000, help='Learning rate step')
parser.add_argument('--dropout_keep_prob', type=float, default=1.0, help='Dropout keep probability')
parser.add_argument('--train_steps', type=int, default=1e6, help='Number of training steps')
    parser.add_argument('--max_norm_gradient', type=float, default=5.0, help='Maximum global norm for gradient clipping')
# Misc params
parser.add_argument('--gpu_mem_frac', type=float, default=0.5, help='Fraction of GPU memory to allocate')
parser.add_argument('--log_device_placement', type=bool, default=False, help='Log device placement for debugging')
parser.add_argument('--summary_path', type=str, default="./summaries/", help='Output path for summaries')
parser.add_argument('--print_every', type=int, default=10, help='How often to print training progress')
parser.add_argument('--sample_every', type=int, default=250, help='How often to sample from the model')
parser.add_argument('--save_every', type=int, default=500, help='How often to save the model')
parser.add_argument('--save_path', type=str, default='./checkpoints/', help='Output path for model checkpoints')
parser.add_argument('--optimizer', type=str, default="rmsprop", choices=['adam', 'rmsprop'], help='Optimizer to use')
parser.add_argument('--name', type=str, default="model", help='Model name')
parser.add_argument('--num_samples', type=int, default=10, help='Number of randomly initialized sample sequences')
parser.add_argument('--sample_length', type=int, default=100, help='Length of sampled sequence')
    parser.add_argument('--prediction_mode', type=str, default="sample", choices=['sample', 'max'], help='How to pick the next character: sample from the softmax or take the argmax')
config = parser.parse_args()
# Train the model
train(config)
```
#### File: deep_learning/assignment_3/a3_simple_template.py
```python
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
def load_mnist_images(binarize=True):
"""
:param binarize: Turn the images into binary vectors
:return: x_train, x_test Where
x_train is a (55000 x 784) tensor of training images
x_test is a (10000 x 784) tensor of test images
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
x_train = mnist.train.images
x_test = mnist.test.images
if binarize:
x_train = (x_train>0.5).astype(x_train.dtype)
x_test = (x_test>0.5).astype(x_test.dtype)
return x_train, x_test
def _check_path(path):
"""
Makes sure path for log and model saving exists
"""
if not tf.gfile.Exists(path):
tf.gfile.MakeDirs(path)
class NaiveBayesModel(object):
def __init__(self, w_init, b_init = None, c_init = None):
"""
:param w_init: An (n_categories, n_dim) array, where w[i, j] represents log p(X[j]=1 | Z[i]=1)
:param b_init: A (n_categories, ) vector where b[i] represents log p(Z[i]=1), or None to fill with zeros
:param c_init: A (n_dim, ) vector where b[j] represents log p(X[j]=1), or None to fill with zeros
"""
self.w = w_init
(self.n_categories, self.n_dim) = self.w.shape
if b_init is None:
self.b = tf.get_variable(name='b',
shape=[self.n_categories],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
else:
self.b = b_init
if c_init is None:
self.c = tf.get_variable(name='c',
shape=[self.n_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
else:
self.c = c_init
self._inputs = tf.placeholder(tf.float32, shape=[None, self.n_dim], name='inputs')
self._loss = self._compute_loss()
def log_p_x_given_z(self, x):
"""
:param x: An (n_samples, n_dims) tensor
:return: An (n_samples, n_labels) tensor p_x_given_z where result[n, k] indicates p(X=x[n] | Z=z[k])
"""
# D x K
alpha = tf.transpose(self.w + self.c)
# N x K
return tf.matmul(x, tf.log_sigmoid(alpha)) + tf.matmul((1 - x), tf.log_sigmoid(- alpha))
def log_p_x(self, x):
"""
:param x: A (n_samples, n_dim) array of data points
:return: A (n_samples, ) array of log-probabilities assigned to each point
"""
# K x 1
log_prior = tf.nn.log_softmax(self.b)
# N x K
log_p_x_given_z = self.log_p_x_given_z(x)
# N x 1
return tf.reduce_logsumexp(tf.add(log_p_x_given_z, tf.transpose(log_prior)), axis=1)
def _compute_loss(self):
nll = -tf.reduce_mean(self.log_p_x(self._inputs), axis = 0)
tf.summary.scalar('log_like', -nll)
return nll
def sample(self, n_samples=None, z_samples=None, sample_or_mean='sample'):
"""
:param n_samples: Generate N samples from your model
        :return: A (n_samples, n_dim) array where n_dim is the dimensionality of your input
"""
if z_samples is None:
latent_var_distro = tf.distributions.Categorical(logits=tf.squeeze(self.b))
# N x K
z_samples = latent_var_distro.sample(int(n_samples))
# N x K
z_one_hot = tf.one_hot(z_samples, self.n_categories)
# N x D
logits = tf.add(tf.matmul(z_one_hot, self.w), self.c, name='sample_logits')
batch_distro = tf.contrib.distributions.BernoulliWithSigmoidProbs(logits=logits)
if sample_or_mean == 'sample':
samples = batch_distro.sample()
elif sample_or_mean == 'mean':
samples = batch_distro.mean()
return samples
def plot_image_grid(data_tensor, im_h, im_w, hor_ims, vert_ims):
fig = plt.figure()
ax = fig.add_subplot(111)
reshaped_tensor = np.zeros((int(im_h * vert_ims), int(im_w * hor_ims)))
for row in range(vert_ims):
for col in range(hor_ims):
col_inf, col_sup = (int(col*im_w), int((col+1)*im_w))
row_inf, row_sup = (int(row*im_w), int((row+1)*im_w))
reshaped_im = np.reshape(data_tensor[int(col + hor_ims * row), :], (im_h, im_w))
reshaped_tensor[row_inf:row_sup, col_inf:col_sup] = reshaped_im
plt.imshow(reshaped_tensor, cmap='gray')
for axi in (ax.xaxis, ax.yaxis):
for tic in axi.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
plt.show()
def train_simple_generative_model_on_mnist(n_categories=20, initial_mag = 0.01, optimizer='rmsprop', learning_rate=.01, n_epochs=20, test_every=100,
minibatch_size=100, plot_n_samples=16):
"""
Train a simple Generative model on MNIST and plot the results.
:param n_categories: Number of latent categories (K in assignment)
:param initial_mag: Initial weight magnitude
:param optimizer: The name of the optimizer to use
:param learning_rate: Learning rate for the optimization
:param n_epochs: Number of epochs to train for
:param test_every: Test every X iterations
:param minibatch_size: Number of samples in a minibatch
:param plot_n_samples: Number of samples to plot
"""
# Get Data
x_train, x_test = load_mnist_images(binarize=True)
# White background is nicer
x_train = 1 - x_train
x_test = 1 - x_test
# Create Frankenstein digits
frank, orig_digits = create_frankenstein(x_test, 10)
# Only use 1k test examples for speed
x_test = x_test[0:1000, :]
train_iterator = tf.data.Dataset.from_tensor_slices(x_train).repeat().batch(minibatch_size).make_initializable_iterator()
n_samples, n_dims = x_train.shape
x_minibatch = train_iterator.get_next() # Get symbolic data, target tensors
# Build the model
w_init = tf.get_variable(name="w",
shape=[n_categories, n_dims],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=initial_mag))
model = NaiveBayesModel(w_init)
# Setup global step
global_step = tf.Variable(0, trainable=False, name='global_step')
# Define the optimizer
assert optimizer in ('adam', 'rmsprop')
if optimizer == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate)
elif optimizer == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(learning_rate)
# Define summary operation
summary_op = tf.summary.merge_all()
# Optimization step
grads_and_vars = optimizer.compute_gradients(model._loss)
grads, variables = zip(*grads_and_vars)
apply_gradients_op = optimizer.apply_gradients(zip(grads, variables), global_step=global_step)
with tf.Session() as sess:
sess.run(train_iterator.initializer)
sess.run(tf.global_variables_initializer())
n_steps = (n_epochs * n_samples)/minibatch_size
train_log_path = "./logs/nb_tr/"
test_log_path = "./logs/nb_ts/"
_check_path(train_log_path)
_check_path(test_log_path)
train_log_writer = tf.summary.FileWriter(train_log_path, graph=sess.graph)
test_log_writer = tf.summary.FileWriter(test_log_path)
for i in range(int(n_steps)):
if i % test_every == 0:
ts_feed = {model._inputs: x_test}
fetches = [model._loss, summary_op]
test_loss, test_summary = sess.run(fetches=fetches, feed_dict=ts_feed)
test_log_writer.add_summary(test_summary, i)
print("Step: {} \t Test LL: {:.3f}".format(i, -test_loss))
tr_feed = {model._inputs: sess.run(x_minibatch)}
fetches = [apply_gradients_op, model._loss, summary_op]
_, train_loss, train_summary = sess.run(fetches=fetches, feed_dict=tr_feed)
train_log_writer.add_summary(train_summary, i)
if i % 50 == 0:
print("Step: {} \t Train LL: {:.3f}".format(i, -train_loss))
# Problem 6: Expected pixel values given that the latent variable
samples = sess.run(model.sample(z_samples=list(range(n_categories)),
sample_or_mean="mean"))
plot_image_grid(samples, 28, 28, 5, 4)
#Problem 7: Show 16 images samples from your trained model
samples = sess.run(model.sample(plot_n_samples))
plot_image_grid(samples, 28, 28, int(np.sqrt(plot_n_samples)), int(np.sqrt(plot_n_samples)))
#Problem 9: Frankenstein digits + statistical test
frank_ll = sess.run(model.log_p_x(frank))
orig_ll = sess.run(model.log_p_x(orig_digits))
print("\nFrankenstein Digits\n")
print(frank_ll, np.mean(frank_ll), np.std(frank_ll))
print(orig_ll, np.mean(orig_ll), np.std(orig_ll))
print(stats.ttest_ind(frank_ll, orig_ll, equal_var=False))
plot_image_grid(frank, 28, 28, 5, 2)
plot_image_grid(orig_digits, 28, 28, 5, 2)
test_log_writer.close()
train_log_writer.close()
def create_frankenstein(x_test, num_samples):
(num_test_samples, x_dim) = x_test.shape
rand_ix = np.random.randint(num_test_samples, size=int(2 * num_samples))
orig_digits = x_test[rand_ix, :]
frank_tensor = np.zeros((num_samples, x_dim))
frank_tensor[:, 0: int(x_dim/2)] = orig_digits[[2*i for i in range(num_samples)], 0:int(x_dim/2)]
frank_tensor[:, int(x_dim/2):] = orig_digits[[2*i + 1 for i in range(num_samples)], int(x_dim/2):]
return np.array(frank_tensor, dtype = 'float32'), np.array(orig_digits[0:num_samples, :], dtype = 'float32')
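# Note: with flattened 28x28 images, the first x_dim/2 entries cover the top 14 rows,
# so each Frankenstein digit is the top half of one random test digit stitched to the
# bottom half of another.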
if __name__ == '__main__':
train_simple_generative_model_on_mnist()
```
|
{
"source": "jgalle29/ga",
"score": 3
}
|
#### File: genmodel/readers/cifar10.py
```python
import torch
from torch.utils import data
from torchvision import datasets, transforms
from pylego.reader import DatasetReader
class CIFAR10Reader(DatasetReader):
def __init__(self, data_path):
to_tensor = transforms.ToTensor()
train_dataset = datasets.CIFAR10(data_path + '/CIFAR10', train=True, download=True, transform=to_tensor)
test_dataset = datasets.CIFAR10(data_path + '/CIFAR10', train=False, download=True, transform=to_tensor)
torch.manual_seed(0)
super().__init__({'train': data.ConcatDataset([train_dataset, test_dataset])})
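# Hypothetical usage sketch (the data path below is illustrative):
#   reader = CIFAR10Reader('./data')  # downloads CIFAR10 into ./data/CIFAR10 if needed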
```
|
{
"source": "jgalle29/machine_learning_1",
"score": 4
}
|
#### File: ml1_labs/lab3/check_grad.py
```python
from numpy import *
def checkgrad(f, g, x, e, RETURNGRADS=False, **args):
    """Check correctness of gradient function g at x by comparing to a numerical
    approximation using perturbances of size e. Simple adaptation of
    Carl Rasmussen's matlab-function checkgrad."""
    from pylab import norm
    print(args)
    dy = g(x, **args)
    if isscalar(x):
        dh = zeros(1, dtype=float)
        l = 1
    else:
        print("x in checkgrad:")
        print(x)
        l = len(x)
        dh = zeros(l, dtype=float)
    for j in range(l):
        dx = zeros(l, dtype=float)
        dx[j] = e
        y2 = f(x + dx, **args)
        print(y2)
        y1 = f(x - dx, **args)
        # print(dx, y2, y1)
        dh[j] = (y2 - y1) / (2 * e)
        # print(dh[j])
    print("analytic (gradient call): \n", dy)
    print("approximation (objective call): \n", dh)
    if RETURNGRADS:
        return dy, dh
    else:
        return norm(dh - dy) / norm(dh + dy)
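# Hypothetical usage sketch (the quadratic below is illustrative, not part of the lab):
#   from numpy.random import randn
#   err = checkgrad(lambda w: (w ** 2).sum(), lambda w: 2 * w, randn(3), 1e-5)
#   # err should be tiny (e.g. < 1e-6) when the analytic gradient is correct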
```
|
{
"source": "j-gallistl/reda",
"score": 3
}
|
#### File: reda/containers/TDIP.py
```python
from reda.containers.ERT import ERT
import numpy as np
import pandas as pd
import reda.utils.mpl
plt, mpl = reda.utils.mpl.setup()
class TDIP(ERT):
def check_dataframe(self, dataframe):
"""Check the given dataframe for the required type and columns
"""
if dataframe is None:
return None
# is this a DataFrame
if not isinstance(dataframe, pd.DataFrame):
raise Exception(
'The provided dataframe object is not a pandas.DataFrame'
)
required_columns = (
'a',
'b',
'm',
'n',
'r',
'chargeability',
)
for column in required_columns:
if column not in dataframe:
raise Exception('Required column not in dataframe: {0}'.format(
column
))
return dataframe
def plot_decay_curve(self, filename=None, index_nor=None, index_rec=None,
nr_id=None, abmn=None,
return_fig=False):
"""Plot decay curve
Input scheme: We recognize three ways to specify the quadrupoles to
plot (in descending priority):
1) indices for normal/reciprocal
2) by specifying the id
3) by specifying abmn (note that here the exact quadrupole must be
present. For example, when (1,2,4,3) is requested, (2,1,4,3)
will not be used).
Parameters
----------
filename : string, optional
If given, filename to plot to.
Returns
-------
fig : :class:`matplotlib.Figure`
Figure object, only returned if return_fig=True
"""
def get_indices_for_id(norrec_id):
subquery_nor = self.data.query(
'id == {} and norrec == "nor"'.format(norrec_id)
)
if subquery_nor.shape[0] >= 1:
indices = [subquery_nor.index.values[0], ]
else:
indices = [None, ]
subquery_rec = self.data.query(
'id == {} and norrec == "rec"'.format(norrec_id)
)
if subquery_rec.shape[0] >= 1:
indices = indices + [subquery_rec.index.values[0], ]
else:
indices = indices + [None, ]
return indices
# select data to plot
# 1: indices
if index_nor is not None or index_rec is not None:
indices = [index_nor, index_rec]
elif nr_id is not None:
# reset the index
self.data.reset_index(drop=True, inplace=True)
indices = get_indices_for_id(nr_id)
elif abmn is not None:
subquery = self.data.query(
'a == {} and b == {} and m == {} and n == {}'.format(*abmn)
)
# print(abmn)
# print('subquery', subquery)
# import IPython
# IPython.embed()
if subquery.shape[0] > 0:
self.data.reset_index(drop=True, inplace=True)
indices = get_indices_for_id(subquery['id'].values[0])
else:
raise Exception(
'configuration not found. Perhaps electrodes were ' +
'switched due to negative geometric factors?'
)
else:
raise Exception('No selection method successful!')
# plot
fig, axes = plt.subplots(1, 3, figsize=(15 / 2.54, 8 / 2.54))
labels = ('normal', 'reciprocal')
# gather data
data_list = []
data_normed_list = []
for nr, index in enumerate(indices):
if index is not None:
data = self.data.loc[index, 'Mx']
data_norm = data / self.data.loc[index, 'Iab']
data_list.append(data)
data_normed_list.append(data_norm)
else:
data_list.append(None)
data_normed_list.append(None)
for nr, index in enumerate(indices):
if index is None:
continue
mdelay = self.data.loc[index, 'mdelay']
times = self.data.loc[index, 'Tm']
mtime = mdelay + np.cumsum(times)
ax = axes[0]
ax.plot(mtime, data_list[nr], '.-', label=labels[nr])
ax = axes[1]
ax.plot(
mtime, data_normed_list[nr], '.-', label=labels[nr] +
' I: {:.1f}mA'.format(self.data.loc[index, 'Iab'])
)
ax = axes[2]
if indices[0] is not None and indices[1] is not None:
residuals = np.array(data_list[1]) - np.array(data_list[0])
ax.plot(mtime, residuals, '.-', color='r')
ax.set_xlabel('time [ms]')
ax.set_ylabel('residual [mV]')
else:
ax.set_visible(False)
# set labels etc.
ax = axes[0]
ax.legend(loc='best', fontsize=6.0)
ax.set_xlabel('time [ms]')
ax.set_ylabel('m [mV/V]')
ax = axes[1]
ax.set_ylabel('normalized decay curve [-]', fontsize=7.0)
ax.set_title(r'normed on current', fontsize=6.0)
ax.legend(loc='best', fontsize=6.0)
fig.tight_layout()
if filename is not None:
fig.savefig(filename, dpi=300)
if return_fig:
return fig
else:
fig.clf()
plt.close(fig)
def to_cr(self):
"""Convert container to a complex resistivity container, using the
CPA-conversion.
Kemna, 2000
        COMPLEX RESISTIVITY SPECTRA OF PORPHYRY COPPER MINERALIZATION
        <NAME>.; <NAME>.; <NAME>.
        Geophysics (1973 Jan 1) 38 (1): 49-60.
        Application of complex resistivity tomography to field data from
        a kerosene-contaminated site. <NAME>, <NAME> and <NAME>.
        DOI: 10.3997/2214-4609.201407300
<NAME>, <NAME>, <NAME>, and
<NAME> (2012). ”Time-domain-induced polarization:
Full-decay forward modeling and 1D laterally constrained inversion
of Cole-Cole parameters.” GEOPHYSICS, 77(3), E213-E225.
https://doi.org/10.1190/geo2011-0217.1
"""
data_new = self.data.copy()
data_new['rpha'] = -1.5 * data_new['chargeability']
# now that we have magnitude and phase, compute the impedance Zt
data_new['Zt'] = data_new['r'] * np.exp(data_new['rpha'] * 1j / 1000.0)
cr = reda.CR(data=data_new)
return cr
```
#### File: reda/eis/convert.py
```python
import numpy as np
"""
Convert between different representations for complex resistivity spectra
Basically we always have two parameters for each frequency. These two
parameters can be representated in various forms: conductivity/resistivity;
magnitude-phase/real-imaginary part.
Note that in this context it doesn't matter if we deal with conductivities or
conductances (same for resistivity and resistance).
"""
def split_data(data, squeeze=False):
"""
Split 1D or 2D into two parts, using the last axis
Parameters
----------
data:
squeeze : squeeze results to remove unnecessary dimensions
"""
vdata = np.atleast_2d(data)
nr_freqs = int(vdata.shape[1] / 2)
part1 = vdata[:, 0:nr_freqs]
part2 = vdata[:, nr_freqs:]
if(squeeze):
part1 = part1.squeeze()
part2 = part2.squeeze()
return part1, part2
def to_complex(mag, pha):
complex_nr = mag * np.exp(1j / 1000 * pha.astype(np.float32))
return complex_nr
def generic_magpha_to_reim(mag, pha):
"""
Generically convert magnitude and phase to real and imaginary part using
the formula :math:`mag \cdot exp(1j / 1000 * pha)`
Thus it is suitable for resistivities, multiply conductivity phases with -1
"""
complex_nr = to_complex(mag, pha)
real_part = np.real(complex_nr)
imag_part = np.imag(complex_nr)
return real_part, imag_part
####################
# # from converter ##
####################
def from_ccomplex(data):
cre = np.real(data)
cim = np.imag(data)
return cre, cim
def from_rcomplex(data):
# rre = np.real(data)
Y = 1.0 / data
cre = np.real(Y)
cim = np.imag(Y)
return cre, cim
def from_cre_cim(data):
cre, cim = split_data(data)
return cre, cim
def from_cre_cmim(data):
cre, cmim = split_data(data)
return cre, -cmim
def from_cmag_cpha(data):
cmag, cpha = split_data(data)
cre, cim = generic_magpha_to_reim(cmag, cpha)
return cre, cim
def from_log10rmag_rpha(data):
rlog10mag, rpha = split_data(data)
Z = to_complex(10 ** rlog10mag, rpha)
Y = 1 / Z
real_part = np.real(Y)
imag_part = np.imag(Y)
return real_part, imag_part
def from_lnrmag_rpha(data):
rlnmag, rpha = split_data(data)
Z = to_complex(np.exp(rlnmag), rpha)
Y = 1 / Z
real_part = np.real(Y)
imag_part = np.imag(Y)
return real_part, imag_part
def from_rmag_rpha(data):
rmag, rpha = split_data(data)
Z = to_complex(rmag, rpha)
Y = 1 / Z
real_part = np.real(Y)
imag_part = np.imag(Y)
return real_part, imag_part
def from_rre_rmim(data):
rre, rmim = split_data(data)
Z = rre - 1j * rmim
Y = 1 / Z
real_part = np.real(Y)
imag_part = np.imag(Y)
return real_part, imag_part
def from_rre_rim(data):
rre, rim = split_data(data)
Z = rre + 1j * rim
Y = 1 / Z
real_part = np.real(Y)
imag_part = np.imag(Y)
return real_part, imag_part
##################
# # to converter ##
##################
# converts from conductiviy re/im to various formats
def to_cre_cim(cre, cim):
data = np.hstack((cre, cim))
return data
def to_cre_cmim(cre, cim):
cmim = -np.array(cim)
data = np.hstack((cre, cmim))
return data
def to_cmag_cpha(cre, cim):
Y = cre + 1j * cim
cmag = np.abs(Y)
cpha = np.arctan2(cim, cre) * 1000
return np.hstack((cmag, cpha))
def to_rre_rim(cre, cim):
Y = cre + 1j * cim
Z = 1 / Y
real_p = np.real(Z)
imag_p = np.imag(Z)
return np.hstack((real_p, imag_p))
def to_rre_rmim(cre, cim):
Y = cre + 1j * cim
Z = 1 / Y
real_p = np.real(Z)
mimag_p = -np.imag(Z)
return np.hstack((real_p, mimag_p))
def to_rmag_rpha(cre, cim):
Y = cre + 1j * cim
Z = 1 / Y
real_p = np.real(Z)
imag_p = np.imag(Z)
mag = np.abs(Z)
pha = np.arctan2(imag_p, real_p) * 1000
return np.hstack((mag, pha))
def to_log10rmag_rpha(cre, cim):
rmag_rpha = to_rmag_rpha(cre, cim)
mag_slice = slice(0, int(rmag_rpha.shape[1] / 2))
log10rmag_rpha = rmag_rpha
log10rmag_rpha[:, mag_slice] = np.log10(rmag_rpha[:, mag_slice])
return log10rmag_rpha
def to_lnrmag_rpha(cre, cim):
rmag_rpha = to_rmag_rpha(cre, cim)
mag_slice = slice(0, int(rmag_rpha.shape[1] / 2))
lnrmag_rpha = rmag_rpha
lnrmag_rpha[:, mag_slice] = np.log(rmag_rpha[:, mag_slice])
return lnrmag_rpha
def to_ccomplex(cre, cim):
return cre + 1j * cim
def to_rcomplex(cre, cim):
Y = cre + 1j * cim
Z = 1.0 / Y
return Z
# store the converter functions in dicts
from_converters = {
'lnrmag_rpha': from_lnrmag_rpha,
'log10rmag_rpha': from_log10rmag_rpha,
'rmag_rpha': from_rmag_rpha,
'rre_rim': from_rre_rim,
'rre_rmim': from_rre_rmim,
'cmag_cpha': from_cmag_cpha,
'cre_cim': from_cre_cim,
'cre_cmim': from_cre_cmim,
'ccomplex': from_ccomplex,
'rcomplex': from_rcomplex,
}
to_converters = {
'lnrmag_rpha': to_lnrmag_rpha,
'log10rmag_rpha': to_log10rmag_rpha,
'rmag_rpha': to_rmag_rpha,
'rre_rim': to_rre_rim,
'rre_rmim': to_rre_rmim,
'cmag_cpha': to_cmag_cpha,
'cre_cim': to_cre_cim,
'cre_cmim': to_cre_cmim,
'ccomplex': to_ccomplex,
'rcomplex': to_rcomplex,
}
def convert(input_format, output_format, data, one_spectrum=False):
"""
Convert from the given format to the requested format
Parameters
----------
input_format : format of input data (parameter 'data')
output_format : format of output data
data : numpy array containing data in specified input format
one_spectrum : True|False, the input data comprises one spectrum. This
allows for an additional format of the data array.
Possible input/output formats:
------------------------------
'lnrmag_rpha'
'log10rmag_rpha'
'rmag_rpha'
'rre_rim'
'rre_rmim'
'cmag_cpha'
'cre_cim'
'cre_cmim'
'ccomplex'
'rcomplex'
Array format
------------
    data is either 1D or 2D. A 1D array corresponds to one spectrum, with double
    the size of the frequencies (which are not needed for the conversion).
    Thus, the first half comprises either magnitude data and the second one
    phase data, or the parts comprise real and imaginary parts.
For the 2D case there exist two possibilities:
First, if one_spectrum is False, then the first axis denotes the spectrum
number, and each spectrum is stored on the second axis as described for the
1D case.
Second, if one_spectrum is True, and the first axis has the size two, then
the axis denotes either magnitude (index 0) and phase (index 1), or real
(index 0) and imaginary (index 1) parts. The second axis has the same size
as there are frequencies.
Internally we always convert to real part and imaginary part of
conductivity, and then convert back to the output format.
Return values are of the same dimensions as input variables.
"""
if input_format == output_format:
return data
if input_format not in from_converters:
raise KeyError('Input format {0} not known!'.format(input_format))
if output_format not in to_converters:
raise KeyError('Output format {0} not known!'.format(output_format))
# internally we always work with the second axis of double the frequency
# size
if len(data.shape) == 2 and data.shape[0] == 2 and one_spectrum:
work_data = np.hstack((data[0, :], data[1, :]))
one_spec_2d = True
else:
work_data = data
one_spec_2d = False
cre, cim = from_converters[input_format](work_data)
converted_data = to_converters[output_format](cre, cim)
if one_spec_2d:
part1, part2 = split_data(converted_data, True)
converted_data = np.vstack((part1, part2))
# reshape to input size (this should only be necessary for 1D data)
if len(data.shape) == 1:
converted_data = np.squeeze(converted_data)
return converted_data
```
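A short usage sketch for `convert` (illustrative values; the import assumes the module lives at `reda/eis/convert.py` as shown above):
```python
import numpy as np
from reda.eis.convert import convert

# One spectrum sampled at two frequencies, given as resistivity magnitude and
# phase: first half magnitudes [Ohm m], second half phases [mrad].
rmag_rpha = np.array([100.0, 90.0, -5.0, -8.0])

# Convert to real and imaginary parts of the conductivity ...
cre_cim = convert('rmag_rpha', 'cre_cim', rmag_rpha)
print(cre_cim)

# ... and back, which recovers the original values up to rounding.
print(convert('cre_cim', 'rmag_rpha', cre_cim))
```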
#### File: importers/utils/transforms.py
```python
class transform_electrodes_roll_along(object):
"""This function shifts all electrode numbers by a fixed offset, as
commonly encountered for roll-a-long measurement schemes.
"""
def __init__(self, shiftby=0):
"""
Parameters
----------
shiftby : int
Shift electrode numbers (abmn) by this offset.
"""
self.shiftby = shiftby
def transform(self, data, electrodes, topography):
data[['a', 'b', 'm', 'n']] += self.shiftby
return data, electrodes, topography
```
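A brief usage sketch (the electrode numbers and shift are made up; the import path assumes the file location shown above):
```python
import pandas as pd
from reda.importers.utils.transforms import transform_electrodes_roll_along

data = pd.DataFrame({'a': [1, 2], 'b': [2, 3], 'm': [3, 4], 'n': [4, 5]})
shift = transform_electrodes_roll_along(shiftby=24)
data, electrodes, topography = shift.transform(data, None, None)
print(data)  # a/b/m/n columns are now shifted by 24
```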
#### File: reda/plotters/time_series.py
```python
import numpy as np
import pandas as pd
import reda.utils.mpl
plt, mpl = reda.utils.mpl.setup()
def plot_quadpole_evolution(dataobj, quadpole, cols, threshold=5,
rolling=False, ax=None):
"""Visualize time-lapse evolution of a single quadropole.
Parameters
----------
dataobj : :py:class:`pandas.DataFrame`
DataFrame containing the data. Please refer to the documentation for
required columns.
quadpole : list of integers
        Electrode numbers of the quadrupole.
cols : str
The column/parameter to plot over time.
threshold : float
Allowed percentage deviation from the rolling standard deviation.
rolling : bool
Calculate rolling median values (the default is False).
ax : mpl.axes
Optional axes object to plot to.
"""
if isinstance(dataobj, pd.DataFrame):
df = dataobj
else:
df = dataobj.data
subquery = df.query(
'a == {0} and b == {1} and m == {2} and n == {3}'.format(*quadpole))
# rhoa = subquery['rho_a'].values
# rhoa[30] = 300
# subquery['rho_a'] = rhoa
if ax is not None:
fig = ax.get_figure()
else:
fig, ax = plt.subplots(1, 1, figsize=(20 / 2.54, 7 / 2.54))
ax.plot(
subquery['timestep'],
subquery[cols],
'.',
color='blue',
label='valid data',
)
if rolling:
# rolling mean
rolling_m = subquery.rolling(3, center=True, min_periods=1).median()
ax.plot(
rolling_m['timestep'].values,
rolling_m['rho_a'].values,
'-',
label='rolling median',
)
ax.fill_between(
rolling_m['timestep'].values,
rolling_m['rho_a'].values * (1 - threshold),
rolling_m['rho_a'].values * (1 + threshold),
alpha=0.4,
color='blue',
label='{0}\% confidence region'.format(threshold * 100),
)
# find all values that deviate by more than X percent from the
# rolling_m
bad_values = (np.abs(
np.abs(subquery['rho_a'].values - rolling_m['rho_a'].values) /
rolling_m['rho_a'].values) > threshold)
bad = subquery.loc[bad_values]
ax.plot(
bad['timestep'].values,
bad['rho_a'].values,
'.',
# s=15,
color='r',
label='discarded data',
)
ax.legend(loc='upper center', fontsize=6)
# ax.set_xlim(10, 20)
ax.set_ylabel(r'$\rho_a$ [$\Omega$m]')
ax.set_xlabel('timestep')
return fig, ax
```
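A usage sketch with synthetic data, assuming `plot_quadpole_evolution` from above is importable (e.g. from `reda.plotters.time_series`); the quadrupole, threshold and values are illustrative:
```python
import numpy as np
import pandas as pd

# One quadrupole (1,2,3,4) measured over 20 timesteps with noisy apparent resistivity.
df = pd.DataFrame({
    'a': 1, 'b': 2, 'm': 3, 'n': 4,
    'timestep': np.arange(20),
    'rho_a': 100 + np.random.randn(20),
})
fig, ax = plot_quadpole_evolution(df, [1, 2, 3, 4], 'rho_a',
                                  threshold=0.05, rolling=True)
fig.savefig('quadpole_evolution.png', dpi=300)
```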
#### File: reda/utils/data.py
```python
import os
from urllib import request
import zipfile
import pandas as pd
# if this is set to a valid directory path, try to fetch data from here
# The path could be a local copy of the data repository
use_local_data_repository = None
repository_url = ''.join((
'https://raw.githubusercontent.com/geophysics-ubonn/'
'reda_examples_mw/master/'
))
inventory_filename = 'inventory.dat'
def download_data(identifier, outdir):
"""Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory
"""
# determine target
if use_local_data_repository is not None:
url_base = 'file:' + request.pathname2url(
use_local_data_repository + os.sep)
else:
url_base = repository_url
print('url_base: {}'.format(url_base))
url = url_base + inventory_filename
# download inventory file
    filename, headers = request.urlretrieve(url)
df = pd.read_csv(
filename,
delim_whitespace=True,
comment='#',
header=None,
names=['identifier', 'rel_path'],
)
# find relative path to data file
rel_path_query = df.query('identifier == "{}"'.format(identifier))
if rel_path_query.shape[0] == 0:
raise Exception('identifier not found')
rel_path = rel_path_query['rel_path'].values[0]
# download the file
url = url_base + rel_path
print('data url: {}'.format(url))
    filename, headers = request.urlretrieve(url)
if not os.path.isdir(outdir):
os.makedirs(outdir)
zip_obj = zipfile.ZipFile(filename)
zip_obj.extractall(outdir)
```
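A usage sketch of `download_data`, assuming the function above is in scope; the identifier below is hypothetical and must exist in the remote `inventory.dat` for the download to succeed:
```python
# Fetch an example data set into ./example_data (identifier is hypothetical).
download_data('syscal_example', 'example_data')
```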
#### File: reda/utils/enter_directory.py
```python
import os
class EnterDirectory(object):
"""This is a context manager that enters a given directory and returns to
the initial current directory after finishing
"""
def __init__(self, directory):
self.pwd = os.getcwd()
self.directory = os.path.abspath(str(directory))
def __enter__(self):
os.chdir(self.directory)
def __exit__(self, dtype, value, traceback):
os.chdir(self.pwd)
class CreateEnterDirectory(EnterDirectory):
"""This is a context manager that enters a given directory and returns to
the initial current directory after finishing. If the target directory does
not exist, create it.
"""
def __enter__(self):
os.makedirs(self.directory, exist_ok=True)
os.chdir(self.directory)
```
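A brief usage sketch of the two context managers above (the paths are illustrative):
```python
import os

with CreateEnterDirectory('results/run_01'):
    # The directory is created if missing and entered only for this block.
    with open('log.txt', 'w') as f:
        f.write('hello\n')

# Back in the original working directory here.
print(os.getcwd())
```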
|
{
"source": "j-gallistl/reda_testing",
"score": 2
}
|
#### File: IRIS_SyscalPro/02_bin_txt/test_syscal_02.py
```python
import os
import reda
basepath = os.path.dirname(__file__) + os.sep
def test_loading_normal():
ert = reda.ERT()
ert.import_syscal_bin(basepath + 'data_normal.bin')
ert_txt = reda.ERT()
ert_txt.import_syscal_txt(basepath + 'data_normal.txt')
def test_loading_reciprocal():
ert = reda.ERT()
ert.import_syscal_bin(
basepath + 'data_reciprocal.bin',
reciprocals=48,
)
ert_txt = reda.ERT()
ert_txt.import_syscal_txt(
basepath + 'data_reciprocal.txt',
reciprocals=48,
)
```
|
{
"source": "jgallud/patronesPython",
"score": 3
}
|
#### File: jgallud/patronesPython/laberintoGUI.py
```python
import os
from Tkinter import *
import Tkinter,Tkconstants,tkFileDialog
from laberintoBuilder import *
class App:
def __init__(self, master):
#wx.Frame.__init__(self, parent, title="LaberintoGUI", size=(800,600))
#self.frame=Frame(master)
#self.window.geometry("800x600")
#self.frame.pack()
self.origen=Point(350,14)
self.ancho=None
self.alto=None
self.puedeDibujar=False
self.dc=None
#panel = wx.Panel(self)
#self.quote = wx.StaticText(panel, label="Archivo: ", pos=(20, 30))
self.lbl1=Label(master,text="Archivo: ")
self.lbl1.pack()
#self.lbl1.grid(column=0,row=1)
self.lbl2 = Label(master, text="")
self.lbl2.pack()
#self.lbl2.grid(column=1, row=1)
self.btn1=Button(master,text="Seleccionar y procesar",command=self.OnClick)
self.btn1.pack()
#self.btn1.grid(column=0,row=2)
self.dc=Canvas(master,width=900,height=700)
#self.dc.grid(column=0,row=0)
self.dc.pack()
#self.archivo = wx.TextCtrl(panel, value="Localiza el archivo", pos=(20,50),size=(150,20),style=wx.TE_READONLY)
#self.button =wx.Button(panel, label="Seleccionar y procesar", pos=(20, 80))
#self.bichos = wx.TextCtrl(panel, value="", pos=(20,150),size=(150,20),style=wx.TE_READONLY)
#self.Bind(wx.EVT_BUTTON, self.OnClick,self.button)
#self.Bind(wx.EVT_PAINT,self.on_paint)
#self.Show()
#self.button=Button(self.frame,text="Seleccionar y procesar",command=self.OnClick)
#self.button.pack()
def on_paint(self,event=None):
#self.dc=wx.PaintDC(self)
#self.dc.Clear()
#self.dc.SetPen(wx.Pen("BLACK",3))
#self.dc.DrawLine(300,0,350,50)
if (self.puedeDibujar):
self.dibujarLaberinto()
def OnClick(self):
self.dirname = ""
# dlg = wx.FileDialog(self, "Selecciona un archivo", self.dirname, "", "*.json", wx.FD_OPEN)
# if dlg.ShowModal() == wx.ID_OK:
# self.filename = dlg.GetFilename()
# self.dirname = dlg.GetDirectory()
# #f = open(os.path.join(self.dirname, self.filename), 'r')
# #self.control.SetValue(f.read())
# self.archivo.SetValue(self.filename)
# #f.close()
# director=Director()
# director.procesar(os.path.join(self.dirname, self.filename))
# self.juego=director.builder.juego
# self.bichos.SetValue(str(len(self.juego.bichos)))
# self.mostrarLaberinto()
# dlg.Destroy()
self.filename=tkFileDialog.askopenfilename(initialdir="./",title="Selecciona archivo",filetypes=[("JSON","*.json")])
if (self.filename!=""):
print(self.filename)
self.lbl2.configure(text=self.filename)
director=Director()
director.procesar(self.filename)
self.juego=director.builder.juego
#self.bichos.SetValue(str(len(self.juego.bichos)))
self.mostrarLaberinto()
def mostrarLaberinto(self):
self.calcularPosiciones()
self.normalizar()
self.calcularDimensiones()
self.asignarPuntosReales()
self.puedeDibujar=True
#self.Refresh()
#self.Update()
self.dibujarLaberinto()
def calcularPosiciones(self):
if (self.juego!=None):
h=self.juego.obtenerHabitacion(1)
h.punto=Point(0,0)
h.calcularPosicion()
def normalizar(self):
minX=0
minY=0
for h in self.juego.laberinto.hijos:
if (h.punto.x<minX):
minX=h.punto.x
if (h.punto.y<minY):
minY=h.punto.y
for h in self.juego.laberinto.hijos:
punto=h.punto
h.punto.x=punto.x+abs(minX)
h.punto.y=punto.y+abs(minY)
def calcularDimensiones(self):
maxX=0
maxY=0
self.origen=Point(5,5)
for h in self.juego.laberinto.hijos:
if (h.punto.x>maxX):
maxX=h.punto.x
if (h.punto.y>maxY):
maxY=h.punto.y
maxX=maxX+1
maxY=maxY+1
self.ancho=850/maxX
self.alto=650/maxY
def asignarPuntosReales(self):
for h in self.juego.laberinto.hijos:
x=self.origen.x+(h.punto.x * self.ancho)
y=self.origen.y+(h.punto.y * self.alto)
h.punto=Point(x,y)
h.extent=Point(self.ancho,self.alto)
for ho in h.hijos:
ho.asignarPuntosReales(h)
def dibujarLaberinto(self):
for h in self.juego.laberinto.hijos:
h.dibujar(self)
def dibujarContenedorRectangular(self,unCont):
x1=unCont.punto.x
y1=unCont.punto.y
x2=unCont.extent.x
y2=unCont.extent.y
#print(x1,y1,x2,y2)
self.dc.create_rectangle(x1,y1,x1+x2,y1+y2)
root=Tk()
app = App(root)
#frame = MainWindow(None, "Sample editor")
root.title("LaberintoGUI")
root.geometry("1000x800")
root.minsize(width=1000,height=800)
root.mainloop()
#root.destroy()
```
|
{
"source": "jgamblin/btrecon",
"score": 3
}
|
#### File: jgamblin/btrecon/btrecon.py
```python
import os
import re
import time
import sys
import subprocess
import readline
def color(text, color_code):
if sys.platform == "win32" and os.getenv("TERM") != "xterm":
return text
return '\x1b[%dm%s\x1b[0m' % (color_code, text)
def red(text):
return color(text, 31)
def blink(text):
return color(text, 5)
def green(text):
return color(text, 32)
def blue(text):
return color(text, 34)
#clean up old files
os.system("rm -rf devices.txt")
os.system("rm -rf btaddresses.txt")
print "\n"
print "Finding Bluetooth Devices..."
os.system("hcitool -i hci0 scan > devices.txt")
print "Found The Following Devices:"
os.system("cat devices.txt | grep -i '[0-9A-F]\{2\}\(:[0-9A-F]\{2\}\)\{5\}'")
os.system("cat devices.txt | grep -o '[0-9A-F]\{2\}\(:[0-9A-F]\{2\}\)\{5\}' > btaddresses.txt")
b = open('btaddresses.txt')
macs = b.readlines()
b.close()
print "\n"
print "Starting Information Gathering"
print "\n"
for mac in macs:
print (green("Information about %s" % mac))
subprocess.call("hcitool name %s" % mac, shell=True)
print "\n"
subprocess.call("hcitool info %s" % mac, shell=True)
print "\n"
subprocess.call("sdptool records %s" % mac, shell=True)
print "\n"
subprocess.call("sdptool browse %s" % mac, shell=True)
print "\n"
print "\n"
```
|
{
"source": "jgamblin/gitupdates",
"score": 3
}
|
#### File: jgamblin/gitupdates/gitupdates.py
```python
import os
import sys
import subprocess


def color(text, color_code):
    if sys.platform == "win32" and os.getenv("TERM") != "xterm":
        return text
    return '\x1b[%dm%s\x1b[0m' % (color_code, text)


def red(text):
    return color(text, 31)


def blue(text):
    return color(text, 34)


searchspace = os.path.expanduser("~")
gitlists = []
outofdategits = []

# Collect every directory under the home directory that contains a .git folder.
for dirname, dirnames, filenames in os.walk(searchspace):
    if '.git' in dirnames:
        gitlists.append(dirname)
gits = list(set(gitlists))

# A repo is out of date when "git status" reports the branch is behind its remote.
for git in gits:
    os.chdir(git)
    command = "git status -uno | grep -ci 'Your branch is behind'"
    ood = subprocess.getoutput(command)
    if ood == '1':
        outofdategits.append(git)

if not outofdategits:
    print('All Repos are up-to-date!')
    sys.exit()

print('The following repos are out of date:')
print("\n".join(outofdategits))

resp = input('\nDo You Want To Update The Repos? (Y/N): ')
if resp.lower() in ["yes", "y"]:
    print('\nOk, Lets do this!')
    for outofdategit in outofdategits:
        os.chdir(outofdategit)
        print('Updating Repo ' + outofdategit)
        print(subprocess.getoutput("git pull"))
        print('\n')
else:
    print('NO? Ok Have Fun!')
```
|
{
"source": "jgamblin/randommac",
"score": 3
}
|
#### File: jgamblin/randommac/randommac.py
```python
import os
import random
import fcntl
import socket
import struct
import subprocess
import uuid
from itertools import imap
from random import randint
import time
def randomMAC():
rmac = ':'.join(['%02x'%x for x in imap(lambda x:randint(0,255), range(6))])
return rmac
def hwMAC():
hwaddr = subprocess.Popen("networksetup -getmacaddress en0 | awk '{print $3}'", shell=True, stdout=subprocess.PIPE).stdout.read()
hwaddr2 = hwaddr[0:17]
return hwaddr2
def currentMAC():
cmac = ':'.join(['{:02x}'.format((uuid.getnode() >> i) & 0xff) for i in range(0,8*6,8)][::-1])
return cmac
#Store currentMAC as oldMAC
oldMAC = ':'.join(['{:02x}'.format((uuid.getnode() >> i) & 0xff) for i in range(0,8*6,8)][::-1])
rMAC = randomMAC()
#Say what I am doing
print "\n"
print "Changing your MAC address to %s from %s." % (rMAC,oldMAC)
print "This will take 30 seconds."
#Change Mac Address
print "Changing Mac Address to %s" %rMAC
os.system("ifconfig en0 ether %s" %rMAC)
time.sleep(10)
#Turn Off Wireless
print "Turning Off Wireless"
os.system("ifconfig en0 down")
time.sleep(10)
#Turn On Wireless
print "Turning On Wireless"
os.system("ifconfig en0 up")
time.sleep(10)
#Print New Mac
nmac = subprocess.Popen("ifconfig en0 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'", shell=True, stdout=subprocess.PIPE).stdout.read()
print "Your new MAC address is %s" %nmac
```
|
{
"source": "jgamer42/final_compugrafica",
"score": 3
}
|
#### File: final_compugrafica/menu/menu.py
```python
import os
import sys
import pygame as pg
from motor.constantes import *
from motor.utilidades import *
sys.path.append(os.getcwd() + "/motor/")
pygame.init()
class Icono(pg.sprite.Sprite):
def __init__(self,pos,icon):
pg.sprite.Sprite.__init__(self)
self.image = icon
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.click = False
#pg.mixer.init()
fondo = pg.image.load("./menu/Inicio.png")
iconos = pg.sprite.Group()
IconoJugar = pg.image.load("./menu/IconoJugar.png")
jugar = Icono([917,297],IconoJugar)
iconos.add(jugar)
IconoOpciones = pg.image.load("./menu/IconoOpciones.png")
opciones = Icono([917,389],IconoOpciones)
iconos.add(opciones)
IconoCreditos = pg.image.load("./menu/IconoCreditos.png")
creditos = Icono([917,482],IconoCreditos)
iconos.add(creditos)
IconoSalir = pg.image.load("./menu/IconoSalir.png")
salir = Icono([917,575],IconoSalir)
iconos.add(salir)
FondContr = pg.image.load("./menu/controles.png")
FondCreditos = pg.image.load("./menu/creditos.png")
IconoAceptar = pg.image.load("./menu/IconoAceptar.png")
aceptar = Icono([992,602], IconoAceptar)
iconos.add(aceptar)
IconoSonidoON = pg.image.load("./menu/iconoSonidoON.png")
IconoSonidoOFF = pg.image.load("./menu/iconoSonidoOFF.png")
sonido = Icono([506,559], IconoSonidoOFF)
iconos.add(sonido)
def inicio(ventana,estados,mouse,click,sonidos):
if estados["opciones"]:
ventana.fill(NEGRO)
if sonidos.getMudo():
ventana.blit(FondContr,[0,0])
ventana.blit(IconoSonidoOFF,[506,559])
else:
ventana.blit(FondContr,[0,0])
if aceptar.rect.collidepoint(mouse):
if click[0] == 1:
sonidos.click()
ventana.blit(IconoAceptar,[992,602])
estados["opciones"] = False
else:
ventana.blit(IconoAceptar,[992,602])
elif sonido.rect.collidepoint(mouse):
if sonidos.getMudo():
if click[0] == 1:
#ventana.blit(IconoSonidoON,[506,559])
sonidos.click()
sonidos.mudo()
else:
ventana.blit(IconoSonidoON,[506,559])
else:
if click[0] == 1:
#ventana.blit(IconoSonidoOFF,[506,559])
sonidos.click()
sonidos.mudo()
else:
ventana.blit(IconoSonidoOFF,[506,559])
elif estados["creditos"]:
ventana.fill(NEGRO)
ventana.blit(FondCreditos,[0,0])
if aceptar.rect.collidepoint(mouse):
ventana.blit(IconoAceptar,[992,602])
if click[0] == 1:
sonidos.click()
estados["creditos"] = False
else:
ventana.fill(NEGRO)
ventana.blit(fondo,[0,0])
if jugar.rect.collidepoint(mouse):
ventana.blit(IconoJugar,[917,297])
if click[0] == 1:
sonidos.click()
estados["inicio"] = False
estados["historia"] = True
estados["dialogo1"] = True
elif opciones.rect.collidepoint(mouse):
ventana.blit(IconoOpciones,[917,389])
if click[0] == 1:
sonidos.click()
estados["opciones"] = True
elif creditos.rect.collidepoint(mouse):
ventana.blit(IconoCreditos,[917,482])
if click[0] == 1:
sonidos.click()
estados["creditos"] = True
elif salir.rect.collidepoint(mouse):
ventana.blit(IconoSalir,[917,575])
if click[0] == 1:
sonidos.click()
estados["inicio"] = False
estados["jugando"] = False
pg.display.flip()
'''
ventana.blit(history, [20,38])
ventana.blit(history2, [645,38])
ventana.blit(history3, [20,38])
'''
```
#### File: final_compugrafica/motor/ambiente.py
```python
import os
import sys
import globales
import pygame
from constantes import *
sys.path.append(os.getcwd() + "/motor/")
fondos_mapas = {"mapaA1Fondo":pygame.image.load("mapa/mapaA1Fondo.png")}
balas_enemigos = None
def ciclo_juego(ventana,elementos_dibujar,gui):
global fondos_mapas
global balas_enemigos
ventana.fill(NEGRO)
ventana.blit(fondos_mapas["mapaA1Fondo"],velocidad_fondo())
#print(balas_enemigos)
#print(len(balas_enemigos))
for grupo_sprites in elementos_dibujar:
grupo_sprites.update()
grupo_sprites.draw(ventana)
gui.update()
pygame.display.flip()
def velocidad_fondo():
globales.x_fondo += globales.velx_entorno
globales.y_fondo += globales.vely_entorno
return [globales.x_fondo,globales.y_fondo]
def gravedad(objeto):
if objeto.vely == 0:
objeto.vely = GRAVEDAD
else:
objeto.vely += GRAVEDAD
def agregar_bala(objeto,fuente):
global balas_enemigos
if fuente == "jugador":
pass
elif fuente == "enemigo":
balas_enemigos = objeto
#balas_enemigos.add(objeto)
#print(balas_enemigos)
#balas_enemigos.append("hola")
print(balas_enemigos)
def protector_memoria(elementos):
for elemento in elementos:
for e in elemento:
if(e.rect.bottom <= 0) or (e.rect.top > ALTO):
elemento.remove(e)
if(e.rect.x <= 0) or (e.rect.x > ANCHO):
elemento.remove(e)
```
#### File: final_compugrafica/motor/utilidades.py
```python
import os
import sys
import pygame
sys.path.append(os.getcwd() + "/motor/")
from constantes import *
def crear_sprite(dir_sabana, dimensiones, columnas, filas=1, opcion=None):
"""
    Crop animation frames out of a sprite sheet.
    sabana: image containing the graphics (the sprite sheet)
    dimensiones: [width, height] of each cropped frame
    columnas: number of columns in the sprite sheet
    filas: number of rows in the sprite sheet (default 1)
    opcion: pass "matriz" to get the frames back as a matrix (default None)
"""
sabana = pygame.image.load(dir_sabana)
ancho_cuadros = dimensiones[0]
alto_cuadros = dimensiones[1]
animacion = []
    # Cut the sheet as a single row of frames
if filas == 1:
for cuadro in range(columnas):
cuadro = sabana.subsurface(ancho_cuadros * cuadro, 0, ancho_cuadros, alto_cuadros)
animacion.append(cuadro)
    # cut as a matrix of frames (rows x columns)
elif filas > 1 and opcion == "matriz":
for f in range(filas):
fila = []
for cuadro in range(columnas):
cuadro = sabana.subsurface(ancho_cuadros * cuadro,alto_cuadros * f,ancho_cuadros,alto_cuadros)
fila.append(cuadro)
animacion.append(fila)
    # cut a multi-row sheet into one flat row of frames
elif filas > 1 and opcion == None:
for fila in range(filas):
for cuadro in range(columnas):
cuadro = sabana.subsurface(ancho_cuadros * cuadro,alto_cuadros * fila,ancho_cuadros,alto_cuadros)
animacion.append(cuadro)
return animacion
def animar(frame_actual, numero_frames,direccion):
if direccion == 0:
return frame_actual
elif direccion == 1:
if frame_actual < (numero_frames - 1):
frame_actual += 1
else:
frame_actual = 0
return frame_actual
elif direccion == -1:
if frame_actual > 0:
frame_actual -= 1
else:
frame_actual = (numero_frames - 1)
return frame_actual
```
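A usage sketch for the helpers above, assuming they are importable; the sprite-sheet path is hypothetical and assumed to hold 4 frames of 64x64 pixels in a single row:
```python
import pygame

pygame.init()

# Hypothetical sprite sheet: 4 frames of 64x64 pixels laid out in one row.
frames = crear_sprite("sprites/jugador.png", [64, 64], 4)

frame_actual = 0
frame_actual = animar(frame_actual, len(frames), 1)   # advance one frame
frame_actual = animar(frame_actual, len(frames), -1)  # step back one frame
```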
|
{
"source": "jgamper/biobertology",
"score": 2
}
|
#### File: biobertology/biobertology/download.py
```python
from docopt import docopt
import os
from os.path import expanduser
import wget
import shutil
LINK = "https://www.dropbox.com/s/dc2ki2d4jv8isrb/biobert_weights.zip?dl=1"
def get_default_biobert_path():
"""
:return: path "/home/user_name/.biobertology
"""
home_dir_path = expanduser("~")
return os.path.join(home_dir_path, ".biobertology")
def download_and_extract(target_directory):
"""
Downloads and extracts biobert weights
:return:
"""
# Make sure that directory for pannuke exists
os.makedirs(target_directory, exist_ok=True)
wget.download(LINK, out=target_directory)
downloaded_file_path = os.path.join(target_directory, "biobert_weights.zip")
shutil.unpack_archive(downloaded_file_path, target_directory)
os.remove(downloaded_file_path)
def parse_arguments(arguments):
user_directory = arguments["--dir"]
target_directory = user_directory if user_directory else get_default_biobert_path()
return target_directory
if __name__ == "__main__":
arguments = docopt(__doc__)
target_directory = parse_arguments(arguments)
download_and_extract(target_directory)
```
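A minimal usage sketch of the functions above, assuming they are in scope; note that this downloads and unpacks a large archive into `~/.biobertology`:
```python
# Download and unpack the BioBERT weights into the default location.
target = get_default_biobert_path()
download_and_extract(target)
print('weights extracted to', target)
```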
|
{
"source": "jgamper/wsi-syntax",
"score": 2
}
|
#### File: transformers/tiling/tiling.py
```python
import warnings
from typeguard import typechecked
import numpy as np
from random import shuffle
import pandas as pd
from typing import Optional
from PIL import ImageDraw
from syntax.transformers.base import StaticTransformer
from syntax.slide import Slide
@typechecked
class SimpleTiling(StaticTransformer):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (:obj:`int`, optional): Description of `attr2`.
"""
def __init__(self,
magnification: int,
tile_size: int,
max_per_class: int,
annotation_threshold: Optional[float] = None):
"""
Args:
magnification:
tile_size:
"""
self.magnification = magnification
self.tile_size = tile_size
self.max_per_class = max_per_class
self.anno_threshold = annotation_threshold
self.rejected = 0
def transform(self, slide: Slide, target=None):
"""
TODO!
Args:
slide:
target:
Returns:
"""
# Check if slide already has patch-frame if it does, then just return it as it is
if self._check_patch_frame(slide):
warnings.warn("{} slide already has tile_frame, yet has been passed to PatchExtractor".format(self.slide.ID))
return slide
# Check if has tissue mask
self._check_tissue_mask(slide)
# Check if has annotation mask
self._check_annotation(slide)
self.slide = slide
# Get classes and approximate coordinates to 'seed' the patch sampling process.
self._get_classes_and_seeds()
slide.tile_frame = self._sample_patches(self.slide.verbose)
return slide
@staticmethod
def visualize(slide: Slide, size: int):
"""
Args:
slide:
size:
Returns:
"""
wsi_thumb = slide.get_thumbnail(size=(size, size))
xy = list(zip(slide.tile_frame.w, slide.tile_frame.h))
for w, h in xy:
w = int(w / pow(2, slide.level_count + 2)) + 1
h = int(h / pow(2, slide.level_count + 2)) + 1
mask = wsi_thumb.copy()
mask_draw = ImageDraw.Draw(mask)
mask_draw.rectangle(((w, h), (w + 20, h + 20)), fill=255)
wsi_thumb.paste(mask)
return wsi_thumb
def _sample_patches(self, verbose=False):
"""Sample tile and return in a tile_frame"""
frame = pd.DataFrame(data=None, columns=['tile_id', 'w', 'h', 'class', 'mag', 'size', 'parent', 'lvl0'])
for c in self.class_list:
index = self.class_list.index(c)
seeds = self.class_seeds[index]
count = 0
for j, seed in enumerate(seeds):
_, info = self._class_c_patch_i(c, j)
if info is not None:
frame = frame.append(info, ignore_index=1)
if isinstance(self.max_per_class, int):
# If not rejected increment count
if info is not None:
count += 1
if count >= (self.max_per_class - 1):
break
if verbose:
print('Rejected {} patches for file {}'.format(self.rejected, self.slide.ID))
return frame
def _get_classes_and_seeds(self):
"""Get classes and approximate coordinates to 'seed' the patch sampling process.
Builds the objects self.class_list and self.class_seeds."""
# Do class 0 i.e. unannotated first.
mask = self.tissue_mask.data
nonzero = np.nonzero(mask)
factor = self.slide.level_downsamples[self.tissue_mask.level]
N = nonzero[0].size
coordinates = [(int(nonzero[0][i] * factor), int(nonzero[1][i] * factor)) for i in range(N)]
shuffle(coordinates)
self.class_list = [0]
self.class_seeds = [coordinates]
# If no annotation we're done.
if self.annotation is None:
return
# Now add other classes.
annotation_low_res, factor = self.annotation.get_low_res_numpy(self.xml_reader)
classes = sorted(list(np.unique(annotation_low_res)))
assert classes[0] == 0
classes = classes[1:]
for c in classes:
mask = (annotation_low_res == c)
nonzero = np.nonzero(mask)
N = nonzero[0].size
coordinates = [(int(nonzero[0][i] * factor), int(nonzero[1][i] * factor)) for i in range(N)]
shuffle(coordinates)
self.class_list.append(c)
self.class_seeds.append(coordinates)
def _class_c_patch_i(self, c, i):
"""
Try and get the ith patch of class c. If we reject return (None, None).
:param c: class
:param i: index
:return: (patch, info_dict) or (None, None) if we reject patch.
"""
idx = self.class_list.index(c)
h, w = self.class_seeds[idx][i]
patch = self.slide.get_tile(w, h, self.magnification, self.tile_size)
tissue_mask_patch = self.tissue_mask.get_tile(w, h, self.magnification, self.tile_size)
if np.sum(tissue_mask_patch) / np.prod(tissue_mask_patch.shape) < 0.9:
self.rejected += 1
return None, None
info = {
'tile_id': i,
'w': w,
'h': h,
'parent': self.slide.ID,
'size': self.tile_size,
'mag': self.magnification,
'class': c,
'lvl0': self.slide.level0
}
# If no annotation we're done.
if self.annotation is None:
return patch, info
annotation_patch = self.annotation.get_tile(w, h, self.magnification, self.tile_size)
annotation_patch = np.asarray(annotation_patch)
print(annotation_patch.shape)
pixel_pattern = self.xml_reader.label_to_pixel(c)
mask = (annotation_patch == pixel_pattern)
if np.sum(mask) / np.prod(mask.shape) < self.anno_threshold:
self.rejected += 1
return None, None
return patch, info
def _check_patch_frame(self, slide: Slide):
"""Check if slide already has patch frame"""
return hasattr(slide, "tile_frame")
def _check_tissue_mask(self, slide: Slide):
"""Checks if slide has tissue mask, if not raises an issue"""
assert hasattr(slide, "tissue_mask"), "Slide {} does not have tissue mask!".format(slide.ID)
self.tissue_mask = slide.tissue_mask
def _check_annotation(self, slide: Slide):
"""Checks if has annotation and raises a warning"""
if not hasattr(slide, "annotation"):
self.annotation = None
warning_string = "{} slide does not have annotation mask supplied".format(slide.ID)
warnings.warn(warning_string)
else:
self.annotation = slide.annotation
```
|
{
"source": "jganhotra/gridspace-stanford-harper-valley",
"score": 2
}
|
#### File: src/models/mtl.py
```python
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from src.models.las import ListenAttendSpell
from src.models.ctc import CTCDecoder
class MultiTaskLearning(ListenAttendSpell):
def __init__(
self,
input_dim,
num_class,
label_maxlen,
listener_hidden_dim=256,
num_pyramid_layers=3,
dropout=0,
speller_hidden_dim=512,
speller_num_layers=1,
mlp_hidden_dim=128,
multi_head=1,
sos_index=0,
sample_decode=False,
):
super().__init__(
input_dim,
num_class,
label_maxlen,
listener_hidden_dim=listener_hidden_dim,
num_pyramid_layers=num_pyramid_layers,
dropout=dropout,
speller_hidden_dim=speller_hidden_dim,
speller_num_layers=speller_num_layers,
mlp_hidden_dim=mlp_hidden_dim,
multi_head=multi_head,
sos_index=sos_index,
sample_decode=sample_decode,
)
self.ctc_decoder = CTCDecoder(
listener_hidden_dim * 2,
num_class,
)
self.num_pyramid_layers = num_pyramid_layers
self.embedding_dim = listener_hidden_dim * 4
def forward(
self,
inputs,
ground_truth=None,
teacher_force_prob=0.9,
):
listener_feats, (listener_h, listener_c) = self.listener(inputs)
listener_hc = self.combine_h_and_c(listener_h, listener_c)
las_log_probs = self.speller(
listener_feats,
ground_truth=ground_truth,
teacher_force_prob=teacher_force_prob,
)
ctc_log_probs = self.ctc_decoder(listener_feats)
return ctc_log_probs, las_log_probs, listener_hc
def get_loss(
self,
ctc_log_probs,
las_log_probs,
targets,
num_labels,
input_lengths,
target_lengths,
pad_index=0,
blank_index=0,
label_smooth=0.1,
):
ctc_loss = self.ctc_decoder.get_loss(
ctc_log_probs,
targets,
# pyramid encode cuts timesteps in 1/2 each way
input_lengths // (2**self.num_pyramid_layers),
target_lengths,
blank=blank_index,
)
las_loss = super().get_loss(
las_log_probs,
targets,
num_labels,
pad_index=pad_index,
label_smooth=label_smooth,
)
return ctc_loss, las_loss
```
|
{
"source": "Jgansaown/wordle_solver",
"score": 3
}
|
#### File: wordle_solver/src/main.py
```python
import json
from pathlib import Path
from typing import Optional, List
from multiprocessing import Pool
import os
from wordle import match_guess_to_answer
from solver import Solver
def read_words_list(path: Path, sep: str = '\n'):
with open(path) as f:
return f.read().split(sep)
def compute_effectiveness(answers: List[str], test_set: List[str], ranking_algorithm: str):
solver = Solver(word_list=test_set)
with Pool(os.cpu_count()) as p:
ret = p.map(solver.solve, answers)
print(ret)
with open('output.txt', 'w+') as file:
txt = '\n'.join([json.dumps(r) for r in ret])
file.write(txt)
if __name__ == '__main__':
answers = read_words_list(Path('files', 'wordle_list_of_answers.txt'))
test_set = read_words_list(Path('files', 'scrabble_5_letter_words.txt'))
compute_effectiveness(
answers = answers[:10],
test_set = test_set,
ranking_algorithm = 'naive' #
)
# import numpy as np
# with open('output.txt') as f:
# txt = f.read()
# result = np.array([ line.split('=') for line in txt.split('\n') ])
# nums = np.array([ int(n) for n in result[:, 1] ])
# print(nums)
# print(f'Minimum guesses: {nums.min()}, Maximum guesses: {nums.max()}, Average guesses: {nums.mean()}')
```
#### File: wordle_solver/tests/test_wordle.py
```python
import unittest
from src.wordle import match_guess_to_answer as match
class TestWordleMatch(unittest.TestCase):
def test_answer_and_guess_same_length(self):
with self.assertRaises(AssertionError) as e:
match('JASON', 'A')
self.assertEqual(e.exception.args, ('guess and answer must be the same length',))
def test_guess_is_all_correct(self):
self.assertEqual(match('ABCDE', 'ABCDE'),'CCCCC')
self.assertEqual(match('AAAAA', 'AAAAA'),'CCCCC')
def test_guess_is_all_absent(self):
self.assertEqual(match('ABCDE', 'FGHIJ'),'AAAAA')
self.assertEqual(match('AAAAA', 'BBBBB'),'AAAAA')
def test_guess_have_present_letters(self):
self.assertEqual(match('BABBB', 'AAAAA'), 'ACAAA')
self.assertEqual(match('BABAB', 'AAAAA'), 'ACACA')
self.assertEqual(match('BABBB', 'ABAAA'), 'PPAAA')
self.assertEqual(match('JASON', 'ABAAA'), 'PAAAA')
self.assertEqual(match('JASON', 'AABBB'), 'ACAAA')
self.assertEqual(match('ABCDD', 'DDDDD'), 'AAACC')
self.assertEqual(match('ABCDD', 'DDDAA'), 'PPAPA')
### Using result from actual wordle to test
def test_wordle_246(self):
self.assertEqual(match('tacit', 'horse'), 'AAAAA')
self.assertEqual(match('tacit', 'plans'), 'AAPAA')
self.assertEqual(match('tacit', 'crane'), 'PAPAA')
self.assertEqual(match('tacit', 'soare'), 'AAPAA')
self.assertEqual(match('tacit', 'three'), 'CAAAA')
self.assertEqual(match('tacit', 'trace'), 'CAPPA')
self.assertEqual(match('tacit', 'tacit'), 'CCCCC')
def test_wordle_250(self):
self.assertEqual(match('bloke', 'tacit'), 'AAAAA')
self.assertEqual(match('bloke', 'doner'), 'APAPA')
self.assertEqual(match('bloke', 'bowie'), 'CPAAC')
self.assertEqual(match('bloke', 'bloke'), 'CCCCC')
```
|
{
"source": "jganseman/cassatie-be",
"score": 3
}
|
#### File: jganseman/cassatie-be/cassweb_extract_text.py
```python
from os import listdir, rename, system, makedirs, getcwd
from os.path import isfile, join, exists, getsize
from subprocess import Popen, PIPE, TimeoutExpired
import shutil
import re
import multiprocessing
#import numpy as np
#import pandas as pd
#import PyPDF2
#from PIL import Image, ImageSequence
#import pytesseract
dataroot = getcwd()
dataset = "cassweb"
PDF_DIR = join(dataroot, dataset)
filepath = PDF_DIR
pdf_list = [file for file in listdir(filepath) if ".pdf" in file] # if "2014" in file]
pdf_list = list(filter(None, pdf_list)) # remove any empty results
print(len(pdf_list), pdf_list)
### OPTION 1 : use PyPDF. Uncomment this section to activate
# # Reworked version of the earlier single-process PyPDF2 OCR routine
# # note: processing a single PDF can easily take 30 seconds
# # Now IMPROVED(tm) for parallel processing in python --> 10 times faster
# # Set directory for results of this function
# RESULT_DIR = join(dataroot, dataset, "text-pypdf2")
# if not exists(RESULT_DIR):
# print("Created directory ", RESULT_DIR)
# makedirs(RESULT_DIR)
# # First, define a function to run on a single file
# def text_from_pdf_parallel(mypdf):
# print("Extracting text from file ", mypdf)
# fullfilename = join(filepath,mypdf)
# fileObj = open(fullfilename, 'rb')
# PDFreader = PyPDF2.PdfFileReader(fileObj)
# text = ''
# for page in PDFreader.pages: # eventually constrain [0:3]
# text += '\n' + page.extractText()
# fileObj.close()
# if text == None:
# pass
# else:
# mytxt = mypdf[:-3] + 'txt'
# #mode = 'a' if exists(join(filepath,txt)) else 'w'
# mode = 'w'
# with open(join(RESULT_DIR,mytxt), mode) as f:
# f.write(text)
# # Then, do the actual processing on all files
# pool = multiprocessing.Pool(20)
# pool.map(text_from_pdf_parallel, pdf_list)
### OPTION 2 (DEFAULT) : use pdftotext command line tool (Xpdf / poppler)
# --> these results are already much better
# and: takes only about 2 seconds/file
# command line options: http://www.xpdfreader.com/pdftotext-man.html
# (-layout could be interesting)
RESULT_DIR_2 = join(dataroot, dataset, "text-xpdf")
if not exists(RESULT_DIR_2):
print("Created directory ", RESULT_DIR_2)
makedirs(RESULT_DIR_2)
# NEW: Also parallelized. Define function:
def use_xpdf_parallel(mypdf):
print("Running xpdf text convertor on ", mypdf)
mycommand = "pdftotext " + join(PDF_DIR,mypdf) + " " + join(RESULT_DIR_2, mypdf[:-3]+"txt")
system(mycommand)
# Then, do the actual processing on all files
# pdf_list was already defined
pool = multiprocessing.Pool(20)
pool.map(use_xpdf_parallel, pdf_list)
# note: uses an external process that might hang? If so, interrupt kernel and retry
### OPTION 3: pdf2txt (pdfminer.six) Uncomment this section to activate.
# note: easily takes 2 minutes per file!
# --> these results are maybe best (text order is better preserved)
# --> however, the hyphenation is not resolved, as it is in xpdf
# RESULT_DIR_3 = join(dataroot, dataset, "text-pdf2txt")
# if not exists(RESULT_DIR_3):
# print("Created directory ", RESULT_DIR_3)
# makedirs(RESULT_DIR_3)
# # also parallelized. First, define function
# def use_pdfminer_parallel(mypdf):
# print("Running pdfminer pdf2txt.py text convertor on ", mypdf)
# mycommand = "pdf2txt.py -o " + join(RESULT_DIR_3, mypdf[:-3]+"txt") + " " + join(PDF_DIR,mypdf)
# system(mycommand)
# # Then, do the actual processing on all files
# # pdf_list was already defined
# pool = multiprocessing.Pool(20)
# pool.map(use_pdfminer_parallel, pdf_list)
```
#### File: jganseman/cassatie-be/cassweb_splitarrests.py
```python
from os import listdir, path, remove, makedirs, getcwd
from os.path import join, exists
import math
import time
import unicodedata
import re
dataroot = getcwd()
dataset = "cassweb"
TXT_DIR = join(dataroot, dataset,"text-xpdf")
OUTPUT_DIR = join(dataroot, dataset,"text-xpdf-split")# to save the resulting model
if not exists(OUTPUT_DIR):
print("Created directory ", OUTPUT_DIR)
makedirs(OUTPUT_DIR)
# too many special characters in there, also because of OCR errors. Limit to plain ASCII
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn' and ord(c) < 128
)
# note: pattern must contain 1 subgroup, style (foo(bar)):
# as the number of matches is used to iterate through the document
def splitarrests(filelist, pattern, numberpattern):
for myfile in filelist:
with open(join(TXT_DIR,myfile), 'r') as f:
filecontent = f.read()
arrestssplitted = re.split(pattern, filecontent) # split file into separate arrests
for i, text in enumerate(arrestssplitted):
# print(text.replace('\n', '*')[:50]) # print output to get idea of the regex capturing groups
# if line i matches the regex completely, skip i+1 (kamer), and add i+2
if re.match(pattern, text):
currentarrest = text.strip()
currentarrest += arrestssplitted[i+2].strip().replace("\n\n", "\n")
# currentarrest = unicodeToAscii(currentarrest).replace('\x0c', '\n').replace('_', '-') # leave this to reader
# get the arrest number
arrestnumber = re.match(numberpattern, text).group(1)
newfilename = myfile.replace(".txt", "-"+arrestnumber+".txt")
print("writing arrest " + arrestnumber + " to file " + newfilename)
with open(join(OUTPUT_DIR,newfilename), 'w') as f2:
f2.write(currentarrest)
# segment and preprocess the data
# for xpdf processed files: each new arrest is marked by e.g. "N° 7 \n x°" where x=1/2/3/V (kamernr / verenigd)
### PART 1
# split the files from 2014_06 until 2015_05
# sort and only do a selection
text_list = sorted([file for file in listdir(TXT_DIR) if 'ac' in file])
print(text_list)
# they use the following pattern:
pattern = "(N[°or]\.?\s*\d{1,4}\n([123][°oe•]|[Vv]))" #"(N°\s*\d{1,3}\n[123V]°)"
numberpattern = "N[°or]\.?\s*(\d{1,4})"
# still misses a few, e.g. N° 132 from ac_2015_03.txt
splitarrests(text_list, pattern, numberpattern)
### PART 2
# split the rest of the files: 2012_01 only
# Encoded differently in pdf, hence decoded differently by xpdf?
text_list = sorted([file for file in listdir(TXT_DIR) if 'ac' in file])
text_list = text_list[0]
print(text_list)
# they use the following pattern:
pattern = "(N°\s*\d{1,4}\n(\d{1,2}.\d{1,2}.12))" # Number of arrest, with date on next line
numberpattern = "N°\s*(\d{1,4})"
splitarrests([text_list], pattern, numberpattern)
### PART 3
# split files of the form AC-2002-01 up to AC-2011-12
text_list = sorted([file for file in listdir(TXT_DIR) if 'AC' in file and 'Reg' not in file])
print(text_list)
# they use the following pattern:
pattern = "(N[°or]\.?\s*\d{1,4}\n([123][°oe•]|[Vv]))" #"(N°\s*\d{1,3}\n[123V]°)"
numberpattern = "N[°or]\.?\s*(\d{1,4})"
splitarrests(text_list, pattern, numberpattern)
### PART 4
# arrests from 2000-01 to 2001-10
# note that these do not follow naming by month
text_list = sorted([file for file in listdir(TXT_DIR) if ('2000' in file or '2001' in file) and not '11' in file])
print(text_list)
# they use the following pattern:
pattern = "(N[°or]\.?\s*\d{1,4}\n([123][°oe•]|[Vv]))" #"(N°\s*\d{1,3}\n[123V]°)"
numberpattern = "N[°or]\.?\s*(\d{1,4})"
splitarrests(text_list, pattern, numberpattern)
### END
# arrests from 1999 and before are published in 2-column format.
# while the OCR did a fairly good job in following the columns, it probably still fails too often to be useful here
# so stopping here for now
### POSTPROCESSING: delete erroneously splitted arrests (still 2 arrests in the same file)
# There are never more than 1000 arrests per year
maxArrestsPerYear = 1000
beginyear=2000
endyear=2015
removefile=True
for currentyear in range(beginyear, endyear+1):
#select all files containing years within the range specified above
text_list = [file for file in listdir(OUTPUT_DIR) if str(currentyear) in file]
# files have a suffix that should be increasing by 1. If there is a gap, remove the previous file from the list
# as that file will contain multiple arrests (was not splitted properly)
previousOK = -1
for i in range(1,maxArrestsPerYear):
filenamesuffix = "-" + str(i) + ".txt"
if any(filenamesuffix in filename for filename in text_list):
previousOK = i
else:
if previousOK > 0:
# arrest not found? it's still part of the previous one: remove the previous file from consideration
toremovesuffix = "-" + str(previousOK) + ".txt"
previousfile = [s for s in text_list if toremovesuffix in s] # note this is an array
if previousfile and previousfile[0] in text_list: # only remove if not already removed
text_list.remove(previousfile[0])
if removefile:
remove(join(OUTPUT_DIR, previousfile[0]))
print("Removed erroneously splitted arrest ", previousfile[0])
else:
print("Not considering possibly erroneously splitted arrest ", previousfile[0])
# now text_list only has clean arrests.
```
#### File: cassatie-be/nertraining/printfortrainingdata.py
```python
def printmatch(mymatch, entityname):
# create match tuple
matchdef = ( str(mymatch.start()) , str(mymatch.end()), entityname )
# put it into an entity json array
myent = {'entities': [ matchdef ]}
# combine with source string into a tuple
# do not strip() the string here, as that will screw up matched start and end positions
mytuple = (mymatch.string, myent)
# print to command line
print(mytuple, ',', sep='')
# todo UTF encoding seems OK in Python3 (not Python2), but u"" signifier is not printed.
# for the moment, considering that harmless
```
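A usage sketch for `printmatch`, assuming it is in scope; the sentence, regular expression and entity label below are made up for illustration:
```python
import re

text = "Arrest van 12 januari 2015 in de zaak C.14.0123.N"
match = re.search(r"\d{1,2} \w+ \d{4}", text)
if match:
    # Prints a (text, {'entities': [(start, end, label)]}) training tuple.
    printmatch(match, "DATE")
```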
|
{
"source": "jganulin/learning-dsa",
"score": 2
}
|
#### File: src/python/hello_world.py
```python
def hello_world():
print("Hello World!")
# Prints Hello World to the console.
hello_world()
```
|