{
"source": "jesse-zwd/ins-backend",
"score": 3
}
|
#### File: apps/posts/models.py
```python
from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
# Create your models here.
class Post(models.Model):
user = models.ForeignKey(User, related_name='posts', on_delete=models.CASCADE)
caption = models.CharField(max_length=200, default='', verbose_name='caption')
tags = models.CharField(max_length=100, null=True, blank=True, verbose_name='tags')
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.caption
class Meta:
verbose_name = 'post'
verbose_name_plural = verbose_name
class PostFile(models.Model):
url = models.CharField(max_length=200, default='', verbose_name='url')
post = models.ForeignKey(Post, related_name='files', verbose_name='files', on_delete=models.CASCADE)
user = models.ForeignKey(User, verbose_name='user', on_delete=models.CASCADE)
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.url
class Meta:
verbose_name = 'files'
verbose_name_plural = verbose_name
class Comment(models.Model):
text = models.CharField(max_length=140, default='', verbose_name='comment')
post = models.ForeignKey(Post, related_name='comments', verbose_name='post', on_delete=models.CASCADE)
user = models.ForeignKey(User, verbose_name='user', on_delete=models.CASCADE)
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.text
class Meta:
verbose_name = 'comment'
verbose_name_plural = verbose_name
class Like(models.Model):
post = models.ForeignKey(Post, verbose_name='post', on_delete=models.CASCADE)
user = models.ForeignKey(User, verbose_name='user', on_delete=models.CASCADE)
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.post.caption
class Meta:
verbose_name = 'like'
verbose_name_plural = verbose_name
class Follow(models.Model):
following = models.ForeignKey(User, related_name='following', verbose_name='following', on_delete=models.CASCADE)
follower = models.ForeignKey(User, verbose_name='follower', on_delete=models.CASCADE)
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.following.username
class Meta:
verbose_name = 'follow'
verbose_name_plural = verbose_name
class Save(models.Model):
post = models.ForeignKey(Post, verbose_name='post', on_delete=models.CASCADE)
user = models.ForeignKey(User, verbose_name='user', on_delete=models.CASCADE)
createdAt = models.DateTimeField(default=datetime.now, verbose_name='createdAt')
def __str__(self):
return self.post.caption
class Meta:
verbose_name = 'save'
verbose_name_plural = verbose_name
```
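A quick sketch of how these models might be queried from a Django shell. The related names (`posts`, `files`, `comments`) come from the definitions above; the import path `apps.posts.models` and the helper functions are assumptions for illustration only.
```python
# Hypothetical usage sketch; assumes the app is installed and migrations have run.
from django.contrib.auth import get_user_model
from apps.posts.models import Post, Like, Follow  # import path assumed from the file header

User = get_user_model()

def feed_for(username):
    """Return the posts of every account this user follows, newest first."""
    user = User.objects.get(username=username)
    followed_ids = Follow.objects.filter(follower=user).values_list("following_id", flat=True)
    return Post.objects.filter(user_id__in=followed_ids).order_by("-createdAt")

def like_count(post):
    """Count Like rows pointing at a post (Like defines no related_name, so query it directly)."""
    return Like.objects.filter(post=post).count()
```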
#### File: apps/users/models.py
```python
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
nickname = models.CharField(max_length=50, default='', verbose_name='nickname')
email = models.EmailField(max_length=50, default='', verbose_name='email')
avatar = models.CharField(max_length=100, default='', verbose_name='avatar')
bio = models.CharField(max_length=200, null=True, blank=True, verbose_name='bio')
website = models.CharField(max_length=50, null=True, blank=True, verbose_name='website')
def __str__(self):
return self.username
class Meta:
verbose_name = 'user'
verbose_name_plural = verbose_name
```
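Because `UserProfile` subclasses `AbstractUser`, Django only treats it as the project's user model if settings point at it. A minimal sketch, assuming the app label is `users` (taken from the `apps/users/` path above):
```python
# settings.py (sketch) -- required when using a custom user model.
# The app label "users" is an assumption based on the apps/users/ path.
AUTH_USER_MODEL = "users.UserProfile"
```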
|
{
"source": "JessHV/LabADA_GrupoC",
"score": 4
}
|
#### File: LabADA_GrupoC/aula4_19oct/Find_Needle.py
```python
def find_needle(needle, haystack):
    # Walk the haystack and compare each character against the first character of the needle.
    # On a match, the inner loop checks the remaining characters of the needle.
    if(len(needle) <= len(haystack)):
        # The needle must be the same length as or shorter than the haystack;
        # only scan positions where the whole needle can still fit.
        for i in range(len(haystack) - len(needle) + 1):
            if haystack[i] == needle[0]:
                for j in range(len(needle)):
                    if haystack[j+i] == needle[j]:
                        #print("Matching character\n")
                        if(j == len(needle)-1):
                            return True
                    else:
                        #print("Different character\n")
                        break
                continue
        return False
    return False
'''
Test case
'''
print(find_needle("fgh", "abcdefgh"))
```
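For comparison, the same check can be expressed with Python's built-in substring operator, which performs the same left-to-right scan and handles the length bookkeeping internally:
```python
def find_needle_builtin(needle, haystack):
    # The `in` operator does the same substring scan as the loop above.
    return needle in haystack

print(find_needle_builtin("fgh", "abcdefgh"))  # True
```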
|
{
"source": "jessi678/aye-saac",
"score": 2
}
|
#### File: services/colour_detection/main.py
```python
import operator
from pprint import pprint
import numpy as np
import pandas as pd
from skimage.color import rgb2lab
from skimage.measure import regionprops
from skimage.segmentation import slic
from ayesaac.services.common import QueueManager
from ayesaac.services.common.crypter import decode
from ayesaac.utils.config import Config
from ayesaac.utils.logger import get_logger
config = Config()
logger = get_logger(__file__)
class ColourDetection(object):
"""
    The ColourDetection class detects the main colours of the objects in the given pictures.
"""
def __init__(self):
self.queue_manager = QueueManager([self.__class__.__name__, "Interpreter"])
data_file = config.directory.data.joinpath("colour", "lab.txt")
colour_list = pd.read_csv(
data_file, skiprows=28, header=None, names=["l", "a", "b", "name"]
)
colour_list = colour_list.values.tolist()[1:]
self.colour_list_names = [x[3] for x in colour_list]
self.colour_list_values = np.asarray(
[np.asarray(x[:3], dtype=np.float32) for x in colour_list]
)
logger.info(f"{self.__class__.__name__} ready")
@staticmethod
def convert_rgb_to_lab(image: np.ndarray) -> np.ndarray:
return rgb2lab(image)
@staticmethod
def flatten_image(image: np.ndarray) -> np.ndarray:
dimensions = np.shape(image)
return np.reshape(image, (dimensions[0] * dimensions[1], dimensions[2]))
@staticmethod
def remove_non_unique_pixels(image: np.ndarray) -> np.ndarray:
return np.unique(image, axis=0)
@staticmethod
def create_labelled_image(lab_image) -> np.ndarray:
return slic(
lab_image,
n_segments=200,
compactness=10,
sigma=0.1,
convert2lab=False,
enforce_connectivity=True,
)
@staticmethod
def create_regions(lab_image, labelled_image):
region_segments = regionprops(labelled_image)
image_dimensions = np.shape(labelled_image)
for region in region_segments:
region.is_boundary = ColourDetection.is_region_on_boundary(
region, image_dimensions
)
region.average_colour = ColourDetection.get_region_average_colour(
region.label, labelled_image, lab_image
)
return region_segments
@staticmethod
def is_region_on_boundary(region, image_dimensions):
if (
region.bbox[0] == 0
or region.bbox[1] == 0
or region.bbox[2] == image_dimensions[0]
            or region.bbox[3] == image_dimensions[1]
):
return True
return False
@staticmethod
def get_pixels_from_label_id(label_id, labelled_image, image):
label_mask = np.invert(np.isin(labelled_image, label_id))
label_mask = np.dstack((label_mask, label_mask, label_mask))
image_mask = np.ma.array(image, mask=label_mask)
return image_mask
@staticmethod
def get_region_average_colour(label_id, labelled_image, image):
masked_image = ColourDetection.get_pixels_from_label_id(
label_id, labelled_image, image
)
flattened_masked_image = ColourDetection.flatten_image(masked_image)
average_colour = np.zeros(3, dtype=np.float32)
for channel in range(np.shape(image)[2]):
average_colour[channel] = np.mean(flattened_masked_image[:, channel])
return average_colour
@staticmethod
def get_all_region_colours(region_list):
return [region.average_colour for region in region_list]
def detect_colours(self, crop_image):
lab_image = self.convert_rgb_to_lab(crop_image)
labelled_image = self.create_labelled_image(lab_image)
region_list = self.create_regions(lab_image, labelled_image)
colours = self.get_all_region_colours(region_list)
colours_found = {}
for colour in colours:
d = ((self.colour_list_values - colour) ** 2).sum(axis=1)
if not self.colour_list_names[d.argmin()] in colours_found:
colours_found[self.colour_list_names[d.argmin()]] = 0
colours_found[self.colour_list_names[d.argmin()]] += 1
sorted_colours = max(colours_found.items(), key=operator.itemgetter(1))
pprint(colours_found)
return sorted_colours[0]
def callback(self, body, **_):
body["path_done"].append(self.__class__.__name__)
for picture in body["pictures"]:
image = decode(picture["data"], picture["shape"], np.uint8)
for i, obj in enumerate(body["objects"]):
crop_img = image[
int(picture["shape"][0] * obj["bbox"][0]) : int(
picture["shape"][0] * obj["bbox"][2]
),
int(picture["shape"][1] * obj["bbox"][1]) : int(
picture["shape"][1] * obj["bbox"][3]
),
]
colour_name = self.detect_colours(crop_img)
body["objects"][i]["colour"] = colour_name
del body["pictures"]
pprint(body)
next_service = body["vision_path"].pop(0)
self.queue_manager.publish(next_service, body)
def run(self):
self.queue_manager.start_consuming(self.__class__.__name__, self.callback)
def main():
colour_detection = ColourDetection()
colour_detection.run()
if __name__ == "__main__":
main()
```
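The core of `detect_colours` is a nearest-neighbour lookup in Lab space: each region's average colour is matched against the reference table by squared Euclidean distance. A standalone sketch of that matching step, using a made-up two-entry palette instead of the `data/colour/lab.txt` table loaded above:
```python
import numpy as np

# Hypothetical palette for illustration; the real table comes from data/colour/lab.txt.
palette_names = ["black", "white"]
palette_values = np.array([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], dtype=np.float32)

def closest_colour(lab_colour):
    """Return the palette name with the smallest squared distance to `lab_colour`."""
    d = ((palette_values - lab_colour) ** 2).sum(axis=1)
    return palette_names[d.argmin()]

print(closest_colour(np.array([90.0, 1.0, -1.0])))  # "white"
```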
#### File: services/common/service_base.py
```python
from typing import Any, List, TypeVar
from ayesaac.utils.logger import get_logger
from .queue_manager import QueueManager
T = TypeVar("T")
logger = get_logger(__file__)
class ServiceBase(object):
def __init__(self, queue_names: List[str]) -> None:
if self.__class__.__name__ not in queue_names:
raise AssertionError(f"{self.__class__.__name__} is not in the queue list")
self.queue_manager = QueueManager(queue_names)
def __post_init__(self) -> None:
logger.info(f"{self.__class__.__name__} ready")
def callback(self, body: Any, **_) -> None:
raise NotImplementedError
def run(self) -> None:
self.queue_manager.start_consuming(self.__class__.__name__, self.callback)
```
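`ServiceBase` leaves only `callback` to implement. A hypothetical subclass might look like the sketch below; the import path, queue names, and message fields are assumptions, not part of the actual services.
```python
# Hypothetical subclass sketch built on the ServiceBase contract above.
from typing import Any

from ayesaac.services.common.service_base import ServiceBase  # path assumed from the file header

class EchoService(ServiceBase):
    def __init__(self) -> None:
        # The queue list must include this class's own name, or ServiceBase raises.
        super().__init__(["EchoService", "Interpreter"])

    def callback(self, body: Any, **_) -> None:
        # Record that this service handled the message, then pass it along.
        body.setdefault("path_done", []).append(self.__class__.__name__)
        self.queue_manager.publish("Interpreter", body)

# EchoService().run() would start consuming from the "EchoService" queue.
```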
#### File: services/object_detection/main.py
```python
from pprint import pprint
import numpy as np
import tensorflow as tf
from ayesaac.services.common import QueueManager
from ayesaac.services.common.crypter import decode
from ayesaac.utils.config import Config
from ayesaac.utils.logger import get_logger
from .coco_category_index import coco_category_index
logger = get_logger(__file__)
config = Config()
class ObjectDetection(object):
"""
    The ObjectDetection class detects every object in the given pictures.
"""
def __init__(self):
self.queue_manager = QueueManager(
[
self.__class__.__name__,
"Interpreter",
"ColourDetection",
"PositionDetection",
]
)
self.category_index = coco_category_index
self.model_path = config.directory.data.joinpath("resnet")
model = tf.saved_model.load(str(self.model_path))
self.model = model.signatures["serving_default"]
logger.info(f"{self.__class__.__name__} ready")
def run_inference_for_single_image(self, image):
input_tensor = tf.convert_to_tensor(image)
input_tensor = input_tensor[tf.newaxis, ...]
output_dict = self.model(input_tensor)
num_detections = int(output_dict.pop("num_detections"))
output_dict = {
key: value[0, :num_detections].numpy() for key, value in output_dict.items()
}
output_dict["num_detections"] = num_detections
output_dict["detection_classes"] = output_dict["detection_classes"].astype(
np.int32
)
return output_dict
def callback(self, body, **_):
objects = []
for picture in body["pictures"]:
image = decode(picture["data"], picture["shape"], np.uint8)
output = self.run_inference_for_single_image(image)
for i in range(output["num_detections"]):
if float(output["detection_scores"][i]) >= 0.5:
objects.append(
{
"name": self.category_index[output["detection_classes"][i]][
"name"
],
"confidence": float(output["detection_scores"][i]),
"bbox": output["detection_boxes"][i].tolist(),
"from": picture["from"],
}
)
pprint(objects)
body["objects"] = objects
body["path_done"].append(self.__class__.__name__)
if "ColourDetection" not in body["vision_path"]:
del body["pictures"]
next_service = body["vision_path"].pop(0)
self.queue_manager.publish(next_service, body)
def run(self):
self.queue_manager.start_consuming(self.__class__.__name__, self.callback)
def main():
obj_detection = ObjectDetection()
obj_detection.run()
if __name__ == "__main__":
main()
```
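The callback keeps only detections whose score clears 0.5 and reshapes them into plain dictionaries. That filtering step can be exercised on its own with a fake inference result shaped like the output of `run_inference_for_single_image`; the small category table below is a stand-in for `coco_category_index`:
```python
import numpy as np

# Stand-in category table and fake model output, for illustration only.
category_index = {1: {"name": "person"}, 18: {"name": "dog"}}
output = {
    "num_detections": 2,
    "detection_classes": np.array([1, 18], dtype=np.int32),
    "detection_scores": np.array([0.91, 0.32], dtype=np.float32),
    "detection_boxes": np.array([[0.1, 0.1, 0.5, 0.4], [0.2, 0.2, 0.6, 0.6]]),
}

objects = []
for i in range(output["num_detections"]):
    if float(output["detection_scores"][i]) >= 0.5:
        objects.append({
            "name": category_index[output["detection_classes"][i]]["name"],
            "confidence": float(output["detection_scores"][i]),
            "bbox": output["detection_boxes"][i].tolist(),
        })

print(objects)  # only the "person" detection survives the 0.5 threshold
```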
#### File: services/web/app.py
```python
import os
import time
from flask import Flask, url_for
from flask_cors import CORS
from ayesaac.services.common import QueueManager
from ayesaac.utils.config import Config
from .user_request import UserRequest
config = Config()
# Create Flask app
app = Flask(__name__)
app.config["CORS_HEADERS"] = "Location"
CORS(
app,
origins=config.endpoint_service.cors_origins,
expose_headers=["Location"],
)
@app.route("/", methods=["GET"])
def hello_world():
return "Hello world"
@app.route("/submit", methods=["POST"])
def submit():
service_if_audio = "AutomaticSpeechRecognition"
service_if_text = "NaturalLanguageUnderstanding"
# Parse user request
user_request = UserRequest(
service_if_audio=service_if_audio, service_if_text=service_if_text
)
# Create queue for Ayesaac and send it
ayesaac_queue_manager = QueueManager([user_request.first_service])
ayesaac_queue_manager.publish(user_request.first_service, user_request.body)
status_url = url_for("submit_status", task_id=user_request.uid)
return (
status_url,
202,
{"Location": status_url},
)
@app.route("/status/<task_id>")
def submit_status(task_id):
file_path = f"output/{task_id}.txt"
attempt_counter = 0
attempt_limit = 15
while not os.path.exists(file_path):
time.sleep(2)
attempt_counter += 1
if attempt_counter > attempt_limit:
break
file_exists = os.path.isfile(file_path)
    # Return 404 if it's not there
    if not file_exists:
return "not found", 404
# Get data from file
with open(file_path, "r") as f:
data = f.read()
# Delete file
# os.remove(file_path)
# Return response
return data, 200
```
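A hypothetical client-side sketch of the submit/poll flow above. The host, port, and form fields are assumptions (the `UserRequest` parsing isn't shown here); only the 202 + `Location` header and the `/status/<task_id>` polling are taken from the routes above.
```python
# Hypothetical client sketch; BASE and the form payload are assumptions.
import time
import requests

BASE = "http://localhost:5000"

response = requests.post(f"{BASE}/submit", data={"message": "what is in front of me?"})
status_url = response.headers["Location"]  # e.g. /status/<task_id>

for _ in range(15):
    result = requests.get(BASE + status_url)
    if result.status_code == 200:
        print(result.text)
        break
    time.sleep(2)
```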
#### File: ayesaac/utils/config.py
```python
import os
from pathlib import Path
from typing import List
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
class RabbitMQCreds(object):
@property
def host(self) -> str:
return os.getenv("RABBITMQ_HOST")
@property
def username(self) -> str:
return os.getenv("RABBITMQ_USERNAME")
@property
def password(self) -> str:
return os.getenv("RABBITMQ_PASSWORD")
class IBMWatsonCreds(object):
@property
def api_key(self) -> str:
return os.getenv("IBM_API_KEY")
@property
def endpoint(self) -> str:
return os.getenv("IBM_WATSON_ENDPOINT")
class EndpointService(object):
def __init__(self, delimiter: str) -> None:
self._delimiter = delimiter
@property
def cors_origins(self) -> List[str]:
url_as_string = os.getenv("ENDPOINT_DOMAINS")
return url_as_string.split(self._delimiter)
class Directories(object):
@property
def root(self) -> Path:
return Path().absolute()
@property
def data(self) -> Path:
return self.root.joinpath("data")
@property
def output(self) -> Path:
return self.root.joinpath("output")
class Config(object):
__slots__ = ("rabbitmq", "directory", "ibmwatson", "endpoint_service")
def __init__(self) -> None:
self.rabbitmq = RabbitMQCreds()
self.ibmwatson = IBMWatsonCreds()
self.directory = Directories()
self.endpoint_service = EndpointService(" ")
def getenv(self, env_key: str) -> str:
return os.getenv(env_key)
```
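`Config` is a thin wrapper over environment variables loaded by `python-dotenv`. A short usage sketch showing which variables the properties above read; the values are placeholders:
```python
# Usage sketch; the variable names below are the ones read in config.py,
# the values are placeholders.
import os

os.environ.setdefault("RABBITMQ_HOST", "localhost")
os.environ.setdefault("ENDPOINT_DOMAINS", "http://localhost:3000 http://localhost:5000")

from ayesaac.utils.config import Config

config = Config()
print(config.rabbitmq.host)                   # RABBITMQ_HOST
print(config.endpoint_service.cors_origins)   # ENDPOINT_DOMAINS split on a space
print(config.directory.data)                  # <current working directory>/data
```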
|
{
"source": "Jessica001cheng/CarND-Advanced-Lane-Lines",
"score": 3
}
|
#### File: Jessica001cheng/CarND-Advanced-Lane-Lines/advanced_lane.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
from helper import showImages, showSidebySide
from camera_calibrate import undistortImages
from threshold_binary import combineGradients, combineGradientsOnS, combineGradientsAndColor,combineYellowWhiteOnLabLuv, combineGradientsAndLUV
from moviepy.editor import VideoFileClip
import pdb
class Lane():
def __init__(self):
self.fit = None
self.fit_m = None
self.curvature = None
self.prevfit = []
self.prevfit_m = []
self.discardFrameNo = 0
def add_fit(self, fit):
self.prevfit.append(fit)
if len(self.prevfit) > 10:
self.prevfit = self.prevfit[len(self.prevfit)-10:]
self.fit = np.mean(self.prevfit, axis=0)
else:
self.fit = fit
return self.fit
def add_fit_m(self, fit_m):
self.prevfit_m.append(fit_m)
if len(self.prevfit_m) > 10:
self.prevfit_m = self.prevfit_m[len(self.prevfit_m)-10:]
self.fit_m = np.mean(self.prevfit_m, axis=0)
else:
self.fit_m = fit_m
return self.fit_m
# Init LeftLane and rightLane for pipeline use
leftLane = Lane()
rightLane = Lane()
# Define conversions in x and y from pixels space to meters
## as perspective transform, the offset = 200
offset = 200
image_width = 1280
image_height = 720
## the line width is 3.7m
## 3.7m = xm_per_pix * (1280 - 2*200)
between_left_right_Line = 3.7
xm_per_pix = between_left_right_Line/(image_width - 2*offset) # meters per pixel in x dimension
## Each dashed line measures 10 feet, and the empty spaces in-between measure 30 feet.
between_dashline_m = 9.14 ## 30 feet
between_dashline_start_y = 100
between_dashline_end_y = 670
ym_per_pix = between_dashline_m/(between_dashline_end_y - between_dashline_start_y) # meters per pixel in y dimension
# Define M as 0 at begin
M = 0
# Define if need convert BGR to RGB for cv image reading
needBGR2RGB = True
# Define show image row and columns
imageRow = 4
imageCol = 3
# Define the number of Frames that discarded
discardFrameNo = 0
def adjustPerspective(image, M=M):
"""
Adjust the `image` using the transformation matrix `M`.
"""
img_size = (image.shape[1], image.shape[0])
warped = cv2.warpPerspective(image, M, img_size)
return warped
def getDiscardFrameNo():
    return discardFrameNo
def findLines(binary_warped, nwindows=9, margin=110, minpix=50):
"""
Find the polynomial representation of the lines in the `image` using:
- `nwindows` as the number of windows.
- `margin` as the windows margin.
- `minpix` as minimum number of pixes found to recenter the window.
- `ym_per_pix` meters per pixel on Y.
- `xm_per_pix` meters per pixels on X.
Returns (left_fit, right_fit, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy)
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Set height of windows
    window_height = int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
try:
## if can not find valid point, use the previous value
left_fit = np.polyfit(lefty, leftx, 2)
left_fit_m = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
except TypeError:
##pdb.set_trace()
leftLane.discardFrameNo = leftLane.discardFrameNo + 1
plt.imshow(binary_warped)
plt.savefig("notdectedLeft_" + str(leftLane.discardFrameNo) + ".png")
left_fit = None
left_fit_m = None
pass
try:
## if can not find valid point, use the previous value
right_fit = np.polyfit(righty, rightx, 2)
right_fit_m = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
except TypeError:
#pdb.set_trace()
rightLane.discardFrameNo = rightLane.discardFrameNo + 1
plt.imshow(binary_warped)
plt.savefig("notdectedRight_" + str(rightLane.discardFrameNo) + ".png")
right_fit = None
right_fit_m = None
pass
return left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy
def visualizeLanes(image, ax):
"""
Visualize the windows and fitted lines for `image`.
Returns (`left_fit` and `right_fit`)
"""
left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy = findLines(image)
# Visualization
ploty = np.linspace(0, image.shape[0]-1, image.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
ax.imshow(out_img)
ax.plot(left_fitx, ploty, color='yellow')
ax.plot(right_fitx, ploty, color='yellow')
return ( left_fit, right_fit, left_fit_m, right_fit_m )
def showLaneOnImages(images, imagesName, cols = 2, rows = 4, figsize=(15,13)):
"""
Display `images` on a [`cols`, `rows`] subplot grid.
Returns a collection with the image paths and the left and right polynomials.
"""
imgLength = len(images)
fig, axes = plt.subplots(rows, cols, figsize=figsize)
indexes = range(cols * rows)
imageAndFit = []
for ax, index in zip(axes.flat, indexes):
if index < imgLength:
imagePathName = imagesName[index]
image = images[index]
left_fit, right_fit, left_fit_m, right_fit_m = visualizeLanes(image, ax)
ax.set_title(imagePathName)
ax.axis('off')
imageAndFit.append( ( imagePathName, left_fit, right_fit, left_fit_m, right_fit_m ) )
saveName = "./output_images/polynomial_line.png"
fig.savefig(saveName)
return imageAndFit
def drawLine(img, left_fit, right_fit):
"""
Draw the lane lines on the image `img` using the poly `left_fit` and `right_fit`.
"""
yMax = img.shape[0]
ploty = np.linspace(0, yMax - 1, yMax)
color_warp = np.zeros_like(img).astype(np.uint8)
# Calculate points.
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
def drawLaneOnImage(img):
"""
Find and draw the lane lines on the image `img`.
"""
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(img)
output = drawLine(img, left_fit, right_fit)
return cv2.cvtColor( output, cv2.COLOR_BGR2RGB )
def calculateCurvature(yRange, fit_cr):
"""
Returns the curvature of the polynomial `fit` on the y range `yRange`.
"""
return ((1 + (2*fit_cr[0]*yRange*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
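# For a second-order fit x = A*y**2 + B*y + C (in metres), the radius of
# curvature evaluated at y is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, which is
# what the expression above computes with yRange converted via ym_per_pix.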
def calculateCenter(img, left_fit_m, right_fit_m):
# Calculate vehicle center
xMax = img.shape[1]*xm_per_pix
yMax = img.shape[0]*ym_per_pix
vehicleCenter = xMax / 2
lineLeft = left_fit_m[0]*yMax**2 + left_fit_m[1]*yMax + left_fit_m[2]
lineRight = right_fit_m[0]*yMax**2 + right_fit_m[1]*yMax + right_fit_m[2]
lineMiddle = lineLeft + (lineRight - lineLeft)/2
diffFromVehicle = lineMiddle - vehicleCenter
return diffFromVehicle
# Function to draw the line on the images
def laneCurveProcess(binary_warped, img):
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(binary_warped)
#print("left_fit_m in left lane, right_fit_m in right lane: ", leftLane.fit_m, rightLane.fit_m)
    ## Seed default values for the left and right lanes; otherwise None would cause issues downstream.
leftLane.add_fit(left_fit)
leftLane.add_fit_m(left_fit_m)
rightLane.add_fit(right_fit)
rightLane.add_fit_m(right_fit_m)
output = drawLine(img, left_fit, right_fit)
if needBGR2RGB:
output = cv2.cvtColor( output, cv2.COLOR_BGR2RGB )
print("need convert")
# Calculate curvature
yRange = img.shape[0] - 1
leftCurvature = calculateCurvature(yRange, left_fit_m)
rightCurvature = calculateCurvature(yRange, right_fit_m)
print("actural left curv:, right curv: ",leftCurvature, rightCurvature)
# Calculate vehicle center
diffFromVehicle = calculateCenter(img, left_fit_m, right_fit_m)
if diffFromVehicle > 0:
message = '{:.2f} m right'.format(diffFromVehicle)
else:
message = '{:.2f} m left'.format(-diffFromVehicle)
# Draw info
font = cv2.FONT_HERSHEY_SIMPLEX
fontColor = (255, 255, 255)
fontScale = 2
cv2.putText(output, 'Left curvature: {:.0f} m'.format(leftCurvature), (50, 50), font, fontScale, fontColor, 2)
cv2.putText(output, 'Right curvature: {:.0f} m'.format(rightCurvature), (50, 120), font, fontScale, fontColor, 2)
cv2.putText(output, 'Vehicle is {} of center'.format(message), (50, 190), font, fontScale, fontColor, 2)
return output
## sanity check for polynomials
def sanityCheck(img, left_fit, right_fit, left_fit_m, right_fit_m):
discard = False
xMax = img.shape[1]*xm_per_pix
yMax = img.shape[0]*ym_per_pix
yMid = yMax/2
yMin = yMax/8
yRange = img.shape[0] - 1
try:
lineLeftMax = left_fit_m[0]*yMax**2 + left_fit_m[1]*yMax + left_fit_m[2]
lineRightMax = right_fit_m[0]*yMax**2 + right_fit_m[1]*yMax + right_fit_m[2]
lineLeftMid = left_fit_m[0]*yMid**2 + left_fit_m[1]*yMid + left_fit_m[2]
lineRightMid = right_fit_m[0]*yMid**2 + right_fit_m[1]*yMid + right_fit_m[2]
lineLeftMin = left_fit_m[0]*yMin**2 + left_fit_m[1]*yMin + left_fit_m[2]
lineRightMin = right_fit_m[0]*yMin**2 + right_fit_m[1]*yMin + right_fit_m[2]
## check width between line left and line right
delta_width = abs((lineRightMax - lineLeftMax) - between_left_right_Line)/between_left_right_Line
if delta_width > 0.05:
print("start width of lane wrong")
discard = True
else:
delta_width_mid = abs((lineRightMid - lineLeftMid) - (lineRightMax - lineLeftMax))/(lineRightMax - lineLeftMax)
if delta_width_mid > 0.1:
print("middle width of lane wrong")
discard = True
else:
delta_width_min = abs((lineRightMin - lineLeftMin) - (lineRightMid - lineLeftMid))/(lineRightMid - lineLeftMid)
if delta_width_min > 0.1:
print(" end width of lane wrong")
discard = True
else:
leftCurvature = calculateCurvature(yRange, left_fit_m)
rightCurvature = calculateCurvature(yRange, right_fit_m)
if leftCurvature > 10000:
print("discard fail frame as left curve")
discard = True
if rightCurvature > 10000:
print("discard fail frame as right curve")
discard = True
except TypeError:
print("typeError")
discard = True
if discard == True:
# use previous frame data
left_fit = leftLane.fit
left_fit_m = leftLane.fit_m
right_fit = rightLane.fit
right_fit_m = rightLane.fit_m
else:
# valid data and add in leftLane/rightLane prev_fit
left_fit = leftLane.add_fit(left_fit)
left_fit_m = leftLane.add_fit_m(left_fit_m)
right_fit = rightLane.add_fit(right_fit)
right_fit_m = rightLane.add_fit_m(right_fit_m)
return left_fit, right_fit, left_fit_m, right_fit_m
## pipe line to deal with movie
def pipeline(img):
undistort = undistortImages(img,mtx, dist)
#hls = cv2.cvtColor(undistort, cv2.COLOR_BGR2HLS)
    combine = combineGradientsAndLUV(undistort, threshSobel = (70,160))
binary_warped = adjustPerspective(combine, M)
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(binary_warped)
#pdb.set_trace()
left_fit, right_fit, left_fit_m, right_fit_m = sanityCheck(img, left_fit, right_fit, left_fit_m, right_fit_m)
output = drawLine(img, left_fit, right_fit)
if needBGR2RGB:
output = cv2.cvtColor( output, cv2.COLOR_BGR2RGB )
print("need convert")
# Calculate curvature
yRange = img.shape[0] - 1
leftCurvature = calculateCurvature(yRange, left_fit_m)
rightCurvature = calculateCurvature(yRange, right_fit_m)
#print("actural left curv:, right curv: ",leftCurvature, rightCurvature)
## first frame set
if leftLane.curvature is None:
print("first frame")
leftLane.curvature = leftCurvature
if rightLane.curvature is None:
print("first frame")
rightLane.curvature = rightCurvature
## Compare with old_left_fit & old_right_fit to decide if update or not
## Also check deviation from last frame. If deviation > 5%, discard this frame.
#leftDeviate = abs((left_fit[0] - leftLane.fit[0])/leftLane.fit])
#rightDeviate = abs((right_fit[0] - rightLane.fit[0])/rightLane.fit[0])
if leftCurvature > 10000:
print("discard fail frame as left curve")
left_fit = leftLane.fit
left_fit_m = leftLane.fit_m
leftCurvature = leftLane.curvature
else:
leftLane.fit = left_fit
leftLane.fit_m = left_fit_m
leftLane.curvature = leftCurvature
if rightCurvature > 10000:
print("discard fail frame as right curve")
right_fit = rightLane.fit
right_fit_m = rightLane.fit_m
rightCurvature = rightLane.curvature
else:
rightLane.fit = right_fit
rightLane.fit_m = right_fit_m
rightLane.curvature = rightCurvature
# Calculate vehicle center
diffFromVehicle= calculateCenter(img, left_fit_m, right_fit_m)
if diffFromVehicle > 0:
message = '{:.2f} m right'.format(diffFromVehicle)
else:
message = '{:.2f} m left'.format(-diffFromVehicle)
# Draw info
font = cv2.FONT_HERSHEY_SIMPLEX
fontColor = (255, 255, 255)
fontScale = 2
cv2.putText(output, 'Left curvature: {:.0f} m'.format(leftCurvature), (50, 50), font, fontScale, fontColor, 2)
cv2.putText(output, 'Right curvature: {:.0f} m'.format(rightCurvature), (50, 120), font, fontScale, fontColor, 2)
cv2.putText(output, 'Vehicle is {} of center'.format(message), (50, 190), font, fontScale, fontColor, 2)
return output
def videoClip(inputVideo, outputVideo, start, end):
clip = VideoFileClip(inputVideo)
clip2 = clip.subclip("0:0:" + str(start), "0:0:" + str(end))
clip2.write_videofile(outputVideo, audio=False)
def videoPipeline(inputVideo, outputVideo):
"""
    Process the `inputVideo` frame by frame to find the lane lines, draw curvature and vehicle position information, and
    generate `outputVideo`.
"""
myclip = VideoFileClip(inputVideo)
## use pipeline to deal with each frame
clip = myclip.fl_image(pipeline)
clip.write_videofile(outputVideo, audio=False)
## test code
# Loading camera calibration
cameraCalibration = pickle.load( open('./pickled_data/camera_calibration.p', 'rb' ) )
mtx, dist = map(cameraCalibration.get, ('mtx', 'dist'))
# Load test images.
testImages = list(map(lambda imageFileName: cv2.imread(imageFileName),
glob.glob('./test_images/*.jpg')))
testImagesName = glob.glob('./test_images/*.jpg')
print("test image shape: ", testImages[1].shape)
image_width = testImages[1].shape[1]
image_height = testImages[1].shape[0]
undistImages = list(map(lambda img: undistortImages(img,mtx, dist),testImages))
## Finally use a combination of Sobel gradients and colour thresholding on the LUV colour space
combineImages = list(map(lambda img: combineGradientsAndLUV(img,threshSobel = (70,160)), undistImages))
## show warped images in the test images
#showImages(combineImages, testImagesName,figTitle ='Filter on test images', cols=imageRow,rows=imageCol,cmap='gray',figName = "FilteredTestImages")
#plt.show()
## Apply perspective transform
transMatrix = pickle.load( open('./pickled_data/perspective_transform.p', 'rb' ) )
M, Minv = map(transMatrix.get, ('M', 'Minv'))
warpedImages = list(map(lambda img: adjustPerspective(img, M), combineImages))
## show warped images in the test images
#showImages(warpedImages, testImagesName,figTitle ='Perspective transform on the test images', cols=imageRow,rows=imageCol,cmap='gray',figName = "PerspectiveTestImages")
#plt.show()
## show lane-line pixels and fit their positions with a polynomial
#polyImages = showLaneOnImages(warpedImages, testImagesName, cols=imageRow,rows=imageCol)
#plt.show()
discardFrameNo = 0
processedImages = []
for i in range(len(testImagesName)):
print("image name: ", testImagesName[i])
processed = laneCurveProcess(warpedImages[i], testImages[i])
processedImages.append(processed)
## show lane in the test images
#showImages(processedImages, testImagesName,figTitle ='Detected Lines on the test images', cols=imageRow,rows=imageCol,cmap='gray', figName = "LaneOnTestImages")
#plt.show()
## Use pipeline to deal with the project_video file
## no need convert BGR to RGB
needBGR2RGB = not needBGR2RGB
print("needBGR2RGB: ", needBGR2RGB)
#videoClip('project_video.mp4', 'test1.mp4', 37,43)
#videoClip('project_video.mp4', 'test2.mp4', 21,25)
#videoPipeline('test1.mp4', 'video_output/test1_out.mp4')
#videoPipeline('test2.mp4', 'video_output/test2_out.mp4')
videoPipeline('project_video.mp4', 'video_output/project_video_out.mp4')
```
|
{
"source": "Jessica001cheng/CarND-Vehicle-Detection",
"score": 3
}
|
#### File: Jessica001cheng/CarND-Vehicle-Detection/helper.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
def showImages(images, imagesName, figTitle= None, cols = 4, rows = 5, figsize=(15,10), cmap = None, figName = None):
"""
Display `images` on a [`cols`, `rows`] subplot grid.
"""
if len(images) != len(imagesName):
        raise ValueError('Lengths of images and imagesName are not the same')
imgLength = len(images)
fig, axes = plt.subplots(rows, cols, figsize=figsize)
plt.suptitle(figTitle)
indexes = range(cols * rows)
for ax, index in zip(axes.flat, indexes):
if index < imgLength:
imagePathName = imagesName[index],
image = images[index]
ax.imshow(image,cmap=cmap)
ax.set_title(imagePathName)
ax.axis('off')
if figName != None:
print("save fig name: ", figName)
saveName = "./output_images/" + figName + ".png"
fig.savefig(saveName)
plt.show()
def showSidebySide(original, new, firstTitle = "original", newTitle = "new"):
fig, axes = plt.subplots(ncols=2, figsize=(20, 10))
axes[0].imshow(original)
axes[0].set_title(firstTitle)
axes[1].imshow(new)
axes[1].set_title(newTitle)
## save figure
saveName = "./output_images/" + newTitle + ".png"
fig.savefig(saveName)
plt.show()
```
|
{
"source": "jessica1338/LinTIMaT",
"score": 3
}
|
#### File: LinTIMaT/visualization/visualization_utility (2).py
```python
import pandas as pd
from newick import loads,read
from collections import defaultdict
def load_color_annotation_dictionary(color_annotation_file):
# color_annotation_file = "ClusterColors.txt"
label_type_dictionary={}
label_color_dictionary={}
color_df = pd.read_csv(color_annotation_file,sep="\t")
labels=color_df[color_df.columns[0]]
label_types=color_df[color_df.columns[1]]
label_type_dictionary.update(dict(zip(labels, label_types)))
label_colors=color_df[color_df.columns[2]]
label_color_dictionary.update(dict(zip(labels, label_colors)))
return label_type_dictionary, label_color_dictionary
def load_label_mutation_annotation_dictionary(data_files):
cell_label_dictionary={}
cell_HMID_dictionary={}
for data_file in data_files:
df=pd.read_csv(data_file,sep="\t")
# dfs+=[df]
cell_names=df[df.columns[0]]
cell_labels=df[df.columns[1]]
cell_label_dictionary.update(dict(zip(cell_names, cell_labels)))
cell_HMIDs=df[df.columns[2]]
cell_HMID_dictionary.update(dict(zip(cell_names, cell_HMIDs)))
# print len(cell_label_dictionary.items())
# print len(cell_HMID_dictionary.items())
return cell_label_dictionary, cell_HMID_dictionary
def load_mutation_event_loaction_dictionary(cell_HMID_dictionary):
small_event_location_map=defaultdict(set)
for key,value in cell_HMID_dictionary.items():
splits=value.split("-")
for i,sp in enumerate(splits):
small_event_location_map[sp].add(i)
return small_event_location_map
def load_individual_tree_visualization_file(non_binary_tree_file):
lines=open(non_binary_tree_file,'r').readlines()
tree_newick=lines[1].replace("\n","").replace("\r","")
tree_root = loads(tree_newick)[0]
# print tree_newick
mutation_node=[]
cluster_node=[]
mutation_node_event_dict=defaultdict(list)
ln=0
for i in range(3,len(lines)):
if lines[i].startswith("Cluster Nodes"):
ln=i
break
line = lines[i].replace("\n","").replace("\r","")
        print(line)
splits=line.split(" ")
mutation_node_name = splits[0]
mutation_node+=[mutation_node_name]
for sp in splits[1:]:
if sp.startswith("["):
sp=sp[1:]
if sp.endswith("]") or sp.endswith(",") :
sp=sp[:-1]
mutation_node_event_dict[mutation_node_name]+=[sp]
for i in range(ln+1,len(lines)):
cluster_node+=[lines[i].replace("\n","").replace("\r","")]
return tree_root, mutation_node,cluster_node,mutation_node_event_dict
def write_individual_tree_non_binary(node,mutation_node,cluster_node,mutation_node_event_dict,cell_label_dictionary,label_color_dictionary,label_type_dictionary,cell_HMID_dictionary,small_event_location_map, level=0, parent=None,mutation_set=set(),group=[]):
node_dict={}
node_dict["name"]=node.name
node_dict["parent"]="null"
node_dict["event"]="NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE"
# node_dict["commonEvent"]="*_*_*_*_*_*_*_*_*_*"
node_dict["rootDist"]=level+1
node_dict["organProportions"]={"ALL": 1.0}
node_dict["nodecolor"]="black"
if node.name in mutation_node:
node_dict["nodecolor"]="blue"
mutation_set|=set(mutation_node_event_dict[node.name])
node_dict["mutation"]="_".join(mutation_node_event_dict[node.name])
else:
node_dict["mutation"]="NONE"
if node.name in cluster_node:
node_dict["nodecolor"]="red"
if parent != None:
node_dict["parent"] = parent.name
# print mutation_set
events=["*"]*10
for event in mutation_set:
event_locs=small_event_location_map[event]
for event_loc in event_locs:
events[event_loc]=event
node_dict["commonEvent"] = "_".join(events)
# print node_dict["commonEvent"]
children=[]
if len(node.descendants) == 0:
#leaf
cl=cell_label_dictionary[node.name]
lc = label_color_dictionary[cl]
node_dict["nodecolor"]="white"
node_dict["cladeColor"]="#"+lc
node_dict["cellType"]=label_type_dictionary[cl]
node_dict["event"] = "_".join(cell_HMID_dictionary[node.name].split("-"))
node_dict["organProportions"]={label_type_dictionary[cl]: 1.0}
node_dict["num_cell_under_this_node"]=1
else:
node_dict["organProportions"]=defaultdict(lambda:0)
cell_count=0
for child in node.descendants:
if child.name =="normal":
continue
if child.name not in group and len(group)>0 and level==1:
                print(child.name)
                print(group)
continue
children += [write_individual_tree_non_binary(child,mutation_node,cluster_node,mutation_node_event_dict,cell_label_dictionary,label_color_dictionary,label_type_dictionary,cell_HMID_dictionary,small_event_location_map, level = level+1, parent = node, mutation_set=mutation_set,group=group)]
# print children[-1]
ops=children[-1]["organProportions"]
cc=children[-1]["num_cell_under_this_node"]
for key,val in ops.items():
node_dict["organProportions"][key]+=val*cc
cell_count+=cc
node_dict["num_cell_under_this_node"]=cell_count
total=sum(node_dict["organProportions"].values())
for key in node_dict["organProportions"].keys():
node_dict["organProportions"][key]/=float(total)
# print node_dict["organProportions"]
node_dict["children"]=children
mutation_set-=set(mutation_node_event_dict[node.name])
return node_dict
def update_node_leaf_dictionary(root,node_leaf_dictionary,level=0):
#print level
node_leaf_dictionary[root.name]=root.get_leaf_names()
#print root, root.descendants
for node in root.descendants:
update_node_leaf_dictionary(node,node_leaf_dictionary,level+1)
def get_matched_cluster_node_leaf_dictionary(matching_file_name):
matched_tree_file_lines=open(matching_file_name,"r").readlines()
ln=0
original_tree_newicks=[]
matched_tree_newicks=[]
matched_cluster={}
while(ln<len(matched_tree_file_lines)):
line=matched_tree_file_lines[ln]
if line.startswith("Original tree newick"):
original_tree_newicks+=[matched_tree_file_lines[ln+1][:-1]]
matched_tree_newicks+=[matched_tree_file_lines[ln+3][:-1]]
ln+=4
else:
ln+=2
break
while(ln<len(matched_tree_file_lines)):
line=matched_tree_file_lines[ln]
line2=matched_tree_file_lines[ln+1]
key=line[:-1].replace("\n","").replace("\r","")
values=line2.replace("\n","").replace("\r","").split(" ")[:-1]
matched_cluster[key]=values
# print key, values
ln+=2
node_leaf_dictionary_list=[]
for newick_string in original_tree_newicks:
tree = loads(newick_string)
node_leaf_dictionary={}
update_node_leaf_dictionary(tree[0],node_leaf_dictionary,0)
# print node_leaf_dictionary
node_leaf_dictionary_list+=[node_leaf_dictionary]
return matched_cluster,node_leaf_dictionary_list,matched_tree_newicks
def write_consensus_tree_json_dict(node,matched_cluster, node_leaf_dictionary_list,label_type_dictionary,label_color_dictionary,cell_HMID_dictionary,cell_label_dictionary, level=0, parent=None, fish_index = -1, calc_distance_by_cell_pairs=False,group=[]):
node_dict={}
node_dict["name"]=node.name
node_dict["parent"]="null"
node_dict["event"]="NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE"
node_dict["commonEvent"]="*_*_*_*_*_*_*_*_*_*"
node_dict["rootDist"]=level+1
node_dict["organProportions"]=defaultdict(lambda:0)
node_dict["num_cell_under_this_node"]=0
if parent != None:
node_dict["parent"] = parent.name
children=[]
if len(node.descendants) == 0:
node_dict["nodecolor"]="red"
for i,cluster in enumerate(matched_cluster[node.name]):
fish_cell_type_count_dict=defaultdict(lambda:0)
if fish_index!= -1 and i != fish_index:
continue
fish_node_dict={}
fish_node_dict["name"]="f"+str(i)
fish_node_dict["parent"]=node.name
fish_node_dict["commonEvent"]="*_*_*_*_*_*_*_*_*_*"
fish_node_dict["event"]="NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE_NONE"
fish_node_dict["rootDist"]=level+2
fish_node_dict["organProportions"]=defaultdict(lambda:0)
fish_node_dict["nodecolor"]="blue"
fish_node_dict["num_cell_under_this_node"]=0
fish_children=[]
cluster_mean=[]
for cell in node_leaf_dictionary_list[i][cluster]:
cell_dict={}
cl=cell_label_dictionary[cell]
lc = label_color_dictionary[cl]
# exp = cell_exp_dictionary[cell]
# cluster_mean+=[exp]
cell_dict["name"]=cell
if fish_index == -1:
cell_dict["parent"]="f"+str(i)
cell_dict["rootDist"]=level+3
else:
cell_dict["parent"]=node.name
cell_dict["rootDist"]=level+2
cell_dict["commonEvent"]="*_*_*_*_*_*_*_*_*_*"
# cell_dict["organProportions"]={"ALL": 1.0}
cell_dict["nodecolor"]="white"
cell_dict["cladeColor"]="#"+lc
cell_dict["cellType"]=label_type_dictionary[cl]
cell_dict["organProportions"]={label_type_dictionary[cl]: 1.0}
cell_dict["num_cell_under_this_node"]=1
fish_node_dict["num_cell_under_this_node"]+=1
node_dict["num_cell_under_this_node"]+=1
cell_dict["event"] = "_".join(cell_HMID_dictionary[cell].split("-"))
fish_node_dict["organProportions"][label_type_dictionary[cl]]+=1
node_dict["organProportions"][label_type_dictionary[cl]]+=1
# print cell_dict["event"]
fish_children+=[cell_dict]
            factor=1.0/sum(fish_node_dict["organProportions"].values())
for k in fish_node_dict["organProportions"]:
fish_node_dict["organProportions"][k] = fish_node_dict["organProportions"][k]*factor
if fish_index ==-1:
fish_node_dict["children"]=fish_children
children+=[fish_node_dict]
else:
children=fish_children
        factor=1.0/sum(node_dict["organProportions"].values())
for k in node_dict["organProportions"]:
node_dict["organProportions"][k] = node_dict["organProportions"][k]*factor
else:
cell_count=0
for child in node.descendants:
if child.name =="normal":
continue
if child.name not in group and len(group)>0 and level==1:
continue
children += [write_consensus_tree_json_dict(child,matched_cluster, node_leaf_dictionary_list,label_type_dictionary,label_color_dictionary,cell_HMID_dictionary,cell_label_dictionary, level = level+1, parent = node,fish_index=fish_index,calc_distance_by_cell_pairs=calc_distance_by_cell_pairs,group=group)]
# print children[-1]["name"]
ops=children[-1]["organProportions"]
cc=children[-1]["num_cell_under_this_node"]
for key,val in ops.items():
node_dict["organProportions"][key]+=val*cc
cell_count+=cc
node_dict["num_cell_under_this_node"]=cell_count
total=sum(node_dict["organProportions"].values())
for key in node_dict["organProportions"].keys():
node_dict["organProportions"][key]/=float(total)
# print node_dict["organProportions"]
node_dict["children"]=children
return node_dict
```
|
{
"source": "JessicaAlan/foremast",
"score": 2
}
|
#### File: awslambda/s3_event/s3_event.py
```python
import json
import logging
import boto3
from ...utils import add_lambda_permissions, get_lambda_alias_arn, get_template
LOG = logging.getLogger(__name__)
def create_s3_event(app_name, env, region, bucket, triggers):
"""Create S3 lambda events from triggers
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
        region (str): AWS region of the lambda function
        bucket (str): Name of the S3 bucket to configure the events on
        triggers (list): List of triggers from the settings
"""
session = boto3.Session(profile_name=env, region_name=region)
s3_client = session.client('s3')
lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)
LOG.debug("Lambda ARN for lambda function %s is %s.", app_name, lambda_alias_arn)
LOG.debug("Creating S3 events for bucket %s", bucket)
# allow lambda trigger permission from bucket
principal = 's3.amazonaws.com'
statement_id = "{}_s3_{}".format(app_name, bucket).replace('.', '')
source_arn = "arn:aws:s3:::{}".format(bucket)
add_lambda_permissions(
function=lambda_alias_arn,
env=env,
region=region,
principal=principal,
statement_id=statement_id,
source_arn=source_arn)
# configure events on s3 bucket to trigger lambda function
template_kwargs = {"lambda_arn": lambda_alias_arn, "triggers": triggers}
config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs)
s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config))
LOG.info("Created lambda %s S3 event on bucket %s", app_name, bucket)
```
#### File: foremast/configs/__main__.py
```python
import argparse
import logging
import gogoutils
from ..args import add_debug
from ..consts import APP_FORMATS, LOGGING_FORMAT
from .outputs import write_variables
from .prepare_configs import process_git_configs, process_runway_configs
LOG = logging.getLogger(__name__)
def main():
"""Append Application Configurations to a given file in multiple formats."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
parser.add_argument('-o', '--output', required=True, help='Name of environment file to append to')
parser.add_argument(
'-g', '--git-short', metavar='GROUP/PROJECT', required=True, help='Short name for Git, e.g. forrest/core')
parser.add_argument('-r', '--runway-dir', help='Runway directory with app.json files, requires --git-short')
args = parser.parse_args()
LOG.setLevel(args.debug)
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
generated = gogoutils.Generator(*gogoutils.Parser(args.git_short).parse_url(), formats=APP_FORMATS)
git_short = generated.gitlab()['main']
if args.runway_dir:
configs = process_runway_configs(runway_dir=args.runway_dir)
else:
configs = process_git_configs(git_short=git_short)
write_variables(app_configs=configs, out_file=args.output, git_short=git_short)
if __name__ == '__main__':
main()
```
#### File: foremast/iam/create_iam.py
```python
import collections
import logging
import boto3
from ..utils import get_details, get_properties, get_template
from .construct_policy import construct_policy
from .resource_action import resource_action
LOG = logging.getLogger(__name__)
def create_iam_resources(env='dev', app='', **_):
"""Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion.
"""
session = boto3.session.Session(profile_name=env)
client = session.client('iam')
app_properties = get_properties(env='pipeline')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
deployment_type = app_properties['type']
role_trust_template = get_template(
'infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)
resource_action(
client,
action='create_role',
log_format='Created Role: %(RoleName)s',
RoleName=details.role,
AssumeRolePolicyDocument=role_trust_template)
resource_action(
client,
action='create_instance_profile',
log_format='Created Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)
iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)
if iam_policy:
resource_action(
client,
action='put_role_policy',
log_format='Added IAM Policy: %(PolicyName)s',
RoleName=details.role,
PolicyName=details.policy,
PolicyDocument=iam_policy)
resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)
resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='add_user_to_group',
log_format='Added User to Group: %(UserName)s -> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
return True
def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'):
"""Attach an IAM Instance Profile _profile_name_ to Role _role_name_.
Args:
role_name (str): Name of Role.
profile_name (str): Name of Instance Profile.
Returns:
True upon successful completion.
"""
current_instance_profiles = resource_action(
client,
action='list_instance_profiles_for_role',
log_format='Found Instance Profiles for %(RoleName)s.',
RoleName=role_name)['InstanceProfiles']
for profile in current_instance_profiles:
if profile['InstanceProfileName'] == profile_name:
LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name)
break
else:
for remove_profile in current_instance_profiles:
resource_action(
client,
action='remove_role_from_instance_profile',
log_format='Removed Instance Profile from Role: '
'%(InstanceProfileName)s -> %(RoleName)s',
InstanceProfileName=remove_profile['InstanceProfileName'],
RoleName=role_name)
resource_action(
client,
action='add_role_to_instance_profile',
log_format='Added Instance Profile to Role: '
'%(InstanceProfileName)s -> %(RoleName)s',
InstanceProfileName=profile_name,
RoleName=role_name)
return True
```
#### File: src/foremast/__main__.py
```python
import argparse
import collections
import logging
import os
from . import runner, validate
from .args import add_debug, add_env
from .consts import LOGGING_FORMAT, SHORT_LOGGING_FORMAT
from .version import print_version
LOG = logging.getLogger(__name__)
def add_infra(subparsers):
"""Infrastructure subcommands."""
infra_parser = subparsers.add_parser('infra', help=runner.prepare_infrastructure.__doc__)
infra_parser.set_defaults(func=runner.prepare_infrastructure)
def add_pipeline(subparsers):
"""Pipeline subcommands."""
pipeline_parser = subparsers.add_parser(
'pipeline', help=add_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pipeline_parser.set_defaults(func=pipeline_parser.print_help)
pipeline_subparsers = pipeline_parser.add_subparsers(title='Pipelines')
pipeline_full_parser = pipeline_subparsers.add_parser(
'app', help=runner.prepare_app_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pipeline_full_parser.set_defaults(func=runner.prepare_app_pipeline)
pipeline_onetime_parser = pipeline_subparsers.add_parser(
'onetime', help=runner.prepare_onetime_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pipeline_onetime_parser.set_defaults(func=runner.prepare_onetime_pipeline)
add_env(pipeline_onetime_parser)
def add_rebuild(subparsers):
"""Rebuild Pipeline subcommands."""
rebuild_parser = subparsers.add_parser(
'rebuild', help=runner.rebuild_pipelines.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
rebuild_parser.set_defaults(func=runner.rebuild_pipelines)
rebuild_parser.add_argument('-a', '--all', action='store_true', help='Rebuild all Pipelines')
rebuild_parser.add_argument(
'project',
nargs='?',
default=os.getenv('REBUILD_PROJECT'),
help='Project to rebuild, overrides $REBUILD_PROJECT')
def add_autoscaling(subparsers):
"""Auto Scaling Group Policy subcommands."""
autoscaling_parser = subparsers.add_parser(
'autoscaling',
help=runner.create_scaling_policy.__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
autoscaling_parser.set_defaults(func=runner.create_scaling_policy)
def add_scheduled_actions(subparsers):
"""Auto Scaling Group Scheduled Actions subcommands."""
scheduled_actions_parser = subparsers.add_parser(
'scheduledactions',
help=runner.create_scheduled_actions.__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
scheduled_actions_parser.set_defaults(func=runner.create_scheduled_actions)
def add_validate(subparsers):
"""Validate Spinnaker setup."""
validate_parser = subparsers.add_parser(
'validate', help=add_validate.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
validate_parser.set_defaults(func=validate_parser.print_help)
validate_subparsers = validate_parser.add_subparsers(title='Testers')
validate_all_parser = validate_subparsers.add_parser(
'all', help=validate.validate_all.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
validate_all_parser.set_defaults(func=validate.validate_all)
validate_gate_parser = validate_subparsers.add_parser(
'gate', help=validate.validate_gate.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
validate_gate_parser.set_defaults(func=validate.validate_gate)
def main(manual_args=None):
"""Foremast, your ship's support."""
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_defaults(func=parser.print_help)
add_debug(parser)
parser.add_argument(
'-s',
'--short-log',
action='store_const',
const=SHORT_LOGGING_FORMAT,
default=LOGGING_FORMAT,
help='Truncated logging format')
parser.add_argument('-v', '--version', action='store_true', help=print_version.__doc__)
    subparsers = parser.add_subparsers(title='Commands', description='Available activities')
add_infra(subparsers)
add_pipeline(subparsers)
add_rebuild(subparsers)
add_autoscaling(subparsers)
add_scheduled_actions(subparsers)
add_validate(subparsers)
CliArgs = collections.namedtuple('CliArgs', ['parsed', 'extra'])
parsed, extra = parser.parse_known_args(args=manual_args)
args = CliArgs(parsed, extra)
logging.basicConfig(format=args.parsed.short_log)
package, *_ = __package__.split('.')
logging.getLogger(package).setLevel(args.parsed.debug)
LOG.debug('Arguments: %s', args)
if args.parsed.version:
args.parsed.func = print_version
try:
args.parsed.func(args)
except (AttributeError, TypeError):
args.parsed.func()
if __name__ == '__main__':
main()
```
#### File: foremast/pipeline/clean_pipelines.py
```python
import logging
import requests
from ..consts import API_URL, GATE_CA_BUNDLE, GATE_CLIENT_CERT, RUNWAY_BASE_PATH
from ..exceptions import SpinnakerPipelineCreationFailed, SpinnakerPipelineDeletionFailed
from ..utils import check_managed_pipeline, get_all_pipelines, normalize_pipeline_name
LOG = logging.getLogger(__name__)
def delete_pipeline(app='', pipeline_name=''):
"""Delete _pipeline_name_ from _app_."""
safe_pipeline_name = normalize_pipeline_name(name=pipeline_name)
LOG.warning('Deleting Pipeline: %s', safe_pipeline_name)
url = '{host}/pipelines/{app}/{pipeline}'.format(host=API_URL, app=app, pipeline=safe_pipeline_name)
response = requests.delete(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if not response.ok:
LOG.debug('Delete response code: %d', response.status_code)
if response.status_code == requests.status_codes.codes['method_not_allowed']:
raise SpinnakerPipelineDeletionFailed('Failed to delete "{0}" from "{1}", '
'possibly invalid Pipeline name.'.format(safe_pipeline_name, app))
LOG.debug('Pipeline missing, no delete required.')
LOG.debug('Deleted "%s" Pipeline response:\n%s', safe_pipeline_name, response.text)
return response.text
def clean_pipelines(app='', settings=None):
"""Delete Pipelines for regions not defined in application.json files.
For Pipelines named **app_name [region]**, _region_ will need to appear
in at least one application.json file. All other names are assumed
unmanaged.
Args:
app (str): Application name
settings (dict): imported configuration settings
Returns:
True: Upon successful completion.
Raises:
SpinnakerPipelineCreationFailed: Missing application.json file from
`create-configs`.
"""
pipelines = get_all_pipelines(app=app)
envs = settings['pipeline']['env']
LOG.debug('Find Regions in: %s', envs)
regions = set()
for env in envs:
try:
regions.update(settings[env]['regions'])
except KeyError:
error_msg = 'Missing "{}/application-master-{}.json".'.format(RUNWAY_BASE_PATH, env)
raise SpinnakerPipelineCreationFailed(error_msg)
LOG.debug('Regions defined: %s', regions)
for pipeline in pipelines:
pipeline_name = pipeline['name']
try:
region = check_managed_pipeline(name=pipeline_name, app_name=app)
except ValueError:
LOG.info('"%s" is not managed.', pipeline_name)
continue
LOG.debug('Check "%s" in defined Regions.', region)
if region not in regions:
delete_pipeline(app=app, pipeline_name=pipeline_name)
return True
```
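Both helpers take plain keyword arguments, so their use can be sketched with a hand-built `settings` dict that mirrors the structure read above (`settings['pipeline']['env']` and `settings[env]['regions']`). This is only a sketch: the application name is a placeholder and a reachable Gate API (`API_URL`) is assumed.
```python
# Hypothetical settings shaped like the application-master JSON files.
settings = {
    'pipeline': {'env': ['dev', 'stage']},
    'dev': {'regions': ['us-east-1']},
    'stage': {'regions': ['us-east-1', 'us-west-2']},
}
# Remove any managed Pipeline of 'exampleapp' whose region is not listed above.
clean_pipelines(app='exampleapp', settings=settings)
# A single Pipeline can also be removed directly by its full name.
delete_pipeline(app='exampleapp', pipeline_name='exampleapp [eu-west-1]')
```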
#### File: src/foremast/plugin_manager.py
```python
import pathlib
from pluginbase import PluginBase
from .exceptions import PluginNotFound
class PluginManager:
"""Class to manage and create Spinnaker applications
Args:
paths (str): Path of plugin directory.
provider (str): The name of the cloud provider.
"""
def __init__(self, resource, provider):
path = pathlib.Path(__file__).parent.resolve()
path = path / resource
all_paths = [str(path)]
self.paths = all_paths
self.provider = provider
plugin_base = PluginBase(package='foremast.plugins')
self.plugin_source = plugin_base.make_plugin_source(searchpath=self.paths, persist=True)
def plugins(self):
"""List of all plugins available."""
for plugin in self.plugin_source.list_plugins():
yield plugin
def load(self):
"""Load the plugin."""
try:
loaded_plugin = self.plugin_source.load_plugin(self.provider)
except ModuleNotFoundError:
raise PluginNotFound('No plugin found for provider {} in {}'.format(self.provider, self.paths))
return loaded_plugin
```
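`PluginManager` is a thin wrapper around `pluginbase`, and the pattern it relies on can be reproduced stand-alone. A self-contained sketch (the plugin name `aws` and its `deploy` function are illustrative, not part of Foremast; `pluginbase` must be installed):
```python
import pathlib
import tempfile

from pluginbase import PluginBase

# Write a tiny plugin module into a temporary directory, then load it by name,
# mirroring what PluginManager does with its resource directories.
with tempfile.TemporaryDirectory() as tmp:
    plugin_file = pathlib.Path(tmp) / 'aws.py'
    plugin_file.write_text("def deploy():\n    return 'deploying to aws'\n")
    plugin_base = PluginBase(package='example.plugins')
    source = plugin_base.make_plugin_source(searchpath=[tmp], persist=True)
    print(source.list_plugins())    # ['aws']
    plugin = source.load_plugin('aws')
    print(plugin.deploy())          # 'deploying to aws'
```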
|
{
"source": "JessicaAlan/garden",
"score": 3
}
|
#### File: JessicaAlan/garden/brain.py
```python
import serial, os, sys
import time
import keyboard
from smbus import SMBus
sensor_data = {
"temperature": None,
"humidity": None,
"moisture": None,
"light": None,
"relay": None
}
blink = True
power = False
ser1 = None
ser2 = None
def toggle():
global power
power = not power
keyboard.add_hotkey('p', toggle)
def print_data():
os.system('clear')
print("Temperature: {} %RH: {}".format(sensor_data["temperature"], sensor_data["humidity"]))
if sensor_data["moisture"] is not None:
print("Moisture level: {}".format(sensor_data["moisture"]))
else:
print("No Plant Monitor found!")
if sensor_data["light"] is not None:
print("Light level: {} Relay is: {}".format(sensor_data["light"], sensor_data["relay"]))
else:
print("No Outlet Guard found!")
global blink
if blink:
print('-')
else:
print('|')
blink = not blink
def getTnH():
    # Read humidity and temperature from an Si7021/HTU21D-style sensor at I2C address 0x40.
    bus = SMBus(1)
    bus.write_byte(0x40, 0xF5)  # trigger a relative-humidity measurement (no-hold mode)
    time.sleep(0.1)
    data0 = bus.read_byte(0x40)
    data1 = bus.read_byte(0x40)
    humidity = ((data0 * 256 + data1) * 125 / 65536.0) - 6
    time.sleep(0.1)
    bus.write_byte(0x40, 0xE0)  # read the temperature taken during that measurement
    time.sleep(0.1)
    data0 = bus.read_byte(0x40)
    data1 = bus.read_byte(0x40)
    cTemp = ((data0 * 256 + data1) * 175.72 / 65536.0) - 46.85
    fTemp = cTemp * 1.8 + 32
    sensor_data["humidity"] = humidity
    sensor_data["temperature"] = fTemp
def clear_data():
for key in sensor_data:
sensor_data[key] = None
def read_from_ser(ser):
    # Read one "<device id><values>" line from an Arduino, if one is connected.
    if ser is not None:
        try:
            ser.readline()  # discard a possibly partial line
            time.sleep(0.1)
            # decode so the '0'/'1' device prefix compares as a character under Python 3
            data = ser.readline().decode('ascii', errors='ignore').strip()
        except serial.serialutil.SerialException:
            clear_data()
            return
        if len(data) == 0:
            return
        if data[0] == '0':
            # Read from Plant Monitor
            try:
                sensor_data["moisture"] = int(data[1])
            except (ValueError, IndexError):
                pass
        elif data[0] == '1':
            # Read from Outlet Guard
            try:
                sensor_data["light"] = int(data[1])
                sensor_data["relay"] = int(data[2])
                ser.write(str(int(power)).encode('ascii'))
            except (ValueError, IndexError, serial.serialutil.SerialException):
                pass
        ser.flushInput()
while (True):
if ser1 is None:
try:
ser1 = serial.Serial('/dev/ttyACM0', 9600, timeout=0.1)
except serial.serialutil.SerialException:
pass
if ser2 is None:
try:
ser2 = serial.Serial('/dev/ttyACM1', 9600, timeout=0.1)
except serial.serialutil.SerialException:
pass
read_from_ser(ser1)
read_from_ser(ser2)
getTnH()
print_data()
time.sleep(0.5)
clear_data()
```
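The raw-word conversions in `getTnH` match the Si7021/HTU21D datasheet formulas (RH = raw·125/65536 − 6; T = raw·175.72/65536 − 46.85 °C). A hardware-free sketch of the same arithmetic, using made-up raw bytes:
```python
# Pure-Python version of the conversions used in getTnH(); no I2C bus required.
def raw_to_humidity(data0, data1):
    """Convert a two-byte relative-humidity reading to %RH."""
    return ((data0 * 256 + data1) * 125 / 65536.0) - 6

def raw_to_fahrenheit(data0, data1):
    """Convert a two-byte temperature reading to degrees Fahrenheit."""
    c_temp = ((data0 * 256 + data1) * 175.72 / 65536.0) - 46.85
    return c_temp * 1.8 + 32

print(raw_to_humidity(0x63, 0x52))    # ~42.5 %RH
print(raw_to_fahrenheit(0x66, 0x4C))  # ~74.1 F
```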
|
{
"source": "Jessica-Andrew/CodeDrillz",
"score": 4
}
|
#### File: Jessica-Andrew/CodeDrillz/Tasks.py
```python
# Practice exercises: three small versions of cat() that combine numbers.
def cat(x, y, z):
    w = (x + y * z) + (x - z)
    return w
print(cat(1, 2, 3))
x = int(input())
y = int(input())
def cat(a, b):
    # add the two arguments rather than relying on the globals above
    w = a + b
    return w
print(cat(x, y))
def cat():
    # read two numbers and return their sum
    a = int(input())
    b = int(input())
    w = a + b
    return w
print(cat())
```
#### File: Jessica-Andrew/CodeDrillz/timestable.py
```python
def print_table(n):
""" (int) -> NoneType
Print the multiplication table for numbers 1 through n inclusive.
>>> print_table(5)
1 2 3 4 5
1 1 2 3 4 5
2 2 4 6 8 10
3 3 6 9 12 15
4 4 8 12 16 20
5 5 10 15 20 25
"""
# The numbers to include in the table.
numbers = list(range(1, n + 1))
# Print the header row.
for i in numbers:
print('\t' + str(i), end='')
# End the header row.
print()
# Print each row number and the contents of each row.
for i in numbers:
        print(i, end='')
for j in numbers:
print('\t' + str(i * j), end='')
# End the current row.
print()
```
|
{
"source": "jessica-angel7/qiskit-metal",
"score": 2
}
|
#### File: analyses/quantization/lumped_oscillator_model.py
```python
import pandas as pd
from pint import UnitRegistry
from pyEPR.calcs.convert import Convert
from qiskit_metal.designs import QDesign # pylint: disable=unused-import
from ..core import QAnalysis
from ..simulation import LumpedElementsSim
from ... import Dict
from ... import config
if not config.is_building_docs():
from .lumped_capacitive import extract_transmon_coupled_Noscillator
# TODO: eliminate every reference to "renderer" in this file
class LOManalysis(QAnalysis):
"""Performs Lumped Oscillator Model analysis on a simulated or user-provided capacitance matrix.
Default Setup:
* junctions (Dict)
* Lj (float): Junction inductance (in nH)
* Cj (float): Junction capacitance (in fF)
* freq_readout (float): Coupling readout frequency (in GHz).
* freq_bus (Union[list, float]): Coupling bus frequencies (in GHz).
* freq_bus can be a list with the order they appear in the capMatrix.
Data Labels:
* lumped_oscillator (pd.DataFrame): Lumped oscillator result at the last simulation pass
* lumped_oscillator_all (dict): of pd.DataFrame. Lumped oscillator resulting
at every pass of the simulation
"""
default_setup = Dict(junctions=Dict(Lj=12, Cj=2),
freq_readout=7.0,
freq_bus=[6.0, 6.2])
"""Default setup."""
# supported labels for data generated from the simulation
data_labels = ['lumped_oscillator', 'lumped_oscillator_all']
"""Default data labels."""
def __init__(self, design: 'QDesign', renderer_name: str = 'q3d'):
"""Initialize the Lumped Oscillator Model analysis.
Args:
design (QDesign): Pointer to the main qiskit-metal design.
Used to access the QRenderer.
renderer_name (str, optional): Which renderer to use. Defaults to 'q3d'.
"""
# set design and renderer
self.sim = None if renderer_name is None else LumpedElementsSim(
design, renderer_name)
super().__init__()
@property
def lumped_oscillator(self) -> dict:
"""Getter
Returns:
dict: Lumped oscillator result at the last simulation pass.
"""
return self.get_data('lumped_oscillator')
@lumped_oscillator.setter
def lumped_oscillator(self, data: dict):
"""Setter
Args:
data (dict): Lumped oscillator result at the last simulation pass.
"""
if not isinstance(data, dict):
self.logger.warning(
'Unsupported type %s. Only accepts dict. Please try again.',
{type(data)})
return
self.set_data('lumped_oscillator', data)
@property
def lumped_oscillator_all(self) -> pd.DataFrame:
"""Getter
Returns:
pd.DataFrame: each line corresponds to a simulation pass number
and the remainder of the data is the respective lump oscillator information.
"""
return self.get_data('lumped_oscillator_all')
@lumped_oscillator_all.setter
def lumped_oscillator_all(self, data: pd.DataFrame):
"""Setter
Args:
data (pd.DataFrame): each line corresponds to a simulation pass number
and the remainder of the data is the respective lump oscillator information.
"""
if not isinstance(data, pd.DataFrame):
self.logger.warning(
'Unsupported type %s. Only accepts pd.DataFrame. Please try again.',
{type(data)})
return
self.set_data('lumped_oscillator_all', data)
def run(self, *args, **kwargs):
"""Executes sequentially the system capacitance simulation (if a renderer was provided
at creation of this object) and lom extraction by executing the methods
LumpedElementsSim.run_sim(`*args`, `**kwargs`) and LOManalysis.run_lom().
For input parameter, see documentation for LumpedElementsSim.run_sim().
Returns:
(dict): Pass numbers (keys) and respective lump oscillator information (values).
"""
if isinstance(self.sim, LumpedElementsSim):
self.sim.run(*args, **kwargs)
return self.run_lom()
def run_lom(self):
"""Executes the lumped oscillator extraction from the capacitance matrix,
and based on the setup values.
Returns:
dict: Pass numbers (keys) and their respective capacitance matrices (values).
"""
# wipe data from the previous run (if any)
self.clear_data()
s = self.setup
if self.sim.capacitance_matrix is None:
self.logger.warning(
'Please initialize the capacitance_matrix before executing this method.'
)
return
if not self.sim.capacitance_all_passes:
self.sim.capacitance_all_passes[
1] = self.sim.capacitance_matrix.values
ureg = UnitRegistry()
ic_amps = Convert.Ic_from_Lj(s.junctions.Lj, 'nH', 'A')
cj = ureg(f'{s.junctions.Cj} fF').to('farad').magnitude
fread = ureg(f'{s.freq_readout} GHz').to('GHz').magnitude
fbus = [ureg(f'{freq} GHz').to('GHz').magnitude for freq in s.freq_bus]
# derive number of coupling pads
num_cpads = 2
if isinstance(fread, list):
num_cpads += len(fread) - 1
if isinstance(fbus, list):
num_cpads += len(fbus) - 1
# get the LOM for every pass
all_res = {}
for idx_cmat, df_cmat in self.sim.capacitance_all_passes.items():
res = extract_transmon_coupled_Noscillator(
df_cmat,
ic_amps,
cj,
num_cpads,
fbus,
fread,
g_scale=1,
print_info=bool(
idx_cmat == len(self.sim.capacitance_all_passes)))
all_res[idx_cmat] = res
self.lumped_oscillator = all_res[len(self.sim.capacitance_all_passes)]
all_res = pd.DataFrame(all_res).transpose()
all_res['χr MHz'] = abs(all_res['chi_in_MHz'].apply(lambda x: x[0]))
all_res['gr MHz'] = abs(all_res['gbus'].apply(lambda x: x[0]))
self.lumped_oscillator_all = all_res
return self.lumped_oscillator_all
def plot_convergence(self, *args, **kwargs):
"""Plots alpha and frequency versus pass number, as well as convergence of delta (in %).
It accepts the same inputs as run_lom(), to allow regenerating the LOM
results before plotting them.
"""
if self.lumped_oscillator_all is None or args or kwargs:
self.run_lom(*args, **kwargs)
# TODO: copy plot_convergence_main() from pyEPR and move it here
self.sim.renderer.plot_convergence_main(self.lumped_oscillator_all)
def plot_convergence_chi(self, *args, **kwargs):
"""Plot convergence of chi and g, both in MHz, as a function of pass number.
It accepts the same inputs as run_lom(), to allow regenerating the LOM
results before plotting them.
"""
if self.lumped_oscillator_all is None or args or kwargs:
self.run_lom(*args, **kwargs)
# TODO: copy plot_convergence_main() from pyEPR and move it here
self.sim.renderer.plot_convergence_chi(self.lumped_oscillator_all)
```
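Typical use pairs the class with a design and a renderer, then reads back the `lumped_oscillator_all` table. A hedged sketch (the `design` object, component name `Q1` and pin name `readout` are placeholders; a working qiskit-metal design and an Ansys Q3D-compatible renderer are assumed):
```python
from qiskit_metal.analyses.quantization import LOManalysis

lom = LOManalysis(design, renderer_name='q3d')
# Adjust the defaults shown in default_setup above.
lom.setup.junctions.Lj = 14      # nH
lom.setup.freq_readout = 6.8     # GHz
lom.setup.freq_bus = [6.0, 6.2]  # GHz
# run() drives the capacitance simulation (when a renderer is attached), then run_lom().
results = lom.run(components=['Q1'], open_terminations=[('Q1', 'readout')])
print(results[['χr MHz', 'gr MHz']])   # columns added in run_lom() above
lom.plot_convergence()
```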
|
{
"source": "jessicaaustin/CoTeDe",
"score": 2
}
|
#### File: CoTeDe/cotede/misc.py
```python
import numpy as np
# ============================================================================
# I need to improve this, and include the places where the
# flags are masked, i.e. only eliminate where the flags
# could guarantee it was false.
def combined_flag(flags, criteria=None):
""" Returns the combined flag considering all the criteria
Input: flags
Collects all flags in the criteria, and for each measurements, it
return the maximum flag value among the different criteria.
If criteria is not defined, considers all the flags,
i.e. flags.keys()
"""
assert hasattr(flags, 'keys')
if criteria is None:
criteria = list(flags.keys())
output = np.asanyarray(flags[criteria[0]])
for c in criteria[1:]:
assert len(flags[c]) == len(output)
output = np.max([output, flags[c]], axis=0)
return output
def make_qc_index(flags, criteria, type="anytrue"):
    ind = flags[criteria[0]].copy()
    if type == "anytrue":
        for c in criteria:
            # element-wise comparison across the flag arrays
            ind[(ind == True) | (np.array(flags[c]) == True)] = True
    elif type == "alltrue":
        for c in criteria:
            ind[(ind == True) | (np.array(flags[c]) == True)] = True
        for c in criteria:
            ind[(ind == False) | (np.array(flags[c]) == False)] = False
    return ind
```
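`combined_flag` takes the element-wise maximum across the selected criteria, so a tiny example with hand-made flag arrays shows the behaviour (assuming CoTeDe is installed so this module imports as `cotede.misc`):
```python
import numpy as np
from cotede.misc import combined_flag

flags = {
    'global_range': np.array([1, 1, 4, 1], dtype='i1'),
    'spike':        np.array([1, 3, 1, 9], dtype='i1'),
}
print(combined_flag(flags))                             # [1 3 4 9]
print(combined_flag(flags, criteria=['global_range']))  # [1 1 4 1]
```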
#### File: cotede/utils/profilescollection.py
```python
import time
# import logging
import multiprocessing as mp
import numpy as np
from numpy import ma
try:
    # make_file_list and CNVError come from the optional PySeabird dependency
    from seabird.exceptions import CNVError
    from seabird.utils import make_file_list
except ImportError:
    pass
import cotede.qc
def process_profiles_serial(inputfiles, cfg=None, saveauxiliary=False,
verbose=True):
# verbose=True, logger=None):
""" Quality control a list of CTD files
"""
# logger = logger or logging.getLogger(__name__)
print("This function will be removed. If interested in processing CTD files, please install the package PySeabird (pip install seabird).")
import time; time.sleep(5)
profiles = []
for f in inputfiles:
try:
p = cotede.qc.fProfileQC(f, cfg, saveauxiliary, verbose=verbose)
profiles.append(p)
except CNVError as e:
print(e.msg)
# logger.warn(e.msg)
return profiles
def process_profiles(inputfiles, cfg=None, saveauxiliary=True,
verbose=True, timeout=60):
# verbose=True, timeout=60, logger=None):
""" Quality control a list of CTD files in parallel
"""
print("This function will be removed. If interested in processing CTD files, please install the package PySeabird (pip install seabird).")
import time; time.sleep(5)
# logger = logger or logging.getLogger(__name__)
npes = 2 * mp.cpu_count()
npes = min(npes, len(inputfiles))
# logger.debug("Running with %s npes" % npes)
queuesize = 3*npes
# logger.debug("queue size: %s" % queuesize)
qout = mp.Queue(queuesize)
teste = []
def run_qc(inputfiles, cfg, saveauxiliary, verbose):
def process_file(f, cfg, saveauxiliary, verbose=verbose):
try:
if verbose is True:
print("Loading: %s" % f)
# logger.debug("fProfileQC: %s" % f)
p = cotede.qc.fProfileQC(f, cfg, saveauxiliary, verbose)
# logger=logger)
attrs = [pn.attributes for pn in p.data]
# logger.debug("Sending profile %s to queue" % f)
qout.put([p, attrs], block=True)
# logger.debug("Sent to queue")
except CNVError as e:
print(e.msg)
# logger.warn(e.msg)
pool = []
for f in inputfiles[:npes]:
pool.append(mp.Process(target=process_file,
args=(f, cfg, saveauxiliary, verbose)))
pool[-1].start()
for i, f in enumerate(inputfiles[npes:]):
n = i % npes
pool[n].join(timeout)
if pool[n].is_alive():
print("timeout: %s" % pool[n])
pool[n].terminate()
pool[n] = mp.Process(target=process_file,
args=(f, cfg, saveauxiliary, verbose))
pool[n].start()
for p in pool:
p.join(timeout)
if p.is_alive():
print("timeout: %s" % p)
p.terminate()
print("Done evaluating.")
worker = mp.Process(target=run_qc,
args=(inputfiles, cfg, saveauxiliary, verbose))
worker.start()
profiles = []
while worker.is_alive() or not qout.empty():
if qout.empty():
# logger.debug("Queue is empty. I'll give a break.")
# print("Queue is empty. I'll give a break.")
time.sleep(2)
else:
# logger.debug("There are results waiting in queue")
# Dummy way to fix pickling on Queue
# When the fProfile object is sent through the Queue, each
# data loses its .attributes.
# Improve this in the future.
out, attrs = qout.get()
for i, a in enumerate(attrs):
out.data[i].attributes = a
print("Collected: %s" % out.attributes['filename'])
profiles.append(out)
# logger.debug("Done. Worker finished and queue is empty")
worker.terminate()
return profiles
class ProfilesQCCollection(object):
""" Load a collection of ProfileQC from a directory
"""
    def __init__(self, inputdir, inputpattern=r".*\.cnv",
cfg=None, saveauxiliary=False, timeout=60):
"""
"""
print("This function will be removed. If interested in processing CTD files, please install the package PySeabird (pip install seabird).")
import time; time.sleep(5)
self.name = "ProfilesQCCollection"
self.inputfiles = make_file_list(inputdir, inputpattern)
self.profiles = process_profiles(self.inputfiles, cfg, saveauxiliary,
timeout=timeout)
# self.profiles = process_profiles_serial(self.inputfiles, cfg,
# saveauxiliary)
self.data = {'id': [], 'profileid': [], 'profilename': []}
self.flags = {}
if saveauxiliary is True:
self.auxiliary = {}
offset = 0
for p in self.profiles:
N = p['timeS'].size
            # Be sure that all have the same length.
for v in p.keys():
assert p[v].size == N
ids = offset + np.arange(N)
self.data['id'] = np.append(self.data['id'],
ids).astype('i')
profileid = [p.attributes['md5']] * N
self.data['profileid'] = np.append(self.data['profileid'],
profileid)
profilename = [p.attributes['filename']] * N
self.data['profilename'] = np.append(self.data['profilename'],
profilename)
for v in p.keys():
if v not in self.data:
self.data[v] = ma.masked_all(offset)
self.data[v] = ma.append(self.data[v], p[v])
# ---- Dealing with the flags --------------------------------
for v in p.flags.keys():
if v not in self.flags:
self.flags[v] = {'id': [], 'profileid': []}
self.flags[v]['id'] = np.append(self.flags[v]['id'],
ids).astype('i')
self.flags[v]['profileid'] = np.append(
self.flags[v]['profileid'], profileid)
for t in p.flags[v]:
if t not in self.flags[v]:
self.flags[v][t] = ma.masked_all(offset)
self.flags[v][t] = ma.append(self.flags[v][t],
p.flags[v][t])
offset += N
return
class ProfilesQCPandasCollection(object):
""" Quality Control a collection of ProfileQC from a directory
Search all profiles inside the given directory and evaluate
all them. The output is stored in a continuous table, where
each profile receives a unique profileid value.
This class was build thinking on join analysis of a batch of
profiles, like all profiles from a specific cruise, for
example.
"""
    def __init__(self, inputdir, inputpattern=r".*\.cnv",
cfg=None, saveauxiliary=False, timeout=60):
"""
"""
print("This function will be removed. If interested in processing CTD files, please install the package PySeabird (pip install seabird).")
import time; time.sleep(5)
try:
import pandas as pd
        except ImportError:
print("Pandas is not available.")
return
self.name = "ProfilesQCPandasCollection"
self.inputfiles = make_file_list(inputdir, inputpattern)
self.profiles = process_profiles(self.inputfiles, cfg, saveauxiliary,
timeout=timeout)
#self.profiles = process_profiles_serial(self.inputfiles, cfg,
# saveauxiliary)
self.data = pd.DataFrame()
self.flags = {}
if saveauxiliary is True:
self.auxiliary = {}
for p in self.profiles:
try:
# ---- Dealing with the data ---------------------------------
# FIXME: This expects a CNV object with as_DataFrame. I must
# generalize this.
tmp = p.input.as_DataFrame()
profileid = p.attributes['md5']
tmp['profileid'] = profileid
tmp['profilename'] = p.attributes['filename']
cont_id = range(len(self.data), len(self.data)+len(tmp))
tmp['id'] = cont_id
tmp.set_index('id', inplace=True)
self.data = pd.concat([self.data, tmp])
# ---- Dealing with the flags --------------------------------
V = [v for v in p.flags.keys() if v != 'common']
for v in V:
tmp = pd.DataFrame(p.flags[v])
for f in p.flags['common']:
tmp[f] = p.flags['common'][f]
tmp['id'] = cont_id
tmp.set_index('id', inplace=True)
if v not in self.flags:
self.flags[v] = pd.DataFrame(tmp)
else:
self.flags[v] = pd.concat([self.flags[v],
pd.DataFrame(tmp)])
# ---- Dealing with the auxiliary -----------------------------
if saveauxiliary is True:
for a in p.auxiliary.keys():
tmp = pd.DataFrame(p.auxiliary[a])
tmp['id'] = cont_id
tmp.set_index('id', inplace=True)
if a not in self.auxiliary:
self.auxiliary[a] = pd.DataFrame(tmp)
else:
self.auxiliary[a] = pd.concat([self.auxiliary[a],
pd.DataFrame(tmp)])
            except Exception:
                print("Failed to process %s" % p.attributes['filename'])
def keys(self):
return [k for k in self.flags.keys()]
def __getitem__(self, key):
tmp = self.flags[key].copy()
tmp[key] = self.data[key]
tmp['timeS'] = self.data['timeS']
tmp['PRES'] = self.data['PRES']
return tmp
    def save(self, filename):
        import pandas as pd
        store = pd.HDFStore(filename)
# self.data.to_hdf("%s_data.hdf" % filename, 'df')
store.append('data', self.data)
for k in self.flags.keys():
# self.flags[k].to_hdf("%s_flags_%s.hdf" % (filename, k), 'df')
store.append("flags_%s" % k, self.flags[k])
if hasattr(self, 'auxiliary'):
for k in self.auxiliary.keys():
# self.auxiliary[k].to_hdf("%s_flags_%s.hdf" % (filename, k), 'df')
store.append("auxiliary_%s" % k, self.auxiliary[k])
```
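Both collection classes follow the same pattern: point them at a directory of CNV casts and they QC every file, concatenating the results keyed by `profileid`. A hedged usage sketch (the directory and output names are placeholders, and the deprecated PySeabird-based loaders these classes warn about must be installed):
```python
from cotede.utils.profilescollection import ProfilesQCPandasCollection

collection = ProfilesQCPandasCollection(
    './cruise_2015',                  # placeholder directory of .cnv files
    inputpattern=r".*\.cnv",
    cfg='cotede',
    saveauxiliary=True,
)
print(collection.keys())              # variables that received flags, e.g. ['TEMP', 'PSAL']
temp = collection['TEMP']             # flags plus the TEMP, timeS and PRES columns
collection.save('cruise_2015_qc.h5')  # persist data/flags/auxiliary to an HDF5 store
```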
#### File: tests/fuzzy/test_memberships.py
```python
import numpy as np
from numpy.testing import assert_allclose
from cotede.fuzzy.membership_functions import smf, zmf, trimf, trapmf
def test_smf():
x = [-4, 5.1, 0.1]
p = [-6.3, 4]
test = smf(x, p)
expected = np.array([ 0.09972665, 1., 0.71326232])
assert_allclose(test, expected)
def test_zmf():
x = [-4, 5.1, 0.1]
p = [-6.3, 4]
test = zmf(x, p)
expected = np.array([ 0.90027335, 0., 0.28673768])
assert_allclose(test, expected)
def test_trimf():
x = [-4, 5.1, 0.1, 0]
p = [-6.3, 0, 4]
test = trimf(x, p)
expected = np.array([ 0.36507937, 0., 0.975, 1.])
assert_allclose(test, expected)
def test_trapmf():
x = [-4, 5.1, 0.1, 0]
p = [-6.3, -1, 0, 2]
test = trapmf(x, p)
expected = np.array([ 0.43396226, 0., 0.95, 1.])
assert_allclose(test, expected)
```
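The expected values above follow the standard fuzzy membership definitions; `trapmf(x, [a, b, c, d])`, for instance, is the trapezoid that ramps up over [a, b], equals 1 on [b, c] and ramps down over [c, d]. A minimal reference sketch of that one function, checked against the same inputs (an illustration of the definition, not CoTeDe's implementation):
```python
import numpy as np
from numpy.testing import assert_allclose

def trapmf_reference(x, p):
    """Trapezoidal membership: ramp on [a, b], 1 on [b, c], ramp down on [c, d]."""
    a, b, c, d = p
    x = np.asarray(x, dtype=float)
    up = (x - a) / (b - a)
    down = (d - x) / (d - c)
    return np.clip(np.minimum(up, np.minimum(1.0, down)), 0.0, 1.0)

assert_allclose(trapmf_reference([-4, 5.1, 0.1, 0], [-6.3, -1, 0, 2]),
                [0.43396226, 0., 0.95, 1.])
```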
#### File: tests/qctests/test_qc_gradient.py
```python
import numpy as np
from numpy import ma
from cotede.qctests import Gradient
from data import DummyData
def test():
profile = DummyData()
feature = ma.masked_array([0, 1.25, 5.875, 0],
mask=[True, False, False, True])
cfg = {'threshold': 6, 'flag_good': 1, 'flag_bad': 4}
y = Gradient(profile, 'TEMP', cfg)
y.test()
assert type(y.features) is dict
# assert ma.allclose(y.features['gradient'], feature)
# assert ma.allclose(y.flags['gradient'],
# np.array([0, 1, 1, 0], dtype='i1'))
```
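The commented-out assertion hints at what the Gradient check computes: for each interior sample the feature is the difference between the value and the mean of its neighbours, x_i - (x_{i+1} + x_{i-1})/2, with the end points masked. A stand-alone sketch of that formula (an illustration of the classic gradient test, not CoTeDe's exact implementation):
```python
import numpy as np
from numpy import ma

def gradient_feature(x):
    """x_i - (x_{i+1} + x_{i-1}) / 2 for interior points; endpoints stay masked."""
    x = ma.asarray(x, dtype=float)
    y = ma.masked_all(x.shape)
    y[1:-1] = x[1:-1] - (x[2:] + x[:-2]) / 2.0
    return y

temp = [25.32, 25.34, 25.34, 25.31]   # hypothetical short temperature profile
print(gradient_feature(temp))          # [-- 0.01 0.015 --]
```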
#### File: tests/qctests/test_qc_morello2014.py
```python
import numpy as np
from cotede.qc import ProfileQC
from data import DummyData
def test():
"""
"""
profile = DummyData()
pqc = ProfileQC(profile, cfg='morello2014')
assert 'morello2014' in pqc.flags['TEMP']
assert 'morello2014' in pqc.flags['PSAL']
assert profile['TEMP'].shape == pqc.flags['TEMP']['morello2014'].shape
assert profile['PSAL'].shape == pqc.flags['PSAL']['morello2014'].shape
# assert sorted(np.unique(pqc.flags['TEMP']['morello2014'])) == [1, 2, 3, 4]
# assert sorted(np.unique(pqc.flags['TEMP2']['morello2014'])) == [1]
# assert sorted(np.unique(pqc.flags['PSAL']['morello2014'])) == [1, 2, 4]
# assert sorted(np.unique(pqc.flags['PSAL2']['morello2014'])) == [1]
```
#### File: CoTeDe/tests/test_load_cfg.py
```python
import os.path
import pkg_resources
from cotede.utils import load_cfg, cotede_dir
CFG = [f[:-5] for f in pkg_resources.resource_listdir('cotede', 'qc_cfg')
if f[-5:] == '.json']
def test_no_local_duplicate_cfg():
""" Avoid local cfg of default procedures
Guarantee that there is no local copy of a default cfg json file,
otherwise the default cfg could be breaking, and the tests safely
escaping into a local, non-distributed, cfg.
"""
for cfg in CFG:
local_cfg = os.path.join(cotede_dir(), "cfg", "%s.json" % cfg)
assert not os.path.exists(local_cfg), \
"Redundant local cfg file: %s" % cfg
def test_inout():
""" load_cfg shouldn't modify input variable cfg
"""
cfg = 'cotede'
out = load_cfg(cfg)
assert out != cfg
def test_dict():
"""Test a user dict config
It is possible to define a full config instead of choosing one of the
builtins. This is done by giving a dictionary with the correct
structure.
"""
cfg = {'common': {'valid_datetime': None}}
cfg_out = load_cfg(cfg)
assert 'common' in cfg_out, "Missing 'common' in load_cfg output"
assert cfg_out['common'] == cfg['common']
def test_default():
cfg_out = load_cfg()
assert isinstance(cfg_out, dict)
assert len(cfg_out) > 0
def test_factory_cfgs():
"""Load all available configs, one at a time
CoTeDe comes with builtin config. This test checks if can
load all those available configs.
"""
for cfg in CFG:
print("Loading %s" % cfg)
try:
cfg_out = load_cfg(cfg)
except:
assert False, "Couldn't load: %s" % cfg
assert isinstance(cfg_out, dict)
assert len(cfg_out) > 0
# Missing a test to load cfg at ~/.cotede
def test_dict_input():
"""Test a dictionary input, i.e. manually defined config
It should return the same dictionary
"""
cfg = {'temperature': {'global_range': 'test'}}
cfg_out = load_cfg(cfg)
assert cfg_out['variables']['temperature'] == cfg['temperature']
def test_inheritance():
"""Test inheritance
"""
cfg = load_cfg('cotede')
cfg2 = load_cfg({'inherit': 'cotede'})
for c in cfg:
        assert c in cfg2
assert cfg[c] == cfg2[c]
def test_inheritance_priority():
"""Test priority when inheriting
When inheritance is a list, the first item has priority over
the last one.
"""
def walk_and_check(cfg, cfg2):
for c in cfg:
assert c in cfg2, "Missing %s in inherited cfg2" % c
if not isinstance(cfg[c], dict):
assert cfg[c] == cfg2[c], \
"Missing %s in cfg2" % c
else:
walk_and_check(cfg[c], cfg2[c])
cfg = load_cfg('cotede')
# If is a list, the last is the lowest priority
cfg2 = load_cfg({'inherit': ['cotede', 'gtspp']})
walk_and_check(cfg, cfg2)
try:
cfg2 = load_cfg({'inherit': ['gtspp', 'cotede']})
walk_and_check(cfg, cfg2)
failed = False
except:
failed = True
assert failed, "It should fail in inverse priority"
```
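The inheritance tests above cover the main subtlety of `load_cfg`: a user dict can extend a built-in config through an `inherit` key, with earlier entries taking priority. A short hedged sketch (the threshold value is illustrative only):
```python
from cotede.utils import load_cfg

# Start from the built-in 'gtspp' config and override one temperature check.
custom = {
    'inherit': 'gtspp',
    'temperature': {'spike': {'threshold': 1.5}},
}
cfg = load_cfg(custom)
# Same layout as load_cfg('gtspp'), with the user's temperature settings
# merged in under 'variables'.
print(cfg['variables']['temperature'])
```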
#### File: CoTeDe/tests/test_pqctypes.py
```python
import numpy as np
from cotede.qc import ProfileQC
from data import DummyData
def test():
profile = DummyData()
pqc = ProfileQC(profile)
# assert type(pqc.keys()) == list
assert type(pqc.attributes) == dict
assert hasattr(pqc, 'input')
assert hasattr(pqc, 'flags')
assert hasattr(pqc, 'features')
assert type(pqc.flags) == dict
for k in pqc.flags.keys():
assert type(pqc.flags[k]) == dict
for kk in pqc.flags[k].keys():
assert (type(pqc.flags[k][kk]) == np.ndarray) or \
(type(pqc.flags[k][kk]) == int)
if (type(pqc.flags[k][kk]) == np.ndarray):
assert pqc.flags[k][kk].dtype == 'int8'
```
#### File: CoTeDe/tests/test_serialize.py
```python
import pickle
import numpy as np
from cotede.qc import ProfileQC
from data import DummyData
def test_serialize_ProfileQC():
""" Serialize ProfileQC
"""
profile = DummyData()
pqc = ProfileQC(profile)
pqc2 = pickle.loads(pickle.dumps(pqc))
assert sorted(pqc.data.keys()) == sorted(pqc2.data.keys())
for v in pqc.data:
assert np.allclose(pqc[v], pqc2[v])
assert sorted(pqc.attributes.keys()) == sorted(pqc2.attributes.keys())
for v in pqc.attributes:
assert pqc.attributes[v] == pqc2.attributes[v]
assert sorted(pqc.flags.keys()) == sorted(pqc2.flags.keys())
for v in pqc.flags:
for f in pqc.flags[v]:
assert np.allclose(pqc.flags[v][f], pqc2.flags[v][f])
```
|
{
"source": "jessica-beckenbach/maya-pagoda-tool",
"score": 2
}
|
#### File: jessica-beckenbach/maya-pagoda-tool/pagoda-tool.py
```python
import maya.cmds as cm
#-----------------functions that don't make geometry
def makeFinalCombine_MultipleStorey():
#unites all geometry into one, for buildings with more than one storey
if merge==True:
cm.select( 'floor_g*' , 'roof_gr*' , 'topRoof_group' , r=True )
cm.polyUnite( n='Temple' )
cm.delete( ch=True )
def extrudeLowerRoofShape( faceNumber , x , y ):
cm.polyExtrudeFacet( "balconyBottomFloor.f[" + faceNumber + "]" , ltz=1.31 * x , lty=1.05 * y )
cm.polyExtrudeFacet( "balconyBottomFloor.f[" + faceNumber + "]" , ltz=0.7 * x , lty=0.35 * y )
cm.polyExtrudeFacet( "balconyBottomFloor.f[" + faceNumber + "]" , ltz=0.55 * x , lty=0.16 * y )
cm.polyExtrudeFacet( "balconyBottomFloor.f[" + faceNumber + "]" , ltz=0.6 * x , lty=0.08 * y )
cm.polyExtrudeFacet( "balconyBottomFloor.f[" + faceNumber + "]" , ltz=1.35 * x )
def mergeCorners( vtxs , x , z ):
cm.select( "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx1"] + "]", "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx2"] + "]", r=True )
cm.move( 0, 0, 1.31 * z , r=True )
cm.select( "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx3"] + "]", "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx4"] + "]", r=True )
cm.move( 1.31 * x , 0, 0, r=True )
cm.polyMergeVertex( "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx2"] + "]", "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx6"] + "]", d=0.2 )
cm.polyMergeVertex( "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx1"] + "]", "balconyBottomFloor.vtx[" + vtxs["lvl1"]["vtx5"] + "]", d=0.2 )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx2'] + ']', r=True )
cm.move( 0, 0, 2.013 * z , r=True )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx3'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx4'] + ']', r=True )
cm.move( 2.013 * x , 0, 0, r=True )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx2'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx6'] + ']', d=0.2 )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl2']['vtx5'] + ']', d=0.2 )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx2'] + ']', r=True )
cm.move( 0, 0, 2.575 * z , r=True )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx3'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx4'] + ']', r=True )
cm.move( 2.575 * x , 0, 0, r=True )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx5'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx6'] + ']', d=0.2 )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx7'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl3']['vtx8'] + ']', d=0.2 )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx2'] + ']', r=True )
cm.move( 0, 0, 3.23 * z , r=True )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx3'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx4'] + ']', r=True )
cm.move( 3.23 * x , 0, 0, r=True )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx5'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx6'] + ']', d=0.2 )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx7'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl4']['vtx8'] + ']', d=0.2 )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx2'] + ']', r=True )
cm.move( 0, 0, 4.5 * z , r=True )
cm.select( 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx3'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx4'] + ']', r=True )
cm.move( 4.5 * x , 0, 0, r=True )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx2'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx6'] + ']', d=0.2 )
cm.polyMergeVertex( 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx1'] + ']', 'balconyBottomFloor.vtx[' + vtxs['lvl5']['vtx5'] + ']', d=0.2 )
def extrudeUpperRoofShape( faceNumber , x , y ):
cm.polyExtrudeFacet( "tr_roof.f[" + faceNumber + "]" ,
ltz=float(width)/1.15 *x , lty=float(width)/1.1 *y )
cm.polyExtrudeFacet( "tr_roof.f[" + faceNumber + "]" ,
ltz=float(width)/1.5 *x, lty=float(width)/1.76 *y )
cm.polyExtrudeFacet( "tr_roof.f[" + faceNumber + "]" ,
ltz=float(width)/1.578 *x, lty=float(width)/2.5 *y )
cm.polyExtrudeFacet( "tr_roof.f[" + faceNumber + "]" ,
ltz=float(width)/1.66 *x, lty=float(width)/6 *y )
cm.polyExtrudeFacet( "tr_roof.f[" + faceNumber + "]" ,
ltz=float(width)/1.76 *x, lty=float(width)/10 *y )
def mergeTopCorners( vtxs , x , z ):
cm.select( "tr_roof.vtx[" + vtxs['lvl1']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl1']['vtx2'] + "]", r=True )
cm.move( 0, 0, float(width)/1.161 * z , r=True )
cm.select( "tr_roof.vtx[" + vtxs['lvl1']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl1']['vtx4'] + "]", r=True )
cm.move( float(width)/1.161 * x , 0, 0, r=True )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl1']['vtx2'] + "]", "tr_roof.vtx[" + vtxs['lvl1']['vtx6'] + "]", d=float(width)/10 )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl1']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl1']['vtx5'] + "]", d=float(width)/10 )
cm.select( "tr_roof.vtx[" + vtxs['lvl2']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl2']['vtx2'] + "]", r=True )
cm.move( 0, 0, float(width)/0.663 * z , r=True )
cm.select( "tr_roof.vtx[" + vtxs['lvl2']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl2']['vtx4'] + "]", r=True )
cm.move( float(width)/0.663 * x , 0, 0, r=True )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl2']['vtx2'] + "]", "tr_roof.vtx[" + vtxs['lvl2']['vtx6'] + "]", d=float(width)/10 )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl2']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl2']['vtx5'] + "]", d=float(width)/10 )
cm.select( "tr_roof.vtx[" + vtxs['lvl3']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl3']['vtx2'] + "]", r=True )
cm.move( 0, 0, float(width)/0.46 * z , r=True )
cm.select( "tr_roof.vtx[" + vtxs['lvl3']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl3']['vtx4'] + "]", r=True )
cm.move( float(width)/0.46 * x , 0, 0, r=True )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl3']['vtx4'] + "]", "tr_roof.vtx[" + vtxs['lvl3']['vtx6'] + "]", d=float(width)/10 )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl3']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl3']['vtx5'] + "]", d=float(width)/10 )
cm.select( "tr_roof.vtx[" + vtxs['lvl4']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl4']['vtx2'] + "]", r=True )
cm.move( 0, 0, float(width)/0.358 * z , r=True )
cm.select( "tr_roof.vtx[" + vtxs['lvl4']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl4']['vtx4'] + "]", r=True )
cm.move( float(width)/0.358 * x , 0, 0, r=True )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl4']['vtx4'] + "]", "tr_roof.vtx[" + vtxs['lvl4']['vtx6'] + "]", d=float(width)/10 )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl4']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl4']['vtx5'] + "]", d=float(width)/10 )
cm.select( "tr_roof.vtx[" + vtxs['lvl5']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl5']['vtx2'] + "]", r=True )
cm.move( 0, 0, float(width)/0.3 * z , r=True )
cm.select( "tr_roof.vtx[" + vtxs['lvl5']['vtx3'] + "]", "tr_roof.vtx[" + vtxs['lvl5']['vtx4'] + "]", r=True )
cm.move( float(width)/0.3 * x , 0, 0, r=True )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl5']['vtx2'] + "]", "tr_roof.vtx[" + vtxs['lvl5']['vtx6'] + "]", d=float(width)/10 )
cm.polyMergeVertex( "tr_roof.vtx[" + vtxs['lvl5']['vtx1'] + "]", "tr_roof.vtx[" + vtxs['lvl5']['vtx5'] + "]", d=float(width)/10 )
def addCurvePieceEdgeLoops( weight , edges ):
edgeLength = len(edges)
cm.select ( "pillar_curvePiece.e[" + edges[0] + "]" , r=True )
for i in range(1,edgeLength):
cm.select( "pillar_curvePiece.e[" + edges[i] + "]" , tgl=True )
cm.polySplitRing( wt= weight , sma=30, fq=True )
#-----------------functions that make the geometry
#makes base (long part) of pillar
def makePillarLongPart():
#makes long part of pillar
cm.polyCube( d=0.2, w=0.2, h=5, n="pillar_longPart" )
cm.move( 0, 2.5, 0)
#makes little cube on top of pillar
cm.polyCube( d=0.3, w=0.3, h=0.2, n="pillar_longPartTop" )
cm.move( 0, 5, 0 )
#makes the little straight piece
def makePillarStraightPiece():
#create the thing
cm.polyCube( d=0.12, w=0.12, h=0.3, n="pillar_straightPiece" )
#extrude
cm.polyExtrudeFacet( 'pillar_straightPiece.f[1]', ltz=0.1)
cm.scale( 1.5, 1, 1.5 )
cm.polyExtrudeFacet( 'pillar_straightPiece.f[1]', ltz=0.12)
cm.select( "pillar_straightPiece", r=True )
cm.move(0, 5.2, 0)
#makes the little curved piece
def makePillarCurvePiece():
#makes curved piece
cm.polyCube( d=0.5 , w=0.17 , h=0.17 , n="pillar_curvePiece" )
cm.move ( 0, 5.06, 0.25 )
cm.select ( "pillar_curvePiece.e[0]" , r=True )
cm.move ( 0 , 0 , 0.05 , r=True )
cm.select ( "pillar_curvePiece.f[0]" , r=True )
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.1)
cm.rotate ( -25, 0, 0, r=True )
cm.move (0, -0.11, 0.04, r=True)
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.1)
cm.rotate ( -30, 0, 0, r=True)
cm.move ( 0, -0.16, 0.1, r=True )
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.1)
cm.rotate ( -20, 0, 0, r=True)
cm.move ( 0, -0.17, 0.09, r=True )
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.07)
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.095)
cm.scale( 1.5, 1, 1.5 )
cm.move (0, 0, -0.2, r=True)
cm.polyExtrudeFacet( 'pillar_curvePiece.f[0]', ltz=0.1)
#manipulate shape of object
cm.select( "pillar_curvePiece.e[30]", r=True )
cm.move(0, 0, -0.005, r=True )
cm.select( "pillar_curvePiece.e[22]", r=True )
cm.move(0, -0.03, -0.02, r=True )
cm.select( "pillar_curvePiece.e[0]", "pillar_curvePiece.e[3]", r=True )
cm.move (0, 0.02, 0, r=True)
cm.select( "pillar_curvePiece.e[14]", r=True )
cm.move (0, 0.02, -0.004, r=True)
cm.select( "pillar_curvePiece.e[18]", r=True )
cm.move(0, 0, 0, r=True )
cm.select( "pillar_curvePiece.e[26]", r=True )
cm.move(0, -0.03, 0, r=True )
#inserts edge loops
edges = [ '0', '1', '2', '3', '14', '18', '22', '26',
'30', '34', '38', '42', '46', '50', '54', '58' ]
addCurvePieceEdgeLoops( 0.95 , edges )
addCurvePieceEdgeLoops( 0.95 , edges )
addCurvePieceEdgeLoops( 0.1 , edges )
addCurvePieceEdgeLoops( 0.4 , edges )
edges = [ '4', '5', '8', '9', '16', '19', '24', '27', '32', '35', '40', '43',
'48', '51', '56', '59', '74', '90', '106', '122', '138', '154', '170', '186' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.extend( [ '189', '191', '193', '195', '197', '199' ] )
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '36', '37', '39', '41', '68', '80', '100', '112', '132', '144', '164',
'176', '210', '230', '258', '278' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '112' )
edges.remove( '144' )
edges.remove( '176' )
edges.remove( '80' )
edges.remove( '278' )
edges.remove( '230' )
edges.extend( [ '285', '287', '291', '293', '295', '297' ] )
addCurvePieceEdgeLoops( 0.95 , edges )
edges = [ '44', '45', '47', '49', '70', '78', '102', '110', '134', '142', '166',
'174', '212', '228', '260', '276' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '174' )
edges.remove( '142' )
edges.remove( '110' )
edges.remove( '276' )
edges.remove( '78' )
edges.remove( '228' )
edges.extend( [ '349', '351', '355', '357', '359', '361' ] )
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '349' )
edges.remove( '351' )
edges.remove( '355' )
edges.remove( '357' )
edges.remove( '359' )
edges.remove( '361' )
edges.extend( [ '393', '381', '383', '387', '389', '391' ] )
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '381', '383', '387', '389', '391', '393', '412', '417', '427',
'429', '431', '433', '435', '437' ]
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '52', '53', '55', '57', '72', '76', '104', '108', '136',
'140', '168', '172', '214', '226', '262', '274' ]
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '76', '108', '140', '172', '226', '274', '476', '481',
'491', '493', '495', '497', '499', '501', '503', '505']
addCurvePieceEdgeLoops( 0.05 , edges )
#moves pivots
cm.select( "pillar_curvePiece" , r=True )
cm.move( 0, 0, -0.05 , "pillar_curvePiece.scalePivot","pillar_curvePiece.rotatePivot", r=True )
#assembles a normal pillar
def makeNormalPillar():
#first level
makePillarLongPart()
makePillarStraightPiece()
makePillarCurvePiece()
#second level
cm.select( "pillar_curvePiece" , r=True )
cm.duplicate()
cm.move( 0, 0.5, 0.66, r=True)
cm.duplicate()
cm.rotate(0, 90, 0, r=True)
cm.move( 0,0, -0.18, r=True)
cm.duplicate()
cm.rotate(0, 180, 0, r=True)
cm.duplicate('pillar_straightPiece')
cm.select( "pillar_straightPiece1" , r=True )
cm.move( 0, 0.5, 0.655, r=True)
cm.duplicate('pillar_straightPiece')
cm.select( "pillar_straightPiece2" , r=True )
cm.move( 0, 0.5, 0, r=True)
cm.select ( 'pillar_curvePiece1.f[2]' , 'pillar_curvePiece1.f[76]' ,
'pillar_curvePiece1.f[44]' , 'pillar_curvePiece1.f[60]', 'pillar_curvePiece1.f[92]' ,
'pillar_curvePiece1.f[95:99]' , 'pillar_curvePiece1.f[119:123]' , r=True)
cm.move( 0, 0, -0.66, r=True)
#third level
cm.select ( 'pillar_curvePiece1', tgl=True )
cm.select ( 'pillar_curvePiece2', tgl=True )
cm.select ( 'pillar_curvePiece3', tgl=True )
cm.duplicate()
cm.move( 0, 0.5, 0.65, r=True)
cm.select ( 'pillar_curvePiece4.f[2]' , 'pillar_curvePiece4.f[76]' ,
'pillar_curvePiece4.f[44]' , 'pillar_curvePiece4.f[60]', 'pillar_curvePiece4.f[92]' ,
'pillar_curvePiece4.f[95:99]' , 'pillar_curvePiece4.f[119:123]' , r=True)
cm.move( 0, 0, -0.66, r=True)
cm.duplicate('pillar_straightPiece2')
cm.select( "pillar_straightPiece3" , r=True )
cm.move( 0, 0.5, 0, r=True)
cm.duplicate()
cm.move( 0,0, 0.65, r=True)
cm.duplicate()
cm.move( 0,0, 0.67, r=True)
#smooths it
cm.polySmooth( 'pillar_curvePiece' , dv=1 )
cm.polySmooth( 'pillar_curvePiece1' , dv=1 )
cm.polySmooth( 'pillar_curvePiece2' , dv=1 )
cm.polySmooth( 'pillar_curvePiece3' , dv=1 )
cm.polySmooth( 'pillar_curvePiece4' , dv=1 )
cm.polySmooth( 'pillar_curvePiece5' , dv=1 )
cm.polySmooth( 'pillar_curvePiece6' , dv=1 )
#combines it into single mesh
cm.select ( 'pillar*' , r=True )
cm.polyUnite( n='normalPillar' )
cm.delete( ch=True )
#makes the corner pillar
def makeCornerPillar():
#first level
makePillarLongPart()
makePillarStraightPiece()
makePillarCurvePiece()
cm.move( 0, 0, -0.2 , "pillar_curvePiece.scalePivot","pillar_curvePiece.rotatePivot", r=True )
cm.select( "pillar_curvePiece" , r=True )
cm.duplicate()
cm.rotate( 0, -90, 0 , r=True )
cm.duplicate() #makes diagonal piece
cm.rotate( 0, 45, 0 , r=True )
cm.move ( -0.15 , 0 , 0.15, r=True )
cm.select ( 'pillar_curvePiece2.f[2]' , 'pillar_curvePiece2.f[76]' ,
'pillar_curvePiece2.f[44]' , 'pillar_curvePiece2.f[60]', 'pillar_curvePiece2.f[92]' ,
'pillar_curvePiece2.f[95:99]' , 'pillar_curvePiece2.f[119:123]' , r=True)
cm.move ( 0.15 , 0 , -0.15, r=True )
#second level, make medium curve piece
cm.duplicate( 'pillar_curvePiece' )
cm.select( "pillar_curvePiece3" , r=True )
cm.move( 0, 0.5, 0.65, r=True)
cm.select ( 'pillar_curvePiece3.f[2]' , 'pillar_curvePiece3.f[76]' ,
'pillar_curvePiece3.f[44]' , 'pillar_curvePiece3.f[60]', 'pillar_curvePiece3.f[92]' ,
'pillar_curvePiece3.f[95:99]' , 'pillar_curvePiece3.f[119:123]' , r=True)
cm.move ( 0 , 0 , -0.6, r=True )
cm.move( 0, 0, -0.65 , "pillar_curvePiece3.scalePivot","pillar_curvePiece3.rotatePivot", r=True )
#second level, make everything else
cm.duplicate("pillar_curvePiece3")
cm.select( "pillar_curvePiece4" , r=True )
cm.rotate( 0, -90, 0, r=True)
cm.duplicate() #makes diagonal piece
cm.rotate( 0, 45, 0, r=True)
cm.move ( -0.37, 0, 0.37, r=True )
cm.select ( 'pillar_curvePiece5.f[2]' , 'pillar_curvePiece5.f[76]' ,
'pillar_curvePiece5.f[44]' , 'pillar_curvePiece5.f[60]', 'pillar_curvePiece5.f[92]' ,
'pillar_curvePiece5.f[95:99]' , 'pillar_curvePiece5.f[119:123]' , r=True)
cm.move ( 0.4 , 0 , -0.4, r=True )
cm.select( 'pillar_straightPiece' , r=True )
cm.duplicate()
cm.move(0, 0.5, 0 , r=True)
cm.duplicate()
cm.move(0, 0, 0.66 , r=True)
cm.duplicate()
cm.move(-0.66, 0, -0.66 , r=True)
cm.duplicate()
cm.move(0.06, 0, 0.61 , r=True)
cm.rotate(0,45,0, r=True)
#third level, make long curve piece, part 1
cm.duplicate( 'pillar_curvePiece' )
cm.select( "pillar_curvePiece6" , r=True )
cm.move( 0, 1, 1.3, r=True)
cm.select ( 'pillar_curvePiece6.f[2]' , 'pillar_curvePiece6.f[76]' ,
'pillar_curvePiece6.f[44]' , 'pillar_curvePiece6.f[60]', 'pillar_curvePiece6.f[92]' ,
'pillar_curvePiece6.f[95:99]' , 'pillar_curvePiece6.f[119:123]' , r=True)
cm.move ( 0 , 0 , -1.3 , r=True )
cm.move( 0, 0, -1.3 , "pillar_curvePiece6.scalePivot","pillar_curvePiece6.rotatePivot", r=True )
#third level, make middle long bit
cm.select( "pillar_curvePiece6" , r=True )
cm.duplicate()
cm.rotate( 0, -45, 0, r=True)
cm.move( -0.5 , 0 , 0.5 , r=True )
cm.select ( 'pillar_curvePiece7.f[2]' , 'pillar_curvePiece7.f[76]' ,
'pillar_curvePiece7.f[44]' , 'pillar_curvePiece7.f[60]', 'pillar_curvePiece7.f[92]' ,
'pillar_curvePiece7.f[95:99]' , 'pillar_curvePiece7.f[119:123]' , r=True)
cm.move ( 0.5 , 0 , -0.5 , r=True )
cm.select('pillar_straightPiece1' , r=True)
cm.duplicate()
cm.move( 0 , 0.5 , 0 , r=True )
cm.select('pillar_straightPiece4' , r=True)
cm.duplicate()
cm.move( 0 , 0.5 , 0 , r=True )
cm.duplicate()
cm.move( -0.68 , 0 , 0.68 , r=True )
#third level, make long curve piece, part 2
cm.select('pillar_straightPiece5' , r=True)
cm.duplicate()
cm.move( 0 , 0 , 0.66 , r=True )
cm.duplicate()
cm.move( 0 , 0 , .66 , r=True )
cm.select( "pillar_curvePiece1" , r=True )
cm.duplicate()
cm.move( 0 , 1 , 1.31 , r=True )
cm.polySmooth( 'pillar_curvePiece6' , dv=1 )
cm.select('pillar_curvePiece6' , 'pillar_straightPiece8' , 'pillar_straightPiece9' , r=True)
cm.polyUnite( n='pillar_curvePieceL' )
cm.delete( ch=True )
#third level, make other side
cm.duplicate()
cm.rotate(0,-90,0, r=True)
cm.select('pillar_curvePiece' , r=True)
cm.duplicate()
cm.move( -1.32 , 1 , 0 , r=True )
#smooth curve pieces
cm.polySmooth( 'pillar_curvePiece' , dv=1 )
cm.polySmooth( 'pillar_curvePiece1' , dv=1 )
cm.polySmooth( 'pillar_curvePiece2' , dv=1 )
cm.polySmooth( 'pillar_curvePiece3' , dv=1 )
cm.polySmooth( 'pillar_curvePiece4' , dv=1 )
cm.polySmooth( 'pillar_curvePiece5' , dv=1 )
cm.polySmooth( 'pillar_curvePiece7' , dv=1 )
cm.polySmooth( 'pillar_curvePiece8' , dv=1 )
cm.polySmooth( 'pillar_curvePiece9' , dv=1 )
#combine corner pillar
cm.select( 'pillar_*' , r=True )
cm.polyUnite( n='cornerPillar' )
cm.delete( ch=True )
#makes the part under the roof
def makeFloor():
#make pillars
makeCornerPillar()
for i in range(int(width)):
makeNormalPillar()
cm.move ( (3*i)+3 , 0, 0, r=True )
#make rafters
actualWidth = ( 3 * float(width))
cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 3) , sx=5, sy=5, sz=5, n='rafter1' )
cm.move( (actualWidth + 3 ) * 0.5 , 4.06 , 0, r=True )
cm.duplicate()
cm.move( 0, 1, 0, r=True)
cm.duplicate()
cm.move( 0, 0.52, 0, r=True)
cm.duplicate()
cm.move( 0, 0.495, 0, r=True)
cm.duplicate()
cm.move( 0, 0, 0.676, r=True)
cm.duplicate()
cm.move( 0, 0.5, -0.676, r=True)
cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 4.8) , sx=5, sy=5, sz=5, n='rafter7' )
cm.move( (actualWidth + 2.9 ) * 0.5, 6.564 , 0.68, r=True )
cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 6.2) , sx=5, sy=5, sz=5, n='rafter8' )
cm.move( (actualWidth + 3 ) * 0.5, 6.564 , 1.32, r=True )
cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 8) , sx=5, sy=5, sz=5, n='rafter9' )
cm.move( (actualWidth + 3 ) * 0.5, 6.564 , 1.94, r=True )
#group them
cm.select( 'cornerPillar', 'normalP*', 'rafter*', r=True )
cm.group( n='pillars' )
cm.delete( ch=True )
#move and duplicate
cm.move ( (actualWidth + 3)*-0.5 , 0, (actualWidth + 3)*0.5 , a=True )
cm.move( 0, 0, 0 , "pillars.scalePivot","pillars.rotatePivot", a=True )
cm.duplicate()
cm.rotate( 0, 90, 0, r=True )
cm.duplicate()
cm.rotate( 0, 90, 0, r=True )
cm.duplicate()
cm.rotate( 0, 90, 0, r=True )
#make the inside cube
cm.polyCube( d= actualWidth+3.1 , h=7, w= actualWidth+3.1 , n='insidecube' )
cm.move( 0, 3.5, 0, r=True)
#group
cm.select( 'pillar*', 'insidecube', r=True )
cm.group( n='floor_group' )
#make lower roof
def makeLowerRoof():
#--make balcony floor
#create the roof group
cm.group( em=True , n='roof_group' )
#make balcony bottom floor
balconyWidth = (3 * float(width)) + 5
cm.polyCube( d=balconyWidth , h=0.1 , w=balconyWidth , n='balconyBottomFloor' )
cm.move(0, 7.8, 0, r=True)
cm.parent( 'balconyBottomFloor' , 'roof_group' )
#make balcony rafter supports
for i in range(int(width)):
cm.polyCube( d=balconyWidth+0.8 , w=0.5 , h=0.3, n='balconySupport1' )
cm.move(3*i,0,0, r=True)
cm.select( 'balconySupport*' , r=True)
cm.move( (float(width) -1)*-0.75 , 4, 0, r=True )
cm.group( 'balconySupport*' , n='rafterGroup' , p='roof_group' )
cm.duplicate()
cm.rotate(0, 90, 0)
#make balcony top floor
cm.select( 'balconyBottomFloor' , r=True)
cm.duplicate( n='balconyTopFloor' )
cm.move( 0, 0.4, 0, r=True )
#--make balcony railing
#make horizontal railing
cm.polyCube( d=balconyWidth+2 , h=0.25 , w=0.25 , n='balcony_horizontalRafter' )
cm.move( balconyWidth / 2 -0.125, 8.7, 0, r=True )
cm.move( 0, 9, 0, 'balcony_horizontalRafter.scalePivot',
'balcony_horizontalRafter.rotatePivot', a=True )
cm.rotate( 0, -90, 0, r=True)
cm.duplicate()
cm.move(0, 0.5, 0, r=True)
#make vertical railing
for i in range(int(width)):
cm.polyCube( d=0.25 , w=0.25, h=1, n='balcony_verticalRafter' )
cm.move( 3*i , 0 , 0, r=True )
cm.select( 'balcony_verticalRafter*' , r=True)
cm.move( (float(width) -1)*-0.75 , 4.3, balconyWidth/4 -0.05, r=True )
#group and rotate
cm.select( 'balcony_*' , r=True )
cm.group( n='balconyRailing' , p='roof_group' )
cm.move( 0,9,0, 'balconyRailing.scalePivot' , 'balconyRailing.rotatePivot' , a=True )
cm.duplicate()
cm.rotate( 0,90,0, r=True )
cm.duplicate()
cm.rotate( 0,90,0, r=True )
cm.duplicate()
cm.rotate( 0,90,0, r=True )
#--make roof
#make basic roof shape
extrudeLowerRoofShape( str(4) , 1 , 1 )
extrudeLowerRoofShape( str(0) , 1 , -1 )
extrudeLowerRoofShape( str(2) , 1 , 1 )
extrudeLowerRoofShape( str(5) , 1 , -1 )
#-merge corners
#first corner
vtxs = {
"lvl1" : { "vtx1" : "68" , "vtx2" : "71" , "vtx3" : "48" , "vtx4" : "51" , "vtx5" : "51" , "vtx6" : "48" } ,
"lvl2" : { "vtx1" : "70" , "vtx2" : "73" , "vtx3" : "52" , "vtx4" : "55" , "vtx5" : "55" , "vtx6" : "52" } ,
"lvl3" : { "vtx1" : "72" , "vtx2" : "75" , "vtx3" : "56" , "vtx4" : "59" , "vtx5" : "59" , "vtx6" : "72" , "vtx7" : "74" , "vtx8" : "56" } ,
"lvl4" : { "vtx1" : "74" , "vtx2" : "77" , "vtx3" : "60" , "vtx4" : "63" , "vtx5" : "63" , "vtx6" : "74" , "vtx7" : "60" , "vtx8" : "76" } ,
"lvl5" : { "vtx1" : "76" , "vtx2" : "79" , "vtx3" : "64" , "vtx4" : "67" , "vtx5" : "67" , "vtx6" : "64" } ,
}
mergeCorners( vtxs , -1 , -1 )
#second corner
vtxs = {
'lvl1' : { 'vtx1' : '8' , 'vtx2' : '10' , 'vtx3' : '49' , 'vtx4' : '50' , 'vtx5' : '49' , 'vtx6' : '49' } ,
'lvl2' : { 'vtx1' : '12' , 'vtx2' : '14' , 'vtx3' : '51' , 'vtx4' : '52' , 'vtx5' : '51' , 'vtx6' : '51' } ,
'lvl3' : { 'vtx1' : '16' , 'vtx2' : '18' , 'vtx3' : '53' , 'vtx4' : '54' , 'vtx5' : '53' , 'vtx6' : '18' , 'vtx7' : '53' , 'vtx8' : '16' } ,
'lvl4' : { 'vtx1' : '20' , 'vtx2' : '22' , 'vtx3' : '55' , 'vtx4' : '56' , 'vtx5' : '22' , 'vtx6' : '55' , 'vtx7' : '20' , 'vtx8' : '55' } ,
'lvl5' : { 'vtx1' : '24' , 'vtx2' : '26' , 'vtx3' : '57' , 'vtx4' : '58' , 'vtx5' : '57' , 'vtx6' : '57' } ,
}
mergeCorners( vtxs , 1 , -1 )
#third
vtxs = {
'lvl1' : { 'vtx1' : '9' , 'vtx2' : '11' , 'vtx3' : '29' , 'vtx4' : '30' , 'vtx5' : '29' , 'vtx6' : '30' } ,
'lvl2' : { 'vtx1' : '13' , 'vtx2' : '15' , 'vtx3' : '31' , 'vtx4' : '32' , 'vtx5' : '31' , 'vtx6' : '32' } ,
'lvl3' : { 'vtx1' : '17' , 'vtx2' : '19' , 'vtx3' : '33' , 'vtx4' : '34' , 'vtx5' : '19' , 'vtx6' : '34' , 'vtx7' : '17' , 'vtx8' : '33' } ,
'lvl4' : { 'vtx1' : '21' , 'vtx2' : '23' , 'vtx3' : '35' , 'vtx4' : '36' , 'vtx5' : '23' , 'vtx6' : '36' , 'vtx7' : '21' , 'vtx8' : '35' } ,
'lvl5' : { 'vtx1' : '25' , 'vtx2' : '27' , 'vtx3' : '37' , 'vtx4' : '38' , 'vtx5' : '37' , 'vtx6' : '38' } ,
}
mergeCorners( vtxs , 1 , 1 )
#fourth
vtxs = {
'lvl1' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '29' , 'vtx4' : '28' , 'vtx5' : '28' , 'vtx6' : '29' } ,
'lvl2' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '30' , 'vtx4' : '31' , 'vtx5' : '30' , 'vtx6' : '31' } ,
'lvl3' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '32' , 'vtx4' : '33' , 'vtx5' : '49' , 'vtx6' : '33' , 'vtx7' : '48' , 'vtx8' : '32' } ,
'lvl4' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '35' , 'vtx4' : '34' , 'vtx5' : '35' , 'vtx6' : '49' , 'vtx7' : '34' , 'vtx8' : '48' } ,
'lvl5' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '37' , 'vtx4' : '36' , 'vtx5' : '36' , 'vtx6' : '37' } ,
}
mergeCorners( vtxs , -1 , 1 )
#makes the top roof
def makeTopRoof():
#--make basic shape
cm.polyCube( d=float(width) , w=float(width) , h=0.1, n='tr_roof' )
extrudeUpperRoofShape( str(0) , 1 , -1 )
extrudeUpperRoofShape( str(2) , 1 , 1 )
extrudeUpperRoofShape( str(4) , 1 , 1 )
extrudeUpperRoofShape( str(5) , 1 , -1 )
#--merge edges
vtxs = { #first
'lvl1' : { 'vtx1' : '68' , 'vtx2' : '71' , 'vtx3' : '28' , 'vtx4' : '31' , 'vtx5' : '31' , 'vtx6' : '28' } ,
'lvl2' : { 'vtx1' : '70' , 'vtx2' : '73' , 'vtx3' : '32' , 'vtx4' : '35' , 'vtx5' : '35' , 'vtx6' : '32' } ,
'lvl3' : { 'vtx1' : '72' , 'vtx2' : '75' , 'vtx3' : '36' , 'vtx4' : '39' , 'vtx5' : '74' , 'vtx6' : '72' } ,
'lvl4' : { 'vtx1' : '74' , 'vtx2' : '77' , 'vtx3' : '40' , 'vtx4' : '43' , 'vtx5' : '76' , 'vtx6' : '74' } ,
'lvl5' : { 'vtx1' : '76' , 'vtx2' : '79' , 'vtx3' : '44' , 'vtx4' : '47' , 'vtx5' : '47' , 'vtx6' : '44' } ,
}
mergeTopCorners( vtxs , -1 , -1 )
vtxs = { #second
'lvl1' : { 'vtx1' : '48' , 'vtx2' : '50' , 'vtx3' : '29' , 'vtx4' : '30' , 'vtx5' : '30' , 'vtx6' : '29' } ,
'lvl2' : { 'vtx1' : '50' , 'vtx2' : '52' , 'vtx3' : '33' , 'vtx4' : '34' , 'vtx5' : '34' , 'vtx6' : '33' } ,
'lvl3' : { 'vtx1' : '52' , 'vtx2' : '54' , 'vtx3' : '37' , 'vtx4' : '38' , 'vtx5' : '53' , 'vtx6' : '52' } ,
'lvl4' : { 'vtx1' : '54' , 'vtx2' : '56' , 'vtx3' : '41' , 'vtx4' : '42' , 'vtx5' : '55' , 'vtx6' : '54' } ,
'lvl5' : { 'vtx1' : '56' , 'vtx2' : '58' , 'vtx3' : '45' , 'vtx4' : '46' , 'vtx5' : '46' , 'vtx6' : '45' } ,
}
mergeTopCorners( vtxs , 1 , -1 )
vtxs = { #third
'lvl1' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '9' , 'vtx4' : '10' , 'vtx5' : '9' , 'vtx6' : '10' } ,
'lvl2' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '13' , 'vtx4' : '14' , 'vtx5' : '13' , 'vtx6' : '14' } ,
'lvl3' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '17' , 'vtx4' : '18' , 'vtx5' : '48' , 'vtx6' : '49' } ,
'lvl4' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '21' , 'vtx4' : '22' , 'vtx5' : '48' , 'vtx6' : '49' } ,
'lvl5' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '25' , 'vtx4' : '26' , 'vtx5' : '25' , 'vtx6' : '26' } ,
}
mergeTopCorners( vtxs , 1 , 1 )
vtxs = { #fourth
'lvl1' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '8' , 'vtx4' : '11' , 'vtx5' : '8' , 'vtx6' : '11' } ,
'lvl2' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '12' , 'vtx4' : '15' , 'vtx5' : '12' , 'vtx6' : '15' } ,
'lvl3' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '16' , 'vtx4' : '19' , 'vtx5' : '48' , 'vtx6' : '49' } ,
'lvl4' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '20' , 'vtx4' : '23' , 'vtx5' : '48' , 'vtx6' : '49' } ,
'lvl5' : { 'vtx1' : '48' , 'vtx2' : '49' , 'vtx3' : '24' , 'vtx4' : '27' , 'vtx5' : '24' , 'vtx6' : '27' } ,
}
mergeTopCorners( vtxs , -1 , 1 )
#moves into position
cm.select( 'tr_roof' , r=True )
cm.move( 0, float(width)*2.15, 0, r=True )
#--make top of roof
cm.polyExtrudeFacet( 'tr_roof.f[1]' , ltz=1 )
#make first hemisphere
cm.polySphere( r=( float(width)/3 ) , sx=10 , sy=10 , n='tr_thingOnTopOfTopRoof' )
cm.delete( 'tr_thingOnTopOfTopRoof.f[0:39]' , 'tr_thingOnTopOfTopRoof.f[80:89]' )
cm.scale( 1, 0.8, 1 , r=True )
cm.move( 0, float(width)*2.15 + 1, 0, r=True )
#get the height of the first hemisphere
what = cm.ls( sl = True )
where = cm.xform( what , t = True , query = True )
where[1] = where[1] + float(width)/3 * 0.8
#make the connector between the first and second hemispheres
cm.polyCylinder( r=float(width)/10 , h=0.5, n='tr_connector1' )
cm.move( 0, where[1] + 0.2 , 0, r=True )
cm.delete( 'tr_connector1.f[20:21]' )
#make flower thing
cm.polySphere( r=( float(width)/3 ) , n='tr_flowerThing' )
cm.delete( 'tr_flowerThing.f[180:359]' , 'tr_flowerThing.f[380:399]' )
cm.polySelect( 'tr_flowerThing', el=340, r=True ) #select every other edge loop
for i in range( 342, 359 , 2 ):
cm.polySelect( 'tr_flowerThing', el=i, tgl=True )
cm.scale( 0.7, 0.7, 0.7, r=True )
cm.move( 0, -0.3, 0, r=True )
cm.polyCloseBorder( 'tr_flowerThing.e[180]' )
cm.polyPoke( 'tr_flowerThing.f[200]' )
cm.move( 0, ( float(width)/3 )/-5, 0, 'tr_flowerThing.vtx[201]' , r=True )
cm.select( 'tr_flowerThing', r=True )
cm.move( 0, ( float(width)/3 ) + where[1] + 0.3, 0, r=True)
#make the disc thingys
discWidth=float(width)/3
for i in range(9):
cm.polyCylinder( r=discWidth , h=float(width)/6 , n='tr_disc#' )
cm.move( 0, float(width)/2.3*i + where[1] + float(width), 0, r=True )
discWidth=discWidth*0.95
#make spiral disk thing
cm.polyHelix( c=11, h=float(width)/1.5, w=float(width)/2.3, r=0.03, sco=15, n='tr_spiral' )
cm.move( 0, where[1] + float(width)*5.2 , 0, r=True )
cm.select( 'tr_spiral.f[1320]' , r=True )
cm.rotate( 0, -45, 0, r=True )
cm.move( float(width)/ -6.97, 0, float(width)/ -6.97, r=True )
cm.select( 'tr_spiral.f[1321]' , r=True )
cm.rotate( 0, 45, 0, r=True )
cm.move( float(width)/ -7.8, 0, float(width)/ 7.8, r=True )
#make the connector between the flower thing and the disks and helix
cm.polyCylinder( r=float(width)/ 10 , h=float(width)*5.5 , n='tr_connector2' )
cm.move( 0, ( float(width)/3 ) + where[1] + float(width)*2.7, 0, r=True )
cm.scale( 0.5, 1, 0.5, 'tr_connector2.f[21]', r=True )
#make top cylinder
cm.polyCylinder( r=float(width)/ 7.5 , h=float(width)/3.75, n='tr_topCylinder' )
cm.move( 0, where[1] + float(width)*5.83, 0, r=True )
#make connector between topCylinder and topSphere
cm.polyCylinder( r=float(width)/ 15, h=float(width)/6, n='tr_connector3' )
cm.move( 0, where[1] + float(width)*6, 0, r=True )
#make topSphere
cm.polySphere( r=float(width)/ 10 ,sx=10, sy=10, n='tr_topSphere' )
cm.move( 0, where[1] + float(width)*6.15, 0, r=True )
cm.select( 'tr_topSphere.vtx[91]' , r=True )
cm.softSelect( sse=1, ssd=float(width)/6)
cm.move( 0, float(width)/10, 0, r=True )
cm.softSelect( sse=0 )
cm.move( 0, float(width)/60, 0, r=True )
#group it
cm.select( 'tr_*' , r=True )
cm.group( n='topRoof_group' )
#assembles it all together
def assembleIt( width , height ):
#if it's a one storey tower
if height==1:
makeFloor()
makeLowerRoof()
if merge==True:
cm.select( 'floor_group' , 'roof_group' , r=True )
cm.polyUnite( n='Temple' )
cm.delete( ch=True )
#if it's a two storey tower
elif height==2:
makeFloor()
makeLowerRoof()
cm.select( 'floor_group' , r=True )
cm.duplicate()
cm.move( 0,8,0 , r=True )
makeTopRoof()
if width==1:
cm.scale( 2.2, 2.2, 2.2, r=True )
elif width==2:
cm.scale( 1.3, 1.3, 1.3, r=True )
elif width>=4:
roofWidth = ( float(width)* -0.05 + 1 )
cm.scale( roofWidth, roofWidth, roofWidth, r=True )
bottom = cm.xform( bb=True, q=True )
top = cm.xform( 'floor_group1', bb=True , q=True )
cm.move( 0, top[4] - bottom[1] - 1, 0, 'topRoof_group', r=True )
makeFinalCombine_MultipleStorey()
# if it has more than two storeys
else:
makeFloor()
makeLowerRoof()
for i in range( int(height) -2 ):
cm.select( 'floor_group' , r=True )
cm.duplicate()
cm.move( 0,7.9*(i+1),0 , r=True )
cm.select( 'roof_group' , r=True )
cm.duplicate()
cm.move( 0,7.9*(i+1),0 , r=True )
cm.select( 'floor_group' , r=True )
cm.duplicate()
cm.move( 0,7.9*(height-1),0 , r=True )
makeTopRoof()
if width==1:
cm.scale( 2.18, 2.18, 2.18, r=True )
elif width==2:
cm.scale( 1.3, 1.3, 1.3, r=True )
elif width>=4:
roofWidth = ( float(width)* -0.05 + 1 )
cm.scale( roofWidth, roofWidth, roofWidth, r=True )
bottom = cm.xform( bb=True, q=True )
top = cm.xform( 'floor_group'+str(height-1), bb=True , q=True )
cm.move( 0, top[4] - bottom[1] - 0.5, 0, 'topRoof_group', r=True )
makeFinalCombine_MultipleStorey()
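# For example (illustrative): with the width and merge globals already set via
# the UI below, assembleIt( 2 , 3 ) stacks duplicated floor/roof groups into a
# three-storey tower and scales the top roof group to fit.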
#---------UI stuff--------
#closes an old window if present
def closeWindow( *args ):
cm.deleteUI( 'guiWindow' , wnd=True )
#function that makes the temples
def makeTemple( *args ):
#make things global to work
global width
global merge
#turn sliders and buttons into variables
width = cm.intSliderGrp( w, q=True, v=True )
height = cm.intSliderGrp( h, q=True, v=True )
merge = cm.checkBox( m, q=True, v=True )
#assemble the thing
closeWindow()
assembleIt( width , height )
#checks whether the temple builder window exists and, if it does, deletes it
if cm.window('guiWindow', exists = True):
cm.deleteUI('guiWindow')
#creates window
cm.window( 'guiWindow', t='Temple Builder' , mnb = False , mxb = False , s=False, w=400 , h=130 )
form = cm.formLayout( nd=100, w=400 , h=130 )
#sliders
w = cm.intSliderGrp( l='Width ' , f=True , min=1 , max=20 , s=1 , v=2 )
h = cm.intSliderGrp( l='Height' , f=True , min=1 , max=20 , s=1 , v=2 )
#checkbox
m = cm.checkBox( l = 'Merge Geometry' , v=True )
#button
b = cm.button( l='Make Temple' , w=390, h=45, command=makeTemple )
#stick stuff into the window
cm.formLayout( form , edit=True,
af=[ (w, 'left', 0), (w, 'right', 5), (w, 'top', 5), (h, 'left', 0), (h, 'right', 5), (m, 'left', 107), (b, 'left', 5) ],
ac=[ (h, 'top', 5, w), (m, 'top', 5, h), (b, 'top', 5, m) ] )
#display the window
cm.showWindow()
```
|
{
"source": "jessicacardoso/QApedia",
"score": 2
}
|
#### File: QApedia/QApedia/qapedia.py
```python
import os
import argparse
import csv
__author__ = "<NAME>"
__version__ = "v0.0.0-alpha"
__license__ = "MIT"
__doc__ += f"""
Version:
--------
- QApedia {__version__}
"""
_ROOT = os.path.abspath(os.path.dirname(__file__))
def _get_data(path):
return os.path.join(_ROOT, "data", path)
def _make_parser():
p = argparse.ArgumentParser()
p.add_argument(
"-tfile",
help="Qualquer caminho de string válido é aceito. A string pode ser "
"uma URL, por exemplo. Esse caminho corresponde ao arquivo contendo "
"o conjunto de templates. Se nenhum valor for passado, é executado "
"um arquivo de exemplo.",
default=_get_data("example.csv"),
)
p.add_argument(
"-o",
"--output",
help="Corresponde ao caminho do arquivo de saída onde será salvo os "
"pares de questão-sparql gerados. Se nenhum caminho for especificado,"
" o resultado será salvo no arquivo output.txt",
default="output.txt",
)
p.add_argument(
"-d",
"--delim",
help="Delimitador usado para separar os campos do template. "
"(default: ';')",
default=";",
)
p.add_argument(
"-n",
"--number",
help="Quantidade de pares gerados por template. (default: 100)",
type=int,
default=100,
)
p.add_argument(
"-p",
"--prefixes",
help="Caminho do arquivo txt contendo os prefixos utilizados, caso "
"nenhum arquivo seja especificado são utilizados os mesmos prefixos"
" presentes em http://dbpedia.org/snorql/",
default=_get_data("prefixes.txt"),
)
p.add_argument(
"-e",
"--endpoint",
help="URL do SPARQL endpoint. (default: 'http://dbpedia.org/sparql')",
default="http://dbpedia.org/sparql",
)
p.add_argument(
"-l",
"--lang",
help="Idioma das questões do template. (default: 'pt')",
default="pt",
)
p.add_argument(
"-v",
"--verbose",
help="Indica qual template está sendo executado atualmente.",
type=bool,
default=False,
)
return p
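# Illustrative usage sketch (hypothetical arguments, not part of the CLI):
# parser = _make_parser()
# args = parser.parse_args(["-n", "10", "-o", "pairs.txt"])
# args.number == 10 and args.output == "pairs.txt"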
def _run():
from QApedia import generator
from QApedia import io
parser = _make_parser()
args = parser.parse_args()
    # Load the list of prefixes
prefixes, list_of_prefixes = io.load_prefixes(args.prefixes)
    # Load the file containing the templates
templates = io.load_templates(args.tfile, args.delim)
with open(args.output, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=args.delim)
writer.writerow(["question", "sparql", "template_id"])
for index, template in templates.iterrows():
if args.verbose:
print("Executando template da linha %d" % index)
            # Run the search and build the question-sparql pairs
pairs = generator.build_pairs_from_template(
template,
prefixes,
list_of_prefixes,
args.endpoint,
args.number,
args.lang,
)
for pair in pairs:
writer.writerow([pair["question"], pair["sparql"], str(index)])
csv_file.close()
```
#### File: QApedia/QApedia/utils.py
```python
import re
__all__ = ["extract_variables", "convert_prefixes_to_list", "encode", "decode"]
_symbols_and_its_equivalent = [
("{", " bracket_open "),
("}", " bracket_close "),
("?", "var_"),
("!=", " not_equal_to "),
(">=", " greater_than_or_equal_to "),
("<=", " less_than_or_equal_to "),
("=", " equal_to "),
(">", " greater_than "),
("<", " less_than "),
("&&", " and "),
("||", " or "),
("!", " not "),
]
def extract_variables(generator_query):
"""Extrai as variáveis correspondente presentes no 'generator_query'.
Parameters
----------
generator_query : str
        The 'generator_query' is the query that will be used to obtain the
        variables present in the blanks of the question (``query``) and of
        the sparql.
Returns
-------
list
        List containing the variables to be answered.
Examples
--------
.. code-block:: python
>>> generator_query = "select distinct ?a where {"\\
... "?uri <http://dbpedia.org/ontology/author> ?a }"
>>> variables = extract_variables(generator_query)
>>> print(variables)
['a']
"""
variables = re.findall("^(.+?)where", generator_query, re.IGNORECASE)
if variables:
variables = re.findall(r"\?(\w)", variables[0])
if not variables:
return None
return variables
def convert_prefixes_to_list(prefixes):
"""Converte uma string dos prefixos em uma lista de tuplas. Onde cada par
contém um identificador e a uri correspondente.
Parameters
----------
prefixes : str
        String corresponding to the prefixes used in the SPARQL query.
Returns
-------
list
        List of tuples, where each tuple contains two items: the first is
        the name given to the URI, which corresponds to the second item.
Examples
--------
.. code-block:: python
>>> from QApedia.utils import convert_prefixes_to_list
>>> prefixes = "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\\
... PREFIX yago: <http://yago-knowledge.org/resource/>\\
... PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>\\
... PREFIX dbo:<http://dbpedia.org/ontology/>\\
... PREFIX dbp:<http://dbpedia.org/property/>"
>>> list_of_prefixes = convert_prefixes_to_list(prefixes)
>>> for prefix, uri in list_of_prefixes:
... print(prefix, uri)
...
foaf: http://xmlns.com/foaf/0.1/
yago: http://yago-knowledge.org/resource/
rdfs: http://www.w3.org/2000/01/rdf-schema#
dbo: http://dbpedia.org/ontology/
dbp: http://dbpedia.org/property/
"""
pattern = r"(\w+:)\s*\<(.*?)\>"
prefixes_list = re.findall(pattern, prefixes)
return prefixes_list
def _replace_symbols_with_text(sparql):
"""Método auxiliar utilizado por ``encode`` para realizar a substituição
de alguns símbolos da sparql para seu equivalente em texto.
Parameters
----------
sparql : str
        plain sparql
Returns
-------
str
        encoded sparql
"""
for symbol, text in _symbols_and_its_equivalent:
sparql = sparql.replace(symbol, text)
sparql = re.sub(r"\.(\B|filter)", r" sep_dot \1", sparql, flags=re.I)
sparql = re.sub(r"\;(\B|filter)", r" sep_semicolon \1", sparql, flags=re.I)
return sparql
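# For example (illustrative): given "FILTER(?a > ?b)", the replacements above
# turn "?a" into "var_a", "?b" into "var_b" and ">" into " greater_than ".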
def encode(sparql, prefixes):
"""Dada uma query sparql, essa função transforma algum de seus caracteres
em texto.
Parameters
----------
sparql : str
        sparql to be transformed.
prefixes : list
        list of prefixes and URIs used in the sparql, as returned by the
        function :func:`QApedia.utils.convert_prefixes_to_list`.
Returns
-------
str
        transformed sparql.
Examples
--------
.. code-block:: python
>>> from QApedia.utils import encode
>>> from QApedia.utils import convert_prefixes_to_list
>>> prefixes = "PREFIX prop: <http://dbpedia.org/property/>\\
... PREFIX dbr: <http://dbpedia.org/resource/>"
>>> query = "ASK {\\n\\
... <http://dbpedia.org/resource/Amazon_River> prop:length \
?amazon .\\n\\
... <http://dbpedia.org/resource/Nile> prop:length ?nile .\\n\\
... FILTER(?amazon > ?nile) .\\n\\
... }"
>>> list_of_prefixes = convert_prefixes_to_list(prefixes)
>>> query_encoded = encode(query, list_of_prefixes)
>>> print(query_encoded)
ASK bracket_open
dbr_Amazon_River prop_length var_amazon sep_dot
dbr_Nile prop_length var_nile sep_dot
FILTER(var_amazon greater_than var_nile) sep_dot
bracket_close
"""
for prefix, uri in prefixes:
encoding = prefix.replace(":", "_")
sparql = sparql.replace(prefix, encoding)
sparql = re.sub(f"<{uri}(.*?)>", fr"{encoding}\1", sparql)
    # Replace the query characters with text.
sparql = _replace_symbols_with_text(sparql)
return sparql
def _revert_encoded_symbols(sparql):
"""Método auxiliar utilizado por ``decode`` para realizar a substituição
inversa da função ``_replace_symbols_with_text``.
Parameters
----------
sparql : str
        encoded sparql
Returns
-------
str
        decoded sparql
"""
for symbol, text in _symbols_and_its_equivalent:
sparql = sparql.replace(text, symbol)
sparql = sparql.replace(" sep_dot ", ".")
sparql = sparql.replace(" sep_semicolon ", ";")
return sparql
def decode(sparql_encoded, prefixes):
"""Dada uma sparql que foi codificada pela função
:func:`QApedia.utils.encode`. O método ``decode`` substuir os termos
codificados por símbolos válidos da consulta sparql.
Parameters
----------
sparql_encoded : str
        sparql transformed after passing through :func:`QApedia.utils.encode`.
prefixes : list
        list of prefixes and URIs used in the sparql, as returned by the
        function :func:`QApedia.utils.convert_prefixes_to_list`.
Returns
-------
str
        sparql with the symbols valid for a query.
Examples
--------
.. code-block:: python
>>> from QApedia.utils import decode
>>> from QApedia.utils import convert_prefixes_to_list
>>> prefixes = "PREFIX prop: <http://dbpedia.org/property/>\\
... PREFIX dbr: <http://dbpedia.org/resource/>"
>>> list_of_prefixes = convert_prefixes_to_list(prefixes)
>>> query_encoded = "ASK bracket_open \\n\\
... dbr_Amazon_River prop_length var_amazon sep_dot \\n\\
... dbr_Nile prop_length var_nile sep_dot \\n\\
... FILTER(var_amazon greater_than var_nile) sep_dot \\n\\
... bracket_close "
>>> print(decode(query_encoded, list_of_prefixes))
ASK {
dbr:Amazon_River prop:length ?amazon .
dbr:Nile prop:length ?nile .
FILTER(?amazon > ?nile) .
}
"""
sparql = sparql_encoded
for prefix, _ in prefixes:
encoding = prefix.replace(":", "_")
sparql = sparql.replace(encoding, prefix)
sparql = _revert_encoded_symbols(sparql)
return sparql
```
|
{
"source": "jessicacarine/cadastro_veiculos",
"score": 4
}
|
#### File: jessicacarine/cadastro_veiculos/cadastro_pessoa.py
```python
import sqlite3
conn = sqlite3.connect('db_concessionaria.db')
cursor = conn.cursor()
cursor.execute(
"""
CREATE TABLE veiculos(
id INTEGER PRIMARY KEY AUTOINCREMENT,
nome VARCHAR(50),
marca VARCHAR(20),
modelo VARCHAR(20),
cor VARCHAR(20),
placa VARCHAR(7),
proprietario VARCHAR(50),
num_portas INT,
km_rodado INT,
qtd_passageiros INT,
ano INTEGER,
valor INTEGER,
motor INT,
combustivel VARCHAR(20),
meio_locomocao VARCHAR(30)
);
""")
cursor.execute(
"""
CREATE TABLE pessoas(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
veiculo_id INTEGER,
FOREIGN KEY (veiculo_id) REFERENCES veiculos(id)
);
""")
print('Tabela criada com sucesso.')
conn.close()
class Pessoa:
def __init__(self, nome, data_nascimento, cpf, endereco, salario, profissao, email,
telefone, nome_do_responsavel, sexo, naturalidade, nacionalidade):
self.nome = nome
self.data_nascimento = data_nascimento
self.cpf = cpf
self.endereco = endereco
self.salario = salario
self.profissao = profissao
self.email = email
self.telefone = telefone
self.nome_do_responsavel = nome_do_responsavel
self.sexo = sexo
self.naturalidade = naturalidade
self.nacionalidade = nacionalidade
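# Illustrative instantiation (hypothetical values only):
# pessoa = Pessoa("Maria", "01/01/1990", "000.000.000-00", "Rua A, 123", 2500.0,
#                 "Engenheira", "maria@example.com", "(85) 99999-9999", "Jose",
#                 "F", "Fortaleza", "Brasileira")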
```
#### File: jessicacarine/cadastro_veiculos/cadastro_veiculo.py
```python
import sqlite3
conn = sqlite3.connect('db_concessionaria.db')
cursor = conn.cursor()
# cursor.execute("""
# CREATE TABLE veiculos(
# id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# nome Varchar(50),
# marca TEXT NOT NULL,
# modelo TEXT NOT NULL,
# cor TEXT,
# placa VARCHAR(7),
# proprietario VARCHAR(50),
# num_portas INT,
# km_rodado INT,
# qtd_passageiros INT,
# ano INTEGER,
# valor INTEGER,
# motor INT,
# combustivel VARCHAR(20),
# meio_locomocao VARCHAR(30)
# );
# """)
print('Tabela criada com sucesso.')
conn.close()
class Veiculo:
def __init__(self, nome, marca, modelo, cor, placa, proprietario, num_portas, km_rodado,
qtd_passageiros, ano, valor, motor, combustivel, meio_locomocao):
self.marca = marca
self.modelo = modelo
self.ano = ano
self.valor = valor
self.cor = cor
self.nome = nome
self.placa = placa
self.proprietario = proprietario
self.num_portas = num_portas
self.km_rodado = km_rodado
self.qtd_passageiros = qtd_passageiros
self.motor = motor
self.combustivel = combustivel
self.meio_locomocao = meio_locomocao
def salvar_veiculo(self):
bd = sqlite3.connect('db_veiculos.db')
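        # Note: this connects to 'db_veiculos.db', while the veiculos table
        # above is created in 'db_concessionaria.db'; the table is assumed to
        # also exist in this database.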
sql = bd.cursor()
sql.execute('''
INSERT INTO veiculos(nome, marca, modelo, cor, placa,
proprietario, num_portas, km_rodado, qtd_passageiros, ano,
valor, motor, combustivel, meio_locomocao)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?);''',
(
str(self.nome),
str(self.marca),
str(self.modelo),
str(self.cor),
str(self.placa),
str(self.proprietario),
int(self.num_portas),
int(self.km_rodado),
int(self.qtd_passageiros),
int(self.ano),
float(self.valor),
float(self.motor),
str(self.combustivel),
str(self.meio_locomocao)
)
)
bd.commit()
bd.close()
def cadastro_veiculo():
print("\n\t__________CADASTRO DE VEÍCULOS__________\n")
nome = str(input("<NAME> Carro: ")),
marca = str(input("Marca: ")),
modelo = input("Modelo: ")
cor = input("Cor: ")
placa = input("Placa: ")
proprietario = input("Nome do proprietário: ")
num_portas = int(input("Número de portas: "))
km_rodado = int(input("Km rodado: "))
qtd_passageiros = int(input("Quantidade máxima de passageiros: "))
ano = int(input("Ano: "))
valor = float(input("Valor: R$ "))
motor = float(input("Motor: "))
combustivel = input("Tipo de combustível: ")
meio_locomocao = input("Meio de locomoção: ")
veiculo = Veiculo(nome, marca, modelo, cor, placa, proprietario, num_portas, km_rodado,
                      qtd_passageiros, ano, valor, motor, combustivel, meio_locomocao)  # instantiate
veiculo.salvar_veiculo()
cadastro_veiculo()
```
|
{
"source": "jessicaccp/cn-uece",
"score": 3
}
|
#### File: jessicaccp/cn-uece/bisection.py
```python
from function_box import f
# Bisection method
# Takes as parameters the interval endpoints (a, b), the required
# precision (eps), the amplitude (amp) and the function f
def bisection(a, b, eps, amp, func=f):
    # Initialize the list that stores the approximations of d and the
    # absolute value of b-a at each iteration
iter_values = []
    # If the interval width reaches the required precision, d = mean(a, b)
if (b - a) < eps:
iter_values.append(((a + b) * 0.5, abs(b - a)))
return iter_values
    # M receives f(a), with a taken from the initial interval
M = func(a, amp)
    # The number of iterations goes from 1 to 1000 or until the root d is found
for _ in range(1, 1001):
        # The approximation d receives the midpoint of a and b
d = (a + b) * 0.5
        # If the product of M and f(d) is greater than zero, update
        # the value of a
if (M * func(d, amp)) > 0:
a = d
            # If b-a is smaller than the error, i.e. the interval width
            # reaches the required precision, d = mean(a, b)
if (b - a) < eps:
iter_values.append(((a + b) * 0.5, abs(b- a)))
return iter_values
        # If the product is not greater than zero, update b
        # and move on to the next iteration
else:
iter_values.append((d, abs(b - a)))
b = d
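# Illustrative usage sketch (not part of the original module): any callable
# f(d, amp) with a sign change on [a, b] can be passed in place of the f
# imported from function_box; demo_func below is a hypothetical stand-in.
if __name__ == "__main__":
    from math import exp

    def demo_func(d, amp):
        # amp * e^d - 4*d^2 changes sign on [0.5, 1.0] for amp = 1
        return amp * exp(d) - 4 * d ** 2

    for d, interval_width in bisection(0.5, 1.0, 1e-6, 1.0, demo_func) or []:
        print(d, interval_width)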
```
#### File: jessicaccp/cn-uece/plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
# Plots the tables with the results of the iterations of the 3 methods:
# Bisection, Newton-Raphson and Secant
def dino_plot(data_list):
    # Figure number (0: bisection, 1: newton-raphson, 2: secant)
k = 0
    # Configure the plots
for data in data_list[:3]:
fig = plt.figure(k)
fig.patch.set_visible(False)
plt.axis('off')
        # Table title (method, formula, amplitude, interval, epsilon)
plt.title('Method ' + data_list[-1][0][k] + '\n' +
r'$f(d) = amp \times e^d - 4 \times d^2$' + '\n' +
r'$amp = $' + str(data_list[-1][1]) +
r'$,\/I = [$' + str(data_list[-1][2]) + r'$, $' +
str(data_list[-1][3]) + r'$]$' +
r'$,\/\varepsilon = $' + str(data_list[-1][4]))
        # Column headers
if k:
columns = ('d', 'f(d)', r'$abs(d_k - d_{k-1})$', 'Relative error')
else:
columns = ('d', 'f(d)', 'abs(b - a)', 'Relative error')
        # Number of rows (n) and columns (m)
n = len(data[0])
m = len(data)
        # Show the iteration numbers as row labels
rows = [' ' + str(x) + ' ' for x in range(1, n + 1)]
        # Insert the data into the table cells
cell_text = []
for i in range(n):
aux = []
for j in range(m):
aux.append(data[j][i])
cell_text.append(['%f' % x for x in aux])
        # Set up the table
plt.table(cellText=cell_text,
rowLabels=rows,
colLabels=columns,
loc='center')
fig.tight_layout()
        # Increment the figure number
k += 1
    # Display the 3 plots
plt.show()
```
|
{
"source": "jessicachung/radpipe",
"score": 2
}
|
#### File: radpipe/radpipe/utils.py
```python
import os.path
def get_output_paths(state):
results_dir = state.config.get_options("results_dir")
output_path = {
"reference": "reference",
"qc": "qc",
"fastqc": "qc/fastqc",
"flagstat": "qc/flagstat",
"process_radtags": "sample_radtags",
"alignments": "alignments",
"gstacks": "gstacks",
"populations": "populations"
}
output_path = [(a, os.path.join(results_dir, b)) for a,b in output_path.items()]
output_path = dict(output_path)
return output_path
def path_list_join(dir, file_list):
'''Join directory to a list of files'''
return [os.path.join(dir, x) for x in file_list]
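# Note: run_java, PICARD_JAR and TRIMMOMATIC_JAR are assumed to be imported or
# defined elsewhere in the package; they are not part of this excerpt.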
def run_picard(state, stage, args):
mem = int(state.config.get_stage_options(stage, "mem"))
return run_java(state, stage, PICARD_JAR, mem, args)
def run_trimmomatic(state, stage, args):
mem = int(state.config.get_stage_options(stage, "mem"))
return run_java(state, stage, TRIMMOMATIC_JAR, mem, args)
def create_empty_outputs(outputs):
'''Create empty dummy files for testing purposes'''
if isinstance(outputs, str):
outputs = [outputs]
for output_filename in outputs:
with open(output_filename, "w"):
pass
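# Illustrative usage sketch (hypothetical paths, not part of the pipeline):
if __name__ == "__main__":
    print(path_list_join("results/qc/fastqc", ["s1_fastqc.zip", "s2_fastqc.zip"]))
    # -> ['results/qc/fastqc/s1_fastqc.zip', 'results/qc/fastqc/s2_fastqc.zip']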
```
|
{
"source": "jessicachung/rna_seq_pipeline",
"score": 2
}
|
#### File: jessicachung/rna_seq_pipeline/RNA-seq_pipeline.py
```python
import re
import sys
import copy
from ruffus import *
import os.path
import os
import shutil
from glob import glob
#from rubra.utils import *
from pipeline_base.utils import (runStage, runStageCheck, splitPath,
getOptions, initLog, getCommand, mkLogFile,
mkTempFilename, getStageOptions, zeroFile,
mkDir)
from pipeline_base.cmdline_args import get_cmdline_args
args = get_cmdline_args()
options = getOptions(args)
logger = initLog(options)
analysis_name = re.sub("\s", "_", options.analysis_name) # Replace any whitespace with underscores
samples_csv = options.samples_csv
comparisons_csv = options.comparisons_csv
platform = options.platform
input_dir = options.raw_seq_dir
output_dir = options.output_dir
paired_end = options.paired_end
stranded = options.stranded
genome_ref = options.genome_ref
genome_ref_fa = options.genome_ref_fa
gene_ref = options.gene_ref
rrna_ref = options.rrna_ref
cuffdiff_mask_file = options.cuffdiff_mask_file
adapter_seq = options.adapter_seq
seed_mismatches = options.seed_mismatches
palendrome_clip_threshold = options.palendrome_clip_threshold
simple_clip_threshold = options.simple_clip_threshold
trimmomatic_extra_parameters = options.extra_parameters
annotation_dataset = options.annotation_dataset
html_index_script = options.html_index_script
index_script = options.index_script
tophat_script = options.tophat_script
merge_tophat_script = options.merge_tophat_script
fix_tophat_unmapped_reads_script = options.fix_tophat_unmapped_reads_script
htseq_script = options.htseq_script
qc_parse_script = options.qc_parse_script
fastqc_parse_script = options.fastqc_parse_script
alignment_stats_script = options.alignment_stats_script
combine_and_annotate_script = options.combine_and_annotate_script
de_analysis_script = options.de_analysis_script
trimmomatic_path = options.trimmomatic_path
reorder_sam_jar = options.reorder_sam_path
mark_duplicates_jar = options.mark_duplicates_path
rnaseqc_jar = options.rnaseqc_path
add_rg_jar = options.add_or_replace_read_groups_path
java_tmp = "-Djava.io.tmpdir=$TMPDIR" if options.using_merri else ""
samples_csv_name = os.path.basename(samples_csv)[:-4] if \
samples_csv[-4:].lower() == ".csv" else os.path.basename(samples_csv)
comparisons_csv_name = os.path.basename(comparisons_csv)[:-4] if \
comparisons_csv[-4:].lower() == ".csv" else \
os.path.basename(comparisons_csv)
# Output directories:
mkDir(output_dir)
fastqc_dir = os.path.join(output_dir, "fastqc")
mkDir(fastqc_dir)
fastqc_post_trim_dir = os.path.join(output_dir, "fastqc_post_trim")
mkDir(fastqc_post_trim_dir)
transcriptome_dir = os.path.join(output_dir, "transcriptome_index")
# mkDir(transcriptome_dir)
trimmed_dir = os.path.join(output_dir, "trimmed_reads")
mkDir(trimmed_dir)
tophat_raw_dir = os.path.join(output_dir, "tophat_raw")
mkDir(tophat_raw_dir)
tophat_dir = os.path.join(output_dir, "tophat")
mkDir(tophat_dir)
cufflinks_dir = os.path.join(output_dir, "cufflinks")
mkDir(cufflinks_dir)
cuffmerge_dir = os.path.join(output_dir, "cuffmerge")
mkDir(cuffmerge_dir)
cuffdiff_dir = os.path.join(output_dir, "cuffdiff")
mkDir(cuffdiff_dir)
htseq_dir = os.path.join(output_dir, "htseq_count")
mkDir(htseq_dir)
counts_dir = os.path.join(output_dir, "read_counts")
mkDir(counts_dir)
merged_dir = os.path.join(output_dir, "tophat_merged")
mkDir(merged_dir)
rnaseqc_dir = os.path.join(output_dir, "rnaseqc")
mkDir(rnaseqc_dir)
alignment_stats_dir = os.path.join(output_dir, "alignment_stats")
mkDir(alignment_stats_dir)
qc_summary_dir = os.path.join(output_dir, "qc_summary")
mkDir(qc_summary_dir)
main_voom_dir = os.path.join(output_dir, "voom_analysis")
mkDir(main_voom_dir)
main_edger_dir = os.path.join(output_dir, "edgeR_analysis")
mkDir(main_edger_dir)
voom_dir = os.path.join(main_voom_dir, analysis_name + "_voom")
edger_dir = os.path.join(main_edger_dir, analysis_name + "_edgeR")
cuffmerge_sub_dir = os.path.join(cuffmerge_dir, analysis_name + "_cuffmerge")
class sample(object):
"""
Class for sample information
"""
def __init__(self, name, condition, covariates = None, files = None):
self.name = name
self.condition = condition
self.files = files if files else []
self.covariates = covariates if covariates else []
self.sm = name
def __repr__(self):
str = "\n".join(["Name: %s" % self.name,
"Condition: %s" % self.condition,
"Files: %s" % "\n ".join(self.files),
"Covariates: %s" % self.covariates])
return str
def print_info(self):
str = "\n".join(["\t%s" % self.name,
"\t\tCondition: %s" % self.condition,
"\t\tCovariates: %s" % self.covariates,
"\t\tFiles: %s" % \
"\n\t\t ".join(self.files)])
return str
def get_trimmed_filenames(self, paired=False):
if paired:
return map(lambda x: "%s/%s" % (trimmed_dir, re.sub(r'.fastq.gz',
r'.trimmed-paired.fastq.gz', os.path.basename(x))),
self.files)
else:
return map(lambda x: "%s/%s" % (trimmed_dir, re.sub(r'.fastq.gz',
r'.trimmed-single.fastq.gz', os.path.basename(x))),
self.files)
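# Illustrative example (hypothetical file name): a file called
# SM_A01_RP_1_LB_lib1_ID_XYZ_L001_R1.fastq.gz yields the sample name
# "SM_A01_RP_1", and get_trimmed_filenames(paired=True) maps its files into
# trimmed_dir with the ".trimmed-paired.fastq.gz" suffix.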
def print_heading(heading):
print "#" * 80 + "\n## %s:" % heading.upper()
print_heading(analysis_name)
# Get files in sequence directory
seqfiles = glob(input_dir + "/*.fastq.gz")
seqfiles.sort()
if not seqfiles:
print "Error: No *fastq.gz files in sequence directory."
sys.exit(1)
# Open and read CSV files
try:
with open(samples_csv) as samples_file:
sample_csv_list = samples_file.read().strip().split("\n")
except IOError:
print "Error: Cannot open %s" % samples_csv
sys.exit(1)
try:
with open(comparisons_csv) as comparison_file:
comparisons_csv_list = comparison_file.read().strip().split("\n")
except IOError:
print "Error: Cannot open %s" % comparisons_csv
sys.exit(1)
# get smrp list from fastq.gz files in sequence directory
smrp_dict = {}
for file in seqfiles:
try:
smrp = re.search('(.+\/)?(SM_[A-Za-z0-9-.]+_' \
'RP_[A-Za-z0-9-.]+)_?.*', file).group(2)
if smrp not in smrp_dict:
smrp_dict[smrp] = [file]
else:
smrp_dict[smrp].append(file)
except AttributeError:
print "Warning: FASTQ file %s is not in the correct name format. " \
"File will not be included in analysis." % file
# parse samples in samples.csv
sample_csv_dict = {}
sample_dict = {}
sample_list = []
for i in sample_csv_list:
if len(i) > 1 and i[0] != "#":
try:
line = re.search('([^#]+)#?.+', i).group(1) if "#" in i else i
line = map(lambda x: x.strip(), line.split(","))
line[0] = re.search('(SM_)?([A-Za-z0-9-.]+)_?.*', line[0]).group(2)
if re.search("\+|\-", line[1]):
print "Error: Non-allowed characters (+,-) in CSV file's " \
"condition column."
sys.exit(1)
sample_csv_dict[line[0]] = [0] + line[1:]
sample_dict[line[1]] = []
except IndexError:
print "Error: CSV file not formatted correctly.\n" \
"Columns in the CSV file should be " \
"sample,condition,[covariates...]\n" \
"Examples are listed in the user manual document."
sys.exit(1)
sequences = []
for smrp_name in smrp_dict:
sm_name = re.search('SM_([A-Za-z0-9-.]+)_RP_.*', smrp_name).group(1)
try:
sample_csv_dict[sm_name][0] += 1
condition = sample_csv_dict[sm_name][1]
covariates = sample_csv_dict[sm_name][2:]
smrp_files = smrp_dict[smrp_name]
new_sample = sample(smrp_name, condition, covariates, smrp_files)
sample_dict[condition].append(new_sample)
sample_list.append(new_sample)
sequences += smrp_files
except KeyError:
print "Warning: Sample %s is not listed in the sample CSV file " \
"and will not be used in analysis." % smrp_name
for sm_name in sample_csv_dict:
if sample_csv_dict[sm_name][0] == 0:
print "Warning: Samples %s in CSV file don't match any files in the " \
"FASTQ directory." % sm_name
if not sequences:
print "Error: No files given for analysis.\n" \
"Check if your CSV file and your fastq filenames are in the " \
"correct format.\n" \
"Refer to the user manual document for more information."
sys.exit(1)
# check all samples have the same number of covariates
number_of_covariates = len(sample_list[0].covariates)
for smrp in sample_list:
if len(smrp.covariates) != number_of_covariates:
print "Error: Samples in CSV file have an unequal number of " \
"covariates."
sys.exit(1)
print_heading("samples")
for condition in sample_dict:
print "-", condition
for sample in sample_dict[condition]:
print sample.print_info()
# parse comparisons in comparisons.csv
comparisons_list = []
comparisons_print = []
no_replicates_warning = False
for i in comparisons_csv_list:
if len(i) > 1 and i[0] != "#":
try:
line = re.search('([^#]+)#?.+', i).group(1) if "#" in i else i
line = map(lambda x: x.strip(), line.split(","))
c1 = line[0]
c2 = line[1]
n1 = len(sample_dict[c1])
n2 = len(sample_dict[c2])
comparisons_list.append([c1, c2])
comparisons_print.append("\t%s (n = %d) vs. %s (n = %d)" % (c1, n1,
c2, n2))
if n1 < 2 or n2 < 2:
no_replicates_warning = True
except IndexError:
print "Error: CSV file not formatted correctly.\n" \
"Columns in the CSV file should be " \
"condition1,condition2\n" \
"Examples are listed in the user manual document."
sys.exit(1)
except KeyError as e:
print "Error: No samples have condition %s in sample CSV file " \
"needed for comparison '%s vs. %s'." % (e, c1, c2)
sys.exit(1)
print_heading("comparisons")
print "\n".join(comparisons_print)
if no_replicates_warning:
print "Warning: Lacking replicates for some conditions! " \
"Some analyses in the pipeline will fail."
@files([samples_csv,
comparisons_csv],
["%s/index.html" % output_dir,
"%s/makeIndexHtml.Success" % output_dir])
def makeIndexHtml(inputs, outputs):
"""
Make index HTML file of results.
"""
output_filename, flagFile = outputs
abs_sample_csv_file = os.path.abspath(samples_csv)
abs_comparison_csv_file = os.path.abspath(comparisons_csv)
abs_output_dir = os.path.abspath(output_dir)
runStageCheck('makeIndexHtml', flagFile, logger, options,
html_index_script, analysis_name, abs_sample_csv_file,
abs_comparison_csv_file, abs_output_dir, output_filename)
if paired_end:
@transform(sequences,
regex('(.+\/)?(.+?)\_R1.fastq\.gz'),
add_inputs(r'\1\2_R2.fastq.gz'),
[r'%s/\2_R1_fastqc.zip' % fastqc_dir,
r'%s/\2_R2_fastqc.zip' % fastqc_dir,
r'%s/\2.fastqc.Success' % fastqc_dir])
def fastQC(inputs, outputs):
"""
Obtain stats on reads from fastq using fastQC (paired-end)
"""
paired1, paired2 = inputs
out1, out2, flagFile = outputs
runStageCheck('fastQC', flagFile, logger, options, fastqc_dir,
paired1, paired2)
else: # if single-end reads
@transform(sequences,
regex('(.+\/)?(.+?)\_R1.fastq\.gz'),
[r'%s/\2_R1_fastqc.zip' % fastqc_dir,
r'%s/\2.fastqc.Success' % fastqc_dir])
def fastQC(inputs, outputs):
"""
Obtain stats on reads from fastq using fastQC (single-end)
"""
paired1 = inputs
out1, flagFile = outputs
paired2 = ""
runStageCheck('fastQC', flagFile, logger, options, fastqc_dir,
paired1, paired2)
if paired_end:
@transform(sequences,
regex('(.+\/)?(.+?)\_R1.fastq\.gz'),
add_inputs(r'\1\2_R2.fastq.gz'),
[r'%s/\2_R1.trimmed-paired.fastq.gz' % trimmed_dir,
r'%s/\2_R1.trimmed-unpaired.fastq.gz' % trimmed_dir,
r'%s/\2_R2.trimmed-paired.fastq.gz' % trimmed_dir,
r'%s/\2_R2.trimmed-unpaired.fastq.gz' % trimmed_dir,
r'%s/\2.trimReads.Success' % trimmed_dir],
[r'\2'])
def trimReads(inputs, outputs, samp_name):
"""
Trim adapter sequences from fastq files using Trimmomatic
(paired-end)
"""
paired1, paired2 = inputs
out1, unpaired1, out2, unpaired2, flagFile = outputs
paired = "PE"
parameters = "%s:%d:%d:%d %s" % (adapter_seq, seed_mismatches,
palendrome_clip_threshold,
simple_clip_threshold,
trimmomatic_extra_parameters)
trim_log = "-trimlog %s/%s.trimReads.log" % \
(trimmed_dir, samp_name[0]) if options.write_trimmomatic_log \
else ""
trimmomatic_input = "%s %s %s %s %s %s ILLUMINACLIP:%s" % \
(paired1, paired2, out1, unpaired1, out2, unpaired2, parameters)
runStageCheck('trimReads', flagFile, logger, options, java_tmp,
trimmomatic_path, paired, trim_log, trimmomatic_input)
else:
@transform(sequences,
regex('(.+\/)?(.+?)\_R1.fastq\.gz'),
[r'%s/\2_R1.trimmed-single.fastq.gz' % trimmed_dir,
r'%s/\2.trimReads.Success' % trimmed_dir],
[r'\2'])
def trimReads(inputs, outputs, samp_name):
"""
Trim adapter sequences from fastq files using Trimmomatic
(single-end)
"""
paired1 = inputs
out1, flagFile = outputs
paired = "SE"
parameters = "%s:%d:%d:%d %s" % (adapter_seq, seed_mismatches,
palendrome_clip_threshold,
simple_clip_threshold,
trimmomatic_extra_parameters)
trim_log = "-trimlog %s/%s.trimReads.log" % \
(trimmed_dir, samp_name[0]) if options.write_trimmomatic_log \
else ""
trimmomatic_input = "%s %s ILLUMINACLIP:%s" % (paired1, out1,
parameters)
runStageCheck('trimReads', flagFile, logger, options, java_tmp,
trimmomatic_path, paired, trim_log, trimmomatic_input)
if paired_end:
@transform(trimReads,
regex('(.+\/)?(.+?)\_R1.trimmed-paired.fastq\.gz'),
[r'%s/\2_R1.trimmed-paired_fastqc.zip' % fastqc_post_trim_dir,
r'%s/\2_R2.trimmed-paired_fastqc.zip' % fastqc_post_trim_dir,
r'%s/\2.fastqcPostTrim.Success' % fastqc_post_trim_dir])
def fastQCPostTrim(inputs, outputs):
"""
Obtain stats on reads from fastq using fastQC on trimmed files
(paired-end)
"""
paired1, unpaired1, paired2, unpaired2, _success = inputs
out1, out2, flagFile = outputs
runStageCheck('fastQC', flagFile, logger, options,
fastqc_post_trim_dir, paired1, paired2)
else:
@transform(trimReads,
regex('(.+\/)?(.+?)\_R1.trimmed-single.fastq\.gz'),
[r'%s/\2_R1.trimmed-single_fastqc.zip' % fastqc_post_trim_dir,
r'%s/\2.fastqcPostTrim.Success' % fastqc_post_trim_dir])
def fastQCPostTrim(inputs, outputs):
"""
Obtain stats on reads from fastq using fastQC on trimmed files
(single-end)
"""
paired1, _success = inputs
out1, flagFile = outputs
paired2 = ""
runStageCheck('fastQC', flagFile, logger, options,
fastqc_post_trim_dir, paired1, paired2)
@follows(fastQC)
@merge(fastQCPostTrim,
[r'%s/FastQC_summary.html' % qc_summary_dir,
r'%s/FastQC_basic_statistics_summary.html' % qc_summary_dir,
r'%s/FastQC_summary.txt' % qc_summary_dir,
r'%s/fastqcSummary.Success' % qc_summary_dir])
def fastQCSummary(inputs, outputs):
"""
Parse results from fastQC analysis
"""
qc_summary, basic_statistics_summary, summary_txt, flagFile = outputs
paired = "paired" if paired_end else "single"
runStageCheck('fastQCSummary', flagFile, logger, options,
fastqc_parse_script, fastqc_dir, fastqc_post_trim_dir,
qc_summary, basic_statistics_summary, paired, summary_txt)
@files([genome_ref_fa, gene_ref],
["%s/known.rev.1.bt2" % transcriptome_dir,
"%s/buildIndex.Success" % transcriptome_dir])
def buildTranscriptomeIndex(inputs, outputs):
"""
Build index for Bowtie2 from reference files
"""
genomeRef, geneRef = inputs
index, flagFile = outputs
tmp_dir = "%s/tmp" % output_dir
if not os.path.exists(tmp_dir):
mkDir(tmp_dir)
transcriptome_index = "%s/known" % transcriptome_dir
seq = sequences[0]
runStageCheck('buildTranscriptomeIndex', flagFile, logger, options,
index_script, seq, tmp_dir, transcriptome_dir,
transcriptome_index, geneRef, genome_ref)
# Get inputs for tophatAlign:
# Treat samples which have the same SM and RP identifier as technical
# replicates. Technical replicates are inputted together.
tophat_files = []
rg_tags = {}
for samp in sample_list:
trimmed_files = samp.get_trimmed_filenames(paired_end)
input_files = ["%s/known.rev.1.bt2" % transcriptome_dir] + trimmed_files
output_files = ["%s/%s/accepted_hits.bam" % (tophat_raw_dir, samp.name),
"%s/%s.accepted_hits.bam" % (tophat_dir, samp.name),
"%s/%s.tophat.Success" % (tophat_raw_dir, samp.name)]
paired1 = []
paired2 = []
for i in trimmed_files:
if paired_end:
if "_R1.trimmed-paired.fastq.gz" in os.path.basename(i):
paired1.append(i)
elif "_R2.trimmed-paired.fastq.gz" in os.path.basename(i):
paired2.append(i)
else:
if "_R1.trimmed-single.fastq.gz" in os.path.basename(i):
paired1.append(i)
paired2 = ["False"]
match = re.search('(.+\/)?SM_([A-Za-z0-9-.]+)_RP_([A-Za-z0-9-.]+)_' \
'LB_([A-Za-z0-9-.]+)_ID_([A-Za-z0-9-.]+)_L([0-9]+)_' \
'R.\.trimmed-(paired|single)\.fastq\.gz', input_files[1])
rgsm = match.group(2) + "_RP-" + match.group(3) # sample name + replicate
rglb = match.group(4) # library name
rgid = match.group(5) + "_L" + match.group(6) # id + lane
rgpl = platform # platform
rg_tags[samp.name] = [rgsm, rglb, rgid, rgpl]
extra_parameters = [",".join(paired1), ",".join(paired2), rgsm, rglb,
rgid, rgpl]
tophat_files.append([input_files, output_files, extra_parameters])
# print_heading("tophat files")
# for i in tophat_files: print i
@follows(buildTranscriptomeIndex)
@follows(trimReads)
@files(tophat_files)
def tophatAlign(inputs, outputs, extra_parameters):
"""
Align reads in fastq file using TopHat
"""
input_files = inputs
acceptedHits, linkFile, flagFile = outputs
paired1, paired2, rgsm, rglb, rgid, rgpl = extra_parameters
sample_dir = os.path.dirname(acceptedHits)
transcriptome_index = "%s/known" % transcriptome_dir
runStageCheck('tophatAlign', flagFile, logger, options, tophat_script,
paired1, paired2, sample_dir, gene_ref, genome_ref,
transcriptome_index, rgsm, rglb, rgid, rgpl, linkFile)
@transform(tophatAlign,
regex('(.+\/)?(.+?)/accepted_hits\.bam'),
[r'%s/\2.accepted_hits.sorted.bam' % tophat_dir,
r'%s/\2.sortBam.Success' % tophat_dir])
def sortBam(inputs, outputs):
"""
Sort BAM files using Samtools
"""
originalFile, bamFile, _success = inputs
output, flagFile = outputs
output = output[:-4]
runStageCheck('sortBam', flagFile, logger, options, bamFile, output)
@transform(sortBam,
regex('(.+\/)?(.+?)\.accepted_hits\.sorted\.bam'),
[r'%s/\2.accepted_hits.sorted.bam.bai' % tophat_dir,
r'%s/\2.indexSortedBam.Success' % tophat_dir])
def indexSortedBam(inputs, outputs):
"""
Index sorted BAM files using Samtools
"""
bamFile, _success = inputs
output, flagFile = outputs
runStageCheck('indexBam', flagFile, logger, options, bamFile)
@transform(tophatAlign,
regex('(.+\/)?(.+?)/accepted_hits\.bam'),
add_inputs(r'\1\2/unmapped.bam'),
[r'%s/\2.merged.bam' % merged_dir,
r'%s/\2.tophatMerge.Success' % merged_dir])
def mergeTophat(inputs, outputs):
"""
    Fixes unmapped reads and merges Tophat accepted_hits.bam and unmapped.bam
"""
[originalFile, bamFile, _success], unmapped = inputs
output, flagFile = outputs
sample_dir = os.path.dirname(originalFile)
runStageCheck('mergeTophat', flagFile, logger, options,
merge_tophat_script, fix_tophat_unmapped_reads_script,
sample_dir, output)
@transform(mergeTophat,
regex('(.+\/)?(.+?)\.merged\.bam'),
[r'%s/\2.merged.reordered.bam' % merged_dir,
r'%s/\2.reorderBam.Success' % merged_dir])
def reorderBam(inputs, outputs):
"""
Reorder BAM files to match the contig ordering of a reference file using
Picard's Reorder SAM
"""
bamFile, _success = inputs
output, flagFile = outputs
runStageCheck('reorderBam', flagFile, logger, options, java_tmp,
reorder_sam_jar, bamFile, output, genome_ref_fa)
@transform(reorderBam,
regex('(.+\/)?(.+?).merged\.reordered\.bam'),
[r'%s/\2.merged.reordered.addedRG.bam' % merged_dir,
r'%s/\2.addRG.Success' % merged_dir], [r'\2'])
def addRG(inputs, outputs, samp_name):
"""
Add Read Groups to BAM file
"""
bamFile, _success = inputs
output, flagFile = outputs
rgsm, rglb, rgid, rgpl = rg_tags[samp_name[0]]
    rgpu = rgid  # platform unit: no PU field is available, so reuse the read group ID
runStageCheck('addRG', flagFile, logger, options, java_tmp, add_rg_jar,
bamFile, output, rgsm, rglb, rgid, rgpl, rgpu)
@transform(addRG,
regex('(.+\/)?(.+?)\.merged\.reordered\.addedRG\.bam'),
[r'%s/\2.merged.reordered.addedRG.bam.bai' % merged_dir,
r'%s/\2.indexReorderedBam.Success' % merged_dir])
def indexReorderedBam(inputs, outputs):
"""
Index reordered BAM files using Samtools
"""
bamFile, _success = inputs
output, flagFile = outputs
runStageCheck('indexBam', flagFile, logger, options, bamFile)
@follows(indexReorderedBam)
@transform(addRG,
regex('(.+\/)?(.+?)\.merged\.reordered\.addedRG\.bam'),
[r'%s/\2.merged.reordered.addedRG.markdup.bam' % merged_dir,
r'%s/\2.markdup.log' % merged_dir,
r'%s/\2.markDuplicates.Success' % merged_dir])
def markDuplicates(inputs, outputs):
"""
Mark duplicates in BAM files using Picard
"""
bamFile, _success = inputs
output, markDupLog, flagFile = outputs
runStageCheck('markDuplicates', flagFile, logger, options, java_tmp,
mark_duplicates_jar, bamFile, markDupLog, output)
@transform(markDuplicates,
regex('(.+\/)?(.+?)\.merged\.reordered\.addedRG\.markdup\.bam'),
[r'%s/\2.merged.reordered.addedRG.markdup.bam.bai' % merged_dir,
            r'%s/\2.indexMarkdupBam.Success' % merged_dir])
def indexMarkdupBam(inputs, outputs):
"""
Index marked duplicates BAM files using Samtools
"""
bamFile, markDupLog, _success = inputs
output, flagFile = outputs
runStageCheck('indexBam', flagFile, logger, options, bamFile)
@follows(indexMarkdupBam)
@transform(markDuplicates,
regex('(.+\/)?(.+?)\.merged\.reordered\.addedRG\.markdup\.bam'),
[r'%s/\2/report.html' % rnaseqc_dir,
r'%s/\2.rnaSeQC.Success' % rnaseqc_dir],
[r'\2'])
def rnaSeQC(inputs, outputs, samp_name):
"""
Obtain stats on RNA-seq data using RNA-SeQC
"""
bamFile, markDupLog, _success = inputs
output, flagFile = outputs
samp = "\"%s|%s|%s\"" % (samp_name[0], bamFile, samp_name[0])
sample_dir = os.path.dirname(output)
if not rrna_ref:
rrna = ""
elif rrna_ref.split(".")[-1] in ("fasta", "fa"):
rrna = "-BWArRNA %s" % rrna_ref
elif rrna_ref.split(".")[-1] == "list":
rrna = "-rRNA %s" % rrna_ref
else:
rrna = ""
paired = "" if paired_end else "-singleEnd"
runStageCheck('rnaSeQC', flagFile, logger, options, java_tmp, rnaseqc_jar,
paired, samp, genome_ref_fa, gene_ref, rrna, sample_dir)
@follows(indexSortedBam)
@transform(sortBam,
regex('(.+\/)?(.+?)\.accepted_hits\.sorted\.bam'),
[r'%s/\2/transcripts.gtf' % cufflinks_dir,
r'%s/\2.cufflinksAssembly.Success' % cufflinks_dir])
def cufflinksAssembly(inputs, outputs):
"""
Assemble aligned reads into transcripts using Cufflinks
"""
bamFile, _success = inputs
transcripts, flagFile = outputs
samp_dir = os.path.dirname(transcripts)
runStageCheck('cufflinksAssembly', flagFile, logger, options, samp_dir,
bamFile)
@merge(cufflinksAssembly,
['%s/assemblies.txt' % cuffmerge_sub_dir,
'%s/assemblies.Success' % cuffmerge_sub_dir])
def createCuffmergeFile(inputs, outputs):
"""
Create assemblies.txt file containing a list of transcript.gtf files
"""
assemblies, flagFile = outputs
transcripts = []
success = []
for i in inputs:
transcripts.append(i[0])
success.append(i[1])
mkDir(cuffmerge_sub_dir)
os.system('echo "%s" > %s' % ("\n".join(transcripts), assemblies))
os.system('> %s' % flagFile)
@files(createCuffmergeFile,
['%s/merged.gtf' % cuffmerge_sub_dir,
'%s/cuffmerge.Success' % cuffmerge_sub_dir])
def cuffmerge(inputs, outputs):
"""
Create a single merged transcriptome annotation from all assemblies in
assembly.txt using cuffmerge
"""
assemblies, _success = inputs
transcripts, flagFile = outputs
runStageCheck('cuffmerge', flagFile, logger, options, gene_ref,
genome_ref_fa, cuffmerge_sub_dir, assemblies)
# Input files in the same group for Cuffdiff analysis
cuffdiff_files = []
for comparison in comparisons_list:
c1_samples = map(lambda x: x.name, sample_dict[comparison[0]])
c2_samples = map(lambda x: x.name, sample_dict[comparison[1]])
c1_files = map(lambda x: "%s/%s.accepted_hits.bam" % (tophat_dir, x),
c1_samples)
c2_files = map(lambda x: "%s/%s.accepted_hits.bam" % (tophat_dir, x),
c2_samples)
label = analysis_name + "_" + "_vs_".join(comparison)
input_files = ["%s/merged.gtf" % cuffmerge_sub_dir] + c1_files + c2_files
output_files = ["%s/%s/gene_exp.diff" % (cuffdiff_dir, label),
"%s/%s.cuffdiff.Success" % (cuffdiff_dir, label)]
extra_parameters = [",".join(c1_files), ",".join(c2_files), comparison[0],
comparison[1]]
cuffdiff_files.append([input_files, output_files, extra_parameters])
# print_heading("cuffdiff files")
# for i in cuffdiff_files: print i
@follows(cuffmerge)
@files(cuffdiff_files)
def cuffdiff(inputs, outputs, extras):
"""
Identify differentially expressed genes in each group using Cuffdiff.
"""
merged_gtk = inputs[0]
output_de, flagFile = outputs
c1_files, c2_files, c1_label, c2_label = extras
labels = c1_label + "," + c2_label
outputDir = os.path.dirname(output_de)
mask = "-M %s" % cuffdiff_mask_file if cuffdiff_mask_file else ""
runStageCheck('cuffdiff', flagFile, logger, options, mask, outputDir,
labels, merged_gtk, c1_files, c2_files)
@transform(tophatAlign,
regex('(.+\/)?(.+?)/accepted_hits\.bam'),
[r'%s/\2.accepted_hits.sortedByName.bam' % tophat_dir,
r'%s/\2.sortBamByName.Success' % tophat_dir])
def sortBamByName(inputs, outputs):
"""
Sort BAM file by name
"""
originalFile, bamFile, _success = inputs
output, flagFile = outputs
output = output[:-4]
runStageCheck('sortBamByName', flagFile, logger, options, bamFile, output)
@transform(sortBamByName,
regex('(.+\/)?(.+?)\.accepted_hits\.sortedByName\.bam'),
add_inputs(r'%s/\2/unmapped.bam' % tophat_raw_dir),
[r'%s/\2.alignmentStats.txt' % alignment_stats_dir,
r'%s/\2.alignmentStats.Success' % alignment_stats_dir])
def alignmentStats(inputs, outputs):
"""
Count the number of reads which had unique alignments, the number of
reads which had multiple alignments, and the number of unmapped reads
"""
[bamFile, _success], unmappedBam = inputs
output, flagFile = outputs
paired = "paired" if paired_end else "single"
runStageCheck('alignmentStats', flagFile, logger, options,
alignment_stats_script, bamFile, unmappedBam, output, paired)
@follows(alignmentStats)
@follows(fastQC)
@follows(fastQCPostTrim)
@merge(rnaSeQC,
[r'%s/qc_summary.html' % qc_summary_dir,
r'%s/qcSummary.Success' % qc_summary_dir])
def qcSummary(inputs, outputs):
"""
Parse results from QC analysis
"""
qc_summary, flagFile = outputs
paired = "paired" if paired_end else "single"
runStageCheck('qcSummary', flagFile, logger, options, qc_parse_script,
fastqc_dir, fastqc_post_trim_dir, alignment_stats_dir,
rnaseqc_dir, qc_summary, paired)
@transform(sortBamByName,
regex('(.+\/)?(.+?)\.accepted_hits\.sortedByName\.bam'),
[r'%s/\2.union_HTSeqCount.txt' % htseq_dir,
r'%s/\2.strictIntersect_HTSeqCount.txt' % htseq_dir,
r'%s/\2.htseqCount.Success' % htseq_dir])
def countReads(inputs, outputs):
"""
Count reads for each feature in GTF file.
"""
bamFile, _success = inputs
unionFile, strictFile, flagFile = outputs
runStageCheck('countReads', flagFile, logger, options, htseq_script,
bamFile, gene_ref, unionFile, strictFile, stranded)
@merge(countReads,
['%s/%s_samples.csv' % (counts_dir, analysis_name),
'%s/%s_comparisons.csv' % (counts_dir, analysis_name),
r'%s/%s_counts.txt' % (counts_dir, analysis_name),
r'%s/%s_counts.RData' % (counts_dir, analysis_name),
r'%s/%s_counts.stdout' % (counts_dir, analysis_name),
r'%s/%s_counts.stderr' % (counts_dir, analysis_name),
r'%s/%s.combineAndAnnotate.Success' % (counts_dir, analysis_name)])
def combineAndAnnotate(inputs, outputs):
"""
Create csv files containing sample information and comparison information
needed for edgeR and voom analysis. Combine feature counts from HTSeq into
one CSV file. Also removes all features with zero counts. Annotates Ensembl
IDs with symbols, chr, description etc. Text file of raw counts can be used
for DGE-Vis.
"""
sample_R_csv, comparison_R_csv, plain_text_counts, rdata_counts, \
combine_stdout, combine_stderr, flagFile = outputs
    # If replicate labels are all identical, then remove the RP tag
rp_list = map(lambda x: re.search('SM_[A-Za-z0-9-.]+_RP_([A-Za-z0-9-.]+)',
x.name).group(1), sample_list)
if len(set(rp_list)) == 1:
for s in sample_list:
s.sm = re.search('SM_([A-Za-z0-9-.]+)_RP_([A-Za-z0-9-.]+)',
s.name).group(1)
# Write sample csv file
try:
with open(sample_R_csv, 'w') as output_file:
output_lines = []
for smrp_name in sample_list:
htseq_count = "%s/%s.union_HTSeqCount.txt" % (htseq_dir,
smrp_name.name)
if smrp_name.covariates:
output_lines.append(",".join([smrp_name.sm, htseq_count,
smrp_name.condition,
",".join(smrp_name.covariates)]))
else:
output_lines.append(",".join([smrp_name.sm, htseq_count,
smrp_name.condition]))
output_file.write("\n".join(output_lines) + "\n")
except:
print "Error. Could not create file %s" % sample_R_csv
sys.exit(1)
# Write comparison csv file
try:
with open(comparison_R_csv, 'w') as output_file:
output_lines = []
for comparison in comparisons_list:
output_lines.append(",".join(comparison))
output_file.write("\n".join(output_lines) + "\n")
except:
print "Error. Could not create file %s" % comparison_R_csv
sys.exit(1)
annotation_dataset = str(options.annotation_dataset)
runStageCheck('combineAndAnnotate', flagFile, logger, options,
sample_R_csv, comparison_R_csv, plain_text_counts,
rdata_counts, annotation_dataset,
combine_and_annotate_script, combine_stdout, combine_stderr)
@files(combineAndAnnotate,
['%s/voom.stdout' % voom_dir,
'%s/voom.stderr' % voom_dir,
'%s/voom.Success' % voom_dir])
def voom(inputs, outputs):
"""
Perform DE analysis using Voom (limma)
"""
sample_R_csv, comparison_R_csv, plain_text_counts, rdata_counts, \
combine_stdout, combine_stderr, _success = inputs
voom_stdout, voom_stderr, flagFile = outputs
mkDir(voom_dir)
runStageCheck('voom', flagFile, logger, options, rdata_counts, voom_dir,
"voom", de_analysis_script, voom_stdout, voom_stderr)
@files(combineAndAnnotate,
['%s/edgeR.stdout' % edger_dir,
'%s/edgeR.stderr' % edger_dir,
'%s/edgeR.Success' % edger_dir])
def edgeR(inputs, outputs):
"""
Perform DE analysis using edgeR
"""
sample_R_csv, comparison_R_csv, plain_text_counts, rdata_counts, \
combine_stdout, combine_stderr, _success = inputs
edger_stdout, edger_stderr, flagFile = outputs
mkDir(edger_dir)
runStageCheck('edgeR', flagFile, logger, options, rdata_counts, edger_dir,
"edgeR", de_analysis_script, edger_stdout, edger_stderr)
# Invoke the pipeline.
pipelineOptions = options.pipeline
endTasks = pipelineOptions['end']
forcedTasks = pipelineOptions['force']
style = pipelineOptions['style']
if style == 'run':
# Perform the pipeline steps.
pipeline_run(endTasks,
multiprocess = pipelineOptions['procs'],
logger = black_hole_logger,
forcedtorun_tasks = forcedTasks,
gnu_make_maximal_rebuild_mode = options.maximal_rebuild_mode)
elif style == 'flowchart':
# Draw the pipeline as a diagram.
pipeline_printout_graph('flowchart.svg', 'svg', endTasks,
no_key_legend = False)
elif style == 'print':
pipeline_printout(sys.stdout, endTasks, verbose = 5, wrap_width=100000,
forcedtorun_tasks = forcedTasks,
gnu_make_maximal_rebuild_mode = options.maximal_rebuild_mode)
```
#### File: rna_seq_pipeline/scripts/fastqc_parse.py
```python
extract_fastqc = [
"Total Sequences",
"%GC",
"Sequence length"]
extract_summary = [
"Per base sequence quality",
"Per sequence GC content",
"Overrepresented sequences",
"Kmer Content"]
from sys import argv
from sys import exit
from glob import glob
import os.path
script, fastqc_dir, fastqc_post_trim_dir, fastqc_summary_file, \
basic_statistics_file, paired_end = argv
paired_end = True if paired_end == "paired" else False
anchor_links = {"Basic Statistics": "M0",
"Per base sequence quality": "M1",
"Per sequence quality scores": "M2",
"Per base sequence content": "M3",
"Per base GC content": "M4",
"Per sequence GC content": "M5",
"Per base N content": "M6",
"Sequence Length Distribution": "M7",
"Sequence Duplication Levels": "M8",
"Overrepresented sequences": "M9",
"Kmer Content": "M10"}
CSS = """<html>
<head><title>FastQC Summary</title>
<style type="text/css">
table {
border-width: 1px;
border-spacing: 2px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
}
table td {
border-width: 2px;
padding: 4px;
border-style: solid;
border-color: gray;
}
</style>
</head>
"""
def parse_file(filename):
file = open(filename)
dict = {}
for i in file.read().split("\n>>")[1:-1]:
if i != "END_MODULE":
lines = i.split("\n")
module_name, status = lines[0].split("\t")
dict[module_name] = lines
file.close()
return dict
def extract_info(module, extract):
dict = {}
list = []
for i in module:
dict[i.split("\t")[0]] = i.split("\t")[1]
for i in extract:
try:
list.append(dict[i])
except:
list.append("-")
return list
def parse_summary(module, extract):
list = []
for i in extract:
try:
list.append(module[i][0].split("\t")[1])
except:
list.append("-")
return list
def print_list(list, extract, columns):
print "\tPre-Trim" + "\t" * (len(extract_fastqc)-1) + "Post-Trim" + \
"\t" * (len(extract_fastqc)-1)
print "File\t" + "\t".join(extract * columns)
for i in list:
print "%s\t" % i[0],
print "\t".join(i[1])
print "\n"
def table_fastqc(output, list):
output.write("<table>\n")
output.write('<tr>\n<td></td>\n<td colspan="%d" align="center">' \
'FastQC Pre-Trim</td>\n<td colspan="%d" align="center">' \
'FastQC Post-Trim</td>\n</tr>' % (len(extract_summary),
len(extract_summary) ))
output.write("<tr>\n<td>File</td>\n" + td(extract_summary, "left") * 2 + \
"</tr>\n" )
for i in range(0,len(list)):
output.write("<tr>\n")
# file
output.write(td([list[i][0]], "left"))
# fastqc + hyperlinks
output.write(td_pass_fail(list[i][1], "right", list[i][0]))
output.write("</tr>\n")
output.write("</table>")
def table_basic_statistics(output, list):
output.write("<table>\n")
output.write('<tr>\n<td></td>\n<td colspan="%d" align="center">' \
'FastQC Pre-Trim</td>\n<td colspan="%d" align="center">' \
'FastQC Post-Trim</td>\n' % (len(extract_fastqc),
len(extract_fastqc)))
output.write("<tr><td>File</td>\n" + td(extract_fastqc, "left") * 2 + \
"</tr>\n" )
for i in range(0,len(list)):
output.write("<tr>\n")
# file
output.write(td([list[i][0]], "left"))
# fastqc
output.write(td(list[i][1][0:(2*len(extract_fastqc))], "right"))
output.write("</tr>\n")
output.write("</table>")
def td(list, align):
string = ""
for i in list:
string += "<td align=\"%s\">%s</td>\n" % (align, parse_number(i))
return string
def td_pass_fail(list, align, sample_name):
string = ""
count = 0
for i in list:
if i == "pass":
colour = "#C5D8A2"
elif i == "warn":
colour = "#FFFFE7"
elif i == "fail":
colour = "#FCD8D4"
else:
colour = "#FFFFFF"
if count < len(extract_summary):
link_dir = fastqc_dir
suffix = "_fastqc"
else:
link_dir = fastqc_post_trim_dir
if paired_end:
suffix = ".trimmed-paired_fastqc"
else:
suffix = ".trimmed-single_fastqc"
hyperlink = "%s/%s%s/fastqc_report.html" % (link_dir, sample_name,
suffix)
anchor = anchor_links[extract_summary[count % len(extract_summary)]]
string += '<td align="%s" bgcolor="%s"><a href="%s#%s">%s</a>' \
'</td>\n' % (align, colour, hyperlink, anchor, i.upper())
count += 1
return string
def parse_number(number):
try:
int(number)
return format(int(number), ",d")
except:
return number
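# For example (illustrative): parse_number("1234567") returns "1,234,567",
# while non-numeric values such as "45.2%" are returned unchanged.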
def main():
# Get fastQC files
try:
files = glob(os.path.join(fastqc_dir, "*/fastqc_data.txt"))
post_trim_files = glob(os.path.join(fastqc_post_trim_dir,
"*/fastqc_data.txt"))
except:
print "ERROR"
exit()
# Parse files
samples = {}
for filename in files:
samples[filename] = parse_file(filename)
post_trim_samples = {}
for filename in post_trim_files:
post_trim_samples[filename] = parse_file(filename)
#---------------------------------------------
# Parse module results: pass/warn/fail
#---------------------------------------------
module_results = {}
for filename in samples:
sample_name = filename.split("/")[-2][:-7]
module_results[sample_name] = parse_summary(samples[filename],
extract_summary)
for filename in post_trim_samples:
sample_name = filename.split("/")[-2][:-22]
try:
module_results[sample_name] = module_results[sample_name] + \
parse_summary(post_trim_samples[filename], extract_summary)
except:
pass
# If no post-trim file, fill in empty cells with "-"
for sample_name in module_results:
if len(module_results[sample_name]) != 2 * len(extract_summary):
module_results[sample_name] = module_results[sample_name] + \
["-"] * len(extract_summary)
# Print table to stdout
module_sorted = module_results.items()
module_sorted.sort()
print "FastQC Summary"
print_list(module_sorted, extract_summary, 2)
# Output html table
try:
output = open(fastqc_summary_file,'w')
output.write(CSS)
output.write("<body>\n<h1>FastQC Summary</h1>\n")
table_fastqc(output, module_sorted)
output.write("</body>\n</html>")
output.close()
except:
print "ERROR. Could not create file %s." % fastqc_summary_file
#----------------------------------------------------
# Parse information from 'Basic Statistics' module
#----------------------------------------------------
basic_statistics_results = {}
for filename in samples:
sample_name = filename.split("/")[-2][:-7]
basic_statistics_results[sample_name] = extract_info(
samples[filename]["Basic Statistics"], extract_fastqc)
information_post_trim = {}
for filename in post_trim_samples:
sample_name = filename.split("/")[-2][:-22]
try:
basic_statistics_results[sample_name] = \
basic_statistics_results[sample_name] + \
extract_info(post_trim_samples[filename]\
["Basic Statistics"], extract_fastqc)
except:
pass
# If no post-trim file, fill in empty cells with "-"
for sample_name in basic_statistics_results:
if len(basic_statistics_results[sample_name]) != \
2 * len(extract_fastqc):
basic_statistics_results[sample_name] = \
basic_statistics_results[sample_name] + \
["-"] * len(extract_fastqc)
# Print table to stdout
basic_statistics_sorted = basic_statistics_results.items()
basic_statistics_sorted.sort()
print "FastQC Basic Statistics Summary"
print_list(basic_statistics_sorted, extract_fastqc, 2)
# Output html table
try:
output = open(basic_statistics_file,'w')
output.write(CSS)
output.write("<body>\n<h1>FastQC Basic Statistics Summary</h1>\n")
table_basic_statistics(output, basic_statistics_sorted)
output.write("</body>\n</html>")
output.close()
except:
print "ERROR. Could not create file %s." % basic_statistics_file
if __name__ == "__main__":
main()
```
|
{
"source": "jessica-dai/fairlearn",
"score": 2
}
|
#### File: fairlearn/metrics/_group_metric_set.py
```python
import sklearn.metrics as skm
from sklearn import preprocessing
from ._extra_metrics import (_balanced_root_mean_squared_error,
_mean_overprediction,
_mean_underprediction,
_root_mean_squared_error,
false_negative_rate,
false_positive_rate,
mean_prediction,
selection_rate,
true_negative_rate)
from ._metric_frame import MetricFrame
from ._input_manipulations import _convert_to_ndarray_and_squeeze
_Y_TRUE = 'trueY'
_Y_PRED = 'predictedY'
_PRECOMPUTED_METRICS = 'precomputedMetrics'
_GLOBAL = 'global'
_BINS = 'bins'
_PRECOMPUTED_BINS = 'precomputedFeatureBins'
_BIN_VECTOR = 'binVector'
_BIN_LABELS = 'binLabels'
_FEATURE_BIN_NAME = 'featureBinName'
_PREDICTION_TYPE = 'predictionType'
_PREDICTION_BINARY_CLASSIFICATION = 'binaryClassification'
_PREDICTION_REGRESSION = 'regression'
_MODEL_NAMES = 'modelNames'
_SCHEMA = 'schemaType'
_DASHBOARD_DICTIONARY = 'dashboardDictionary'
_VERSION = 'schemaVersion'
BINARY_CLASSIFICATION = 'binary_classification'
REGRESSION = 'regression'
_allowed_prediction_types = frozenset([BINARY_CLASSIFICATION, REGRESSION])
# The following keys need to match those of _metric_methods in
# _fairlearn_dashboard.py
# Issue 269 is about unifying the two sets
ACCURACY_SCORE_GROUP_SUMMARY = "accuracy_score"
BALANCED_ROOT_MEAN_SQUARED_ERROR_GROUP_SUMMARY = "balanced_root_mean_squared_error"
F1_SCORE_GROUP_SUMMARY = "f1_score"
FALLOUT_RATE_GROUP_SUMMARY = "fallout_rate"
LOG_LOSS_GROUP_SUMMARY = "log_loss"
MEAN_ABSOLUTE_ERROR_GROUP_SUMMARY = "mean_absolute_error"
MEAN_OVERPREDICTION_GROUP_SUMMARY = "overprediction"
MEAN_PREDICTION_GROUP_SUMMARY = "average"
MEAN_SQUARED_ERROR_GROUP_SUMMARY = "mean_squared_error"
MEAN_UNDERPREDICTION_GROUP_SUMMARY = "underprediction"
MISS_RATE_GROUP_SUMMARY = "miss_rate"
PRECISION_SCORE_GROUP_SUMMARY = "precision_score"
R2_SCORE_GROUP_SUMMARY = "r2_score"
RECALL_SCORE_GROUP_SUMMARY = "recall_score"
ROC_AUC_SCORE_GROUP_SUMMARY = "balanced_accuracy_score"
ROOT_MEAN_SQUARED_ERROR_GROUP_SUMMARY = "root_mean_squared_error"
SELECTION_RATE_GROUP_SUMMARY = "selection_rate"
SPECIFICITY_SCORE_GROUP_SUMMARY = "specificity_score"
ZERO_ONE_LOSS_GROUP_SUMMARY = "zero_one_loss"
BINARY_CLASSIFICATION_METRICS = {}
BINARY_CLASSIFICATION_METRICS[ACCURACY_SCORE_GROUP_SUMMARY] = skm.accuracy_score
BINARY_CLASSIFICATION_METRICS[FALLOUT_RATE_GROUP_SUMMARY] = false_positive_rate
BINARY_CLASSIFICATION_METRICS[F1_SCORE_GROUP_SUMMARY] = skm.f1_score
BINARY_CLASSIFICATION_METRICS[MEAN_OVERPREDICTION_GROUP_SUMMARY] = _mean_overprediction
BINARY_CLASSIFICATION_METRICS[MEAN_UNDERPREDICTION_GROUP_SUMMARY] = _mean_underprediction
BINARY_CLASSIFICATION_METRICS[MISS_RATE_GROUP_SUMMARY] = false_negative_rate
BINARY_CLASSIFICATION_METRICS[PRECISION_SCORE_GROUP_SUMMARY] = skm.precision_score
BINARY_CLASSIFICATION_METRICS[RECALL_SCORE_GROUP_SUMMARY] = skm.recall_score
BINARY_CLASSIFICATION_METRICS[ROC_AUC_SCORE_GROUP_SUMMARY] = skm.roc_auc_score
BINARY_CLASSIFICATION_METRICS[SELECTION_RATE_GROUP_SUMMARY] = selection_rate
BINARY_CLASSIFICATION_METRICS[SPECIFICITY_SCORE_GROUP_SUMMARY] = true_negative_rate
REGRESSION_METRICS = {}
REGRESSION_METRICS[BALANCED_ROOT_MEAN_SQUARED_ERROR_GROUP_SUMMARY] = _balanced_root_mean_squared_error # noqa: E501
REGRESSION_METRICS[LOG_LOSS_GROUP_SUMMARY] = skm.log_loss
REGRESSION_METRICS[MEAN_ABSOLUTE_ERROR_GROUP_SUMMARY] = skm.mean_absolute_error
REGRESSION_METRICS[MEAN_OVERPREDICTION_GROUP_SUMMARY] = _mean_overprediction
REGRESSION_METRICS[MEAN_UNDERPREDICTION_GROUP_SUMMARY] = _mean_underprediction
REGRESSION_METRICS[MEAN_PREDICTION_GROUP_SUMMARY] = mean_prediction
REGRESSION_METRICS[MEAN_SQUARED_ERROR_GROUP_SUMMARY] = skm.mean_squared_error
REGRESSION_METRICS[R2_SCORE_GROUP_SUMMARY] = skm.r2_score
REGRESSION_METRICS[ROOT_MEAN_SQUARED_ERROR_GROUP_SUMMARY] = _root_mean_squared_error
REGRESSION_METRICS[ZERO_ONE_LOSS_GROUP_SUMMARY] = skm.zero_one_loss
def _process_sensitive_features(sensitive_features):
"""Convert the dictionary into the required list."""
unsorted_features = []
for column_name, column in sensitive_features.items():
nxt = dict()
nxt[_FEATURE_BIN_NAME] = column_name
np_column = _convert_to_ndarray_and_squeeze(column)
le = preprocessing.LabelEncoder()
# Since these will likely be JSON serialised we
# need to make sure we have Python ints and not
# numpy ints
nxt[_BIN_VECTOR] = [int(x) for x in list(le.fit_transform(np_column))]
nxt[_BIN_LABELS] = [str(x) for x in le.classes_]
unsorted_features.append(nxt)
result = sorted(unsorted_features, key=lambda x: x[_FEATURE_BIN_NAME])
return result
def _process_predictions(predictions):
"""Convert the dictionary into two lists."""
names = []
preds = []
for model_name in sorted(predictions):
names.append(model_name)
y_p = _convert_to_ndarray_and_squeeze(predictions[model_name])
preds.append(y_p.tolist())
return names, preds
def _create_group_metric_set(y_true,
predictions,
sensitive_features,
prediction_type):
"""Create a dictionary matching the Dashboard's cache."""
result = dict()
result[_SCHEMA] = _DASHBOARD_DICTIONARY
result[_VERSION] = 0
if prediction_type not in _allowed_prediction_types:
msg_format = "prediction_type '{0}' not in {1}"
msg = msg_format.format(prediction_type, sorted(
list(_allowed_prediction_types)))
raise ValueError(msg)
function_dict = None
if prediction_type == BINARY_CLASSIFICATION:
result[_PREDICTION_TYPE] = _PREDICTION_BINARY_CLASSIFICATION
function_dict = BINARY_CLASSIFICATION_METRICS
elif prediction_type == REGRESSION:
        result[_PREDICTION_TYPE] = _PREDICTION_REGRESSION
function_dict = REGRESSION_METRICS
else:
raise NotImplementedError(
"No support yet for {0}".format(prediction_type))
# Sort out y_true
_yt = _convert_to_ndarray_and_squeeze(y_true)
result[_Y_TRUE] = _yt.tolist()
# Sort out predictions
result[_MODEL_NAMES], result[_Y_PRED] = _process_predictions(predictions)
# Sort out the sensitive features
result[_PRECOMPUTED_BINS] = _process_sensitive_features(sensitive_features)
result[_PRECOMPUTED_METRICS] = []
for g in result[_PRECOMPUTED_BINS]:
by_prediction_list = []
for prediction in result[_Y_PRED]:
metric_dict = dict()
for metric_key, metric_func in function_dict.items():
gmr = MetricFrame(metric_func,
result[_Y_TRUE], prediction, sensitive_features=g[_BIN_VECTOR])
curr_dict = dict()
curr_dict[_GLOBAL] = gmr.overall
curr_dict[_BINS] = list(gmr.by_group)
metric_dict[metric_key] = curr_dict
by_prediction_list.append(metric_dict)
result[_PRECOMPUTED_METRICS].append(by_prediction_list)
return result
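# Illustrative call only (hypothetical data): build the dashboard cache dict
# for a single model and a single sensitive feature.
#
#   cache = _create_group_metric_set(
#       y_true=[0, 1, 1, 0],
#       predictions={'lr': [0, 1, 0, 0]},
#       sensitive_features={'sex': ['F', 'F', 'M', 'M']},
#       prediction_type=BINARY_CLASSIFICATION)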
```
#### File: fairlearn/metrics/_metrics_engine.py
```python
import numpy as np
import sklearn.metrics as skm
from sklearn.utils import Bunch
from ._extra_metrics import (
true_positive_rate, true_negative_rate,
false_positive_rate, false_negative_rate,
_root_mean_squared_error, _balanced_root_mean_squared_error,
mean_prediction,
selection_rate,
_mean_overprediction,
_mean_underprediction,
)
from ._input_manipulations import _convert_to_ndarray_and_squeeze
_MESSAGE_SIZE_MISMATCH = "Array {0} is not the same size as {1}"
# Parameters to metrics that should be split according to sensitive features
_DEFAULT_INDEXED_PARAMS = {"sample_weight"}
def _group_summary(metric_function, y_true, y_pred, *,
sensitive_features,
indexed_params=None,
**metric_params):
r"""Apply a metric to each subgroup of a set of data.
:param metric_function: Function with signature
``metric_function(y_true, y_pred, \*\*metric_params)``
:param y_true: Array of ground-truth values
:param y_pred: Array of predicted values
:param sensitive_features: Array indicating the group to which each input value belongs
:param indexed_params: Names of ``metric_function`` parameters that
should be split according to ``sensitive_features`` in addition to ``y_true``
and ``y_pred``. Defaults to ``None`` corresponding to ``{"sample_weight"}``.
:param \*\*metric_params: Optional arguments to be passed to the ``metric_function``
:return: Object containing the result of applying ``metric_function`` to the entire dataset
and to each group identified in ``sensitive_features``
:rtype: :py:class:`sklearn.utils.Bunch` with the fields ``overall`` and ``by_group``
"""
_check_array_sizes(y_true, y_pred, 'y_true', 'y_pred')
_check_array_sizes(y_true, sensitive_features, 'y_true', 'sensitive_features')
# Make everything a numpy array
# This allows for fast slicing of the groups
y_t = _convert_to_ndarray_and_squeeze(y_true)
y_p = _convert_to_ndarray_and_squeeze(y_pred)
s_f = _convert_to_ndarray_and_squeeze(sensitive_features)
# Evaluate the overall metric with the numpy arrays
# This ensures consistency in how metric_function is called
checked_args = _check_metric_params(y_t, metric_params, indexed_params)
result_overall = metric_function(y_t, y_p, **checked_args)
groups = np.unique(s_f)
result_by_group = {}
for group in groups:
group_indices = (group == s_f)
result_by_group[group] = metric_function(
y_t[group_indices], y_p[group_indices],
**_check_metric_params(y_t, metric_params, indexed_params, group_indices))
return Bunch(overall=result_overall, by_group=result_by_group)
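# Illustrative sketch only (hypothetical data): the returned Bunch exposes the
# metric evaluated on all samples and on each sensitive-feature group.
#
#   summary = _group_summary(skm.accuracy_score,
#                            [0, 1, 1, 0], [0, 1, 0, 0],
#                            sensitive_features=['F', 'F', 'M', 'M'])
#   summary.overall          # accuracy over the whole dataset
#   summary.by_group['M']    # accuracy restricted to the 'M' group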
# This loosely follows the pattern of _check_fit_params in
# sklearn/utils/validation.py
def _check_metric_params(y_true, metric_params,
indexed_params=None, indices=None):
metric_params_validated = {}
if indexed_params is None:
indexed_params = _DEFAULT_INDEXED_PARAMS
for param_key, param_value in metric_params.items():
if (param_key in indexed_params and param_value is not None):
_check_array_sizes(y_true, param_value, 'y_true', param_key)
p_v = _convert_to_ndarray_and_squeeze(param_value)
if indices is not None:
p_v = p_v[indices]
metric_params_validated[param_key] = p_v
else:
metric_params_validated[param_key] = param_value
return metric_params_validated
def _function_name(func):
if hasattr(func, '__name__'):
return func.__name__
else:
return str(func)
class _MetricGroupSummaryCallable:
r"""Callable that calculates the group summary of a metric.
:param metric_function: A metric function with the signature
``metric_function(y_true, y_pred, **metric_params)``
:type metric_function: func
:param indexed_params: The names of parameters of ``metric_function`` that
should be split according to ``sensitive_features`` in addition to ``y_true``
and ``y_pred``. Defaults to ``None`` corresponding to ``['sample_weight']``.
"""
def __init__(self, metric_function, indexed_params=None, name=None):
self._metric_function = metric_function
self._indexed_params = indexed_params
if name is not None:
self.__name__ = name
def __repr__(self):
if self._indexed_params is None:
args_string = ""
else:
args_string = ", indexed_params={0}".format(self._indexed_params)
return "make_metric_group_summary({0}{1})".format(
_function_name(self._metric_function),
args_string)
def __call__(self, y_true, y_pred, *, sensitive_features, **metric_params):
return _group_summary(self._metric_function,
y_true, y_pred,
sensitive_features=sensitive_features,
indexed_params=self._indexed_params,
**metric_params)
class _DerivedMetricCallable:
"""Callable that calculates a derived metric.
:param transformation_function: A transformation function with the signature
``transformation_function(summary)`` which can consume the result
produced by ``summary_function`` (typically a ``Bunch`` with fields including
``overall`` and ``by_group``)
:type transformation_function: func
:param summary_function: A metric group summary function with the signature
``summary_function(y_true, y_pred, *, sensitive_features, **metric_params)``
:type summary_function: func
"""
def __init__(self, transformation_function, summary_function, name=None):
self._transformation_function = transformation_function
self._summary_function = summary_function
if name is not None:
self.__name__ = name
def __repr__(self):
return "make_derived_metric({0}, {1})".format(
_function_name(self._transformation_function),
_function_name(self._summary_function))
def __call__(self, y_true, y_pred, *, sensitive_features, **metric_params):
return self._transformation_function(self._summary_function(
y_true, y_pred,
sensitive_features=sensitive_features,
**metric_params))
def _make_metric_group_summary(metric_function, indexed_params=None, name=None):
"""Make a callable that calculates the group summary of a metric.
:param metric_function: A metric function with the signature
``metric_function(y_true, y_pred, **metric_params)``
:type metric_function: func
:param indexed_params: The names of parameters of ``metric_function`` that
should be split according to ``sensitive_features`` in addition to ``y_true``
and ``y_pred``. Defaults to ``None`` corresponding to ``['sample_weight']``.
:return: A callable object with the signature
``metric_group_summary(y_true, y_pred, *, sensitive_features, **metric_params)``
:rtype: func
"""
return _MetricGroupSummaryCallable(
metric_function, indexed_params=indexed_params, name=name)
def _make_derived_metric(transformation_function, summary_function, name=None):
"""Make a callable that calculates a derived metric from the group summary.
:param transformation_function: A transformation function with the signature
``transformation_function(summary)``
:type transformation_function: func
:param summary_function: A metric group summary function with the signature
``summary_function(y_true, y_pred, *, sensitive_features, **metric_params)``
:type summary_function: func
:return: A callable object with the signature
``derived_metric(y_true, y_pred, *, sensitive_features, **metric_params)``
:rtype: func
"""
return _DerivedMetricCallable(
transformation_function, summary_function, name=name)
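# Illustrative sketch only: the two factories above compose a plain sklearn
# metric into a grouped summary and a scalar derived metric (transformation
# functions such as _difference_from_summary are defined below).
#
#   recall_summary = _make_metric_group_summary(skm.recall_score)
#   recall_difference = _make_derived_metric(_difference_from_summary,
#                                            recall_summary)
#   recall_difference(y_true, y_pred, sensitive_features=sf)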
def _difference_from_summary(summary):
"""Calculate the difference between the maximum and minimum metric value across groups.
:param summary: A group metric summary
:return: The difference between the maximum and the minimum group-level
metrics described in ``summary``.
:rtype: float
"""
return _group_max_from_summary(summary) - _group_min_from_summary(summary)
def _ratio_from_summary(summary):
"""Calculate the ratio between the maximum and minimum metric value across groups.
:param summary: A group metric summary
:return: The ratio between the maximum and the minimum group-level
metrics described in ``summary``.
:rtype: float
"""
group_min = _group_min_from_summary(summary)
group_max = _group_max_from_summary(summary)
if group_min < 0.0:
return np.nan
elif group_max == 0.0:
return 1.0
else:
return group_min / group_max
def _group_min_from_summary(summary):
"""Retrieve the minimum group-level metric value from group summary.
:param summary: A group metric summary
:return: The minimum group-level metric value across all groups in ``summary``.
:rtype: float
"""
return min(summary.by_group.values())
def _group_max_from_summary(summary):
"""Retrieve the minimum group-level metric value from group summary.
:param summary: A group metric summary
:return: The maximum group-level metric value across all groups in ``summary``.
:rtype: float
"""
return max(summary.by_group.values())
def _check_array_sizes(a, b, a_name, b_name):
if len(a) != len(b):
raise ValueError(_MESSAGE_SIZE_MISMATCH.format(b_name, a_name))
TRANSFORMATIONS = {
"difference": _difference_from_summary,
"ratio": _ratio_from_summary,
"group_min": _group_min_from_summary,
"group_max": _group_max_from_summary,
}
# Base metrics and the variants that are implemented by the metrics engine
METRICS_SPEC = [
# base metrics from _extra_metrics
(true_positive_rate, ["difference", "ratio"]),
(true_negative_rate, ["difference", "ratio"]),
(false_positive_rate, ["difference", "ratio"]),
(false_negative_rate, ["difference", "ratio"]),
(selection_rate, ["difference", "ratio"]),
(mean_prediction, []),
(_root_mean_squared_error, []),
(_balanced_root_mean_squared_error, []),
(_mean_overprediction, []),
(_mean_underprediction, []),
# base metrics from sklearn.metrics
(skm.confusion_matrix, []),
(skm.accuracy_score, ["difference", "ratio", "group_min"]),
(skm.zero_one_loss, ["difference", "ratio", "group_max"]),
(skm.balanced_accuracy_score, ["group_min"]),
(skm.precision_score, ["group_min"]),
(skm.recall_score, ["group_min"]),
(skm.roc_auc_score, ["group_min"]),
(skm.mean_absolute_error, ["group_max"]),
(skm.mean_squared_error, ["group_max"]),
(skm.r2_score, ["group_min"]),
(skm.f1_score, ["group_max"]),
(skm.log_loss, ["group_min"]),
]
def _derive_metrics(metrics_spec):
metric_group_summary_dict = {}
derived_metric_dict = {}
for base_metric, variants in metrics_spec:
metric_group_summary_name = "{0}_group_summary".format(base_metric.__name__)
metric_group_summary = _make_metric_group_summary(
base_metric,
name=metric_group_summary_name)
metric_group_summary_dict[metric_group_summary_name] = metric_group_summary
for variant in variants:
derived_metric_name = "{0}_{1}".format(base_metric.__name__, variant)
derived_metric = _make_derived_metric(
TRANSFORMATIONS[variant],
metric_group_summary,
name=derived_metric_name)
derived_metric_dict[derived_metric_name] = derived_metric
return metric_group_summary_dict, derived_metric_dict
_metric_group_summary_dict, _derived_metric_dict = _derive_metrics(METRICS_SPEC)
globals().update(_derived_metric_dict)
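# The update above injects only the derived scalar metrics into the module
# namespace, e.g. accuracy_score_difference, accuracy_score_ratio and
# accuracy_score_group_min for the accuracy_score entry in METRICS_SPEC; the
# *_group_summary callables stay in _metric_group_summary_dict.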
```
|
{
"source": "jessicadavies-intel/llvm",
"score": 2
}
|
#### File: models/inlining/config.py
```python
import tensorflow as tf
POLICY_DECISION_LABEL = 'inlining_decision'
POLICY_OUTPUT_SPEC = """
[
{
"logging_name": "inlining_decision",
"tensor_spec": {
"name": "StatefulPartitionedCall",
"port": 0,
"type": "int64_t",
"shape": [
1
]
}
}
]
"""
# pylint: disable=g-complex-comprehension
def get_input_signature():
"""Returns the list of features for LLVM inlining."""
# int64 features
inputs = [
tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [
'caller_basic_block_count', 'caller_conditionally_executed_blocks',
'caller_users', 'callee_basic_block_count',
'callee_conditionally_executed_blocks', 'callee_users',
'nr_ctant_params', 'node_count', 'edge_count', 'callsite_height',
'cost_estimate', 'inlining_default'
]
]
# float32 features
inputs.extend([
tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
for key in ['discount', 'reward']
])
# int32 features
inputs.extend([
tf.TensorSpec(dtype=tf.int32, shape=(), name=key)
for key in ['step_type']
])
return inputs
def get_output_signature():
return POLICY_DECISION_LABEL
def get_output_spec():
return POLICY_OUTPUT_SPEC
```
#### File: opdsl/ops/core_named_ops.py
```python
from ..lang import *
T1 = TV.T1
T2 = TV.T2
Batch = S.Batch
@linalg_structured_op
def matmul(
A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
"""Performs a matrix multiplication of two 2D inputs.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
@linalg_structured_op
def batch_matmul(
A=TensorDef(T1, Batch, S.M, S.K),
B=TensorDef(T2, Batch, S.K, S.N),
C=TensorDef(U, Batch, S.M, S.N, output=True)):
"""Performs a batched matrix multiplication of two 3D inputs.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[D.b, D.m, D.n] += cast(U, A[D.b, D.m, D.k]) * cast(U, B[D.b, D.k, D.n])
@linalg_structured_op
def matvec(
A=TensorDef(T1, S.M, S.N),
y=TensorDef(T2, S.N),
x=TensorDef(U, S.M, output=True)):
"""Performs a matrix-vector multiplication.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
x[D.m] += cast(U, A[D.m, D.n]) * cast(U, y[D.n])
@linalg_structured_op
def vecmat(
y=TensorDef(T1, S.M),
A=TensorDef(T2, S.M, S.N),
x=TensorDef(U, S.N, output=True)):
"""Performs a vector-matrix multiplication.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
x[D.n] += cast(U, y[D.m]) * cast(U, A[D.m, D.n])
@linalg_structured_op
def dot(
A=TensorDef(T1, S.M), B=TensorDef(T2, S.M), C=TensorDef(U, output=True)):
"""Performs a dot product of two vectors to a scalar result.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[None] += cast(U, A[D.m]) * cast(U, B[D.m])
@linalg_structured_op
def depthwise_conv_2d_input_nhwc_filter_hwc_poly(
I=TensorDef(T1, S.N, S.IH, S.IW, S.C),
K=TensorDef(T2, S.KH, S.KW, S.C),
O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
strides=AttributeDef(S.SH, S.SW),
dilations=AttributeDef(S.DH, S.DW)):
"""Performs depth-wise 2-D convolution.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
O[D.n, D.oh, D.ow, D.c] += cast(
U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
D.c]) * cast(U, K[D.kh, D.kw, D.c])
@linalg_structured_op
def pooling_nhwc_sum_poly(
I=TensorDef(T1, S.N, S.H, S.W, S.C),
K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
strides=AttributeDef(S.SH, S.SW),
dilations=AttributeDef(S.DH, S.DW)):
"""Performs sum pooling.
Numeric casting is performed on the input operand, promoting it to the same
data type as the accumulator/output.
"""
O[D.n, D.oh, D.ow, D.c] += cast(
U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c])
@linalg_structured_op
def fill_rng_2d(
min=ScalarDef(F64),
max=ScalarDef(F64),
seed=ScalarDef(I32),
O=TensorDef(T, S.M, S.N, output=True)):
"""Fills the output tensor with pseudo random numbers.
  The operation generates pseudo random numbers using a linear congruential
generator. It provides no guarantees regarding the distribution of the
generated random numbers. Instead of generating the random numbers
sequentially, it instantiates one random number generator per data element
and runs them in parallel. The seed operand and the indices of the data
element seed the random number generation. The min and max operands limit
the range of the generated random numbers.
"""
multiplier = cast(I32, const(1103515245))
increment = cast(I32, const(12345))
rand1 = (cast(I32, index(D.m)) + seed) * multiplier + increment
rand2 = (cast(I32, index(D.n)) + rand1) * multiplier + increment
inv_range = cast(F64, const(2.3283064e-10))
offset = cast(F64, const(2147483647))
scaling = (max - min) * inv_range
O[D.m, D.n] = cast(T, (offset + cast(F64, rand2)) * scaling + min)
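  # Rough sketch of the arithmetic above: rand2 is a 32-bit LCG state, offset
  # shifts it into roughly [0, 2^32) and inv_range is approximately 2^-32, so
  # (offset + rand2) * inv_range falls in [0, 1) and scaling stretches that
  # interval onto [min, max).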
```
|
{
"source": "jessicadelrio/HandyHouse",
"score": 2
}
|
#### File: admin/models/buttons.py
```python
import os
def A_button(*a, **b):
b['_data-role'] = 'button'
b['_data-inline'] = 'true'
return A(*a, **b)
def button(href, label):
if is_mobile:
ret = A_button(SPAN(label), _href=href)
else:
ret = A(SPAN(label), _class='button btn', _href=href)
return ret
def button_enable(href, app):
if os.path.exists(os.path.join(apath(app, r=request), 'DISABLED')):
label = SPAN(T('Enable'), _style='color:red')
else:
label = SPAN(T('Disable'), _style='color:green')
id = 'enable_' + app
return A(label, _class='button btn', _id=id, callback=href, target=id)
def sp_button(href, label):
if request.user_agent().get('is_mobile'):
ret = A_button(SPAN(label), _href=href)
else:
ret = A(SPAN(label), _class='button special btn btn-inverse', _href=href)
return ret
def helpicon():
return IMG(_src=URL('static', 'images/help.png'), _alt='help')
def searchbox(elementid):
return SPAN(LABEL(IMG(_id="search_start", _src=URL('static', 'images/search.png'), _alt=T('filter')),
_class='icon', _for=elementid), ' ',
INPUT(_id=elementid, _type='text', _size=12, _class="input-medium"),
_class="searchbox")
```
#### File: contrib/login_methods/basic_auth.py
```python
from gluon._compat import urlopen
from gluon._compat import urllib2
import base64
def basic_auth(server="http://127.0.0.1"):
"""
to use basic login with a different server
from gluon.contrib.login_methods.basic_auth import basic_auth
auth.settings.login_methods.append(basic_auth('http://server'))
"""
def basic_login_aux(username,
password,
server=server):
key = base64.b64encode(username + ':' + password)
headers = {'Authorization': 'Basic ' + key}
request = urllib2.Request(server, None, headers)
try:
urlopen(request)
return True
except (urllib2.URLError, urllib2.HTTPError):
return False
return basic_login_aux
```
#### File: contrib/login_methods/loginradius_account.py
```python
import os
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
import json
class LoginRadiusAccount(object):
"""
from gluon.contrib.login_methods.loginradius_account import LoginRadiusAccount
auth.settings.actions_disabled=['register','change_password',
'request_reset_password']
auth.settings.login_form = LoginRadiusAccount(request,
api_key="...",
api_secret="...",
url = "http://localhost:8000/%s/default/user/login" % request.application)
"""
def __init__(self, request, api_key="", api_secret="",
url="", on_login_failure=None):
self.request = request
self.api_key = api_key
self.api_secret = api_secret
self.url = url
self.auth_base_url = "https://hub.loginradius.com/UserProfile.ashx/"
self.profile = None
self.on_login_failure = on_login_failure
self.mappings = Storage()
def defaultmapping(profile):
first_name = profile.get('FirstName')
last_name = profile.get('LastName')
email = profile.get('Email', [{}])[0].get('Value')
reg_id = profile.get('ID', '')
username = profile.get('ProfileName', email)
return dict(registration_id=reg_id, username=username, email=email,
first_name=first_name, last_name=last_name)
self.mappings.default = defaultmapping
def get_user(self):
request = self.request
user = None
if request.vars.token:
try:
auth_url = self.auth_base_url + self.api_secret + "/" + request.vars.token
json_data = fetch(auth_url, headers={'User-Agent': "LoginRadius - Python - SDK"})
self.profile = json.loads(json_data)
provider = self.profile['Provider']
mapping = self.mappings.get(provider, self.mappings['default'])
user = mapping(self.profile)
except (ValueError, KeyError):
pass
if user is None and self.on_login_failure:
redirect(self.on_login_failure)
return user
def login_form(self):
loginradius_url = "https://hub.loginradius.com/include/js/LoginRadius.js"
loginradius_lib = SCRIPT(_src=loginradius_url, _type='text/javascript')
container = DIV(_id="interfacecontainerdiv", _class='interfacecontainerdiv')
widget = SCRIPT("""var options={}; options.login=true;
LoginRadius_SocialLogin.util.ready(function () {
$ui = LoginRadius_SocialLogin.lr_login_settings;
$ui.interfacesize = "";$ui.apikey = "%s";
$ui.callback="%s"; $ui.lrinterfacecontainer ="interfacecontainerdiv";
LoginRadius_SocialLogin.init(options); });""" % (self.api_key, self.url))
form = DIV(container, loginradius_lib, widget)
return form
def use_loginradius(auth, filename='private/loginradius.key', **kwargs):
path = os.path.join(current.request.folder, filename)
if os.path.exists(path):
request = current.request
domain, public_key, private_key = open(path, 'r').read().strip().split(':')
url = URL('default', 'user', args='login', scheme=True)
auth.settings.actions_disabled = \
['register', 'change_password', 'request_reset_password']
auth.settings.login_form = LoginRadiusAccount(
request, api_key=public_key, api_secret=private_key,
url=url, **kwargs)
```
#### File: contrib/markdown/__init__.py
```python
from .markdown2 import *
from gluon.html import XML
def WIKI(text, encoding="utf8", safe_mode='escape', html4tags=False, **attributes):
if not text:
        text = ''
if 'extras' in attributes:
extras = attributes['extras']
del attributes['extras']
else:
extras=None
text = text.decode(encoding,'replace')
return XML(markdown(text,extras=extras,
safe_mode=safe_mode, html4tags=html4tags)\
.encode(encoding,'xmlcharrefreplace'),**attributes)
```
#### File: contrib/minify/jsmin.py
```python
r"""
=====================
Javascript Minifier
=====================
Javascript Minifier based on `jsmin.c by <NAME>`_\.
This module is a re-implementation based on the semantics of jsmin.c. Usually
it produces the same results. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- "return /regex/" is recognized correctly.
- rjsmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Besides the list above it differs from direct python ports of jsmin.c in
speed. Since most parts of the logic are handled by the regex engine it's way
faster than the original python port by <NAME>. The speed factor varies
between about 6 and 55 depending on input and python version (it gets faster
the more compressed the input already is). Compared to the speed-refactored
python port by <NAME> the performance gain is less dramatic but still
between 1.2 and 7. See the docs/BENCHMARKS file for details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Both python 2 and python 3 are supported.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
Original author of Python version: <NAME>
Home page: http://opensource.perlig.de/rjsmin/
Modified by <NAME> <<EMAIL>> for inclusion into web2py.
"""
__author__ = "<NAME>"
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.2'
__all__ = ['jsmin', 'jsmin_for_posers']
import re as _re
def _make_jsmin(extended=True, python_only=True):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`extended` : ``bool``
Extended Regexps? (using lookahead and lookbehind). This is faster,
because it can be optimized way more. The regexps used with `extended`
being false are only left here to allow easier porting to platforms
without extended regex features (and for my own reference...)
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
if extended:
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
else:
regex = (
r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)'
)
regex = regex % (charclass, nospecial, charclass, nospecial)
space = r'(?:%s|%s)' % (space_chars, space_comment)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(r'([\000-\040\047])', # for better portability
lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
preregex1 = r'[(,=:\[!&|?{};\r\n]'
preregex2 = r'%(not_id_literal)sreturn' % locals()
if extended:
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_sub = _re.compile((
r'([^\047"/\000-\040]+)'
r'|(%(strings)s[^\047"/\000-\040]*)'
r'|(?:(?<=%(preregex1)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?:(?<=%(preregex2)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
def space_subber(match):
""" Substitution callback """
# pylint: disable = C0321, R0911
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return '\n'
elif groups[5]:
return ' '
else:
return ''
def jsmin(script): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by <NAME>`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by <NAME>:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub(space_subber, '\n%s\n' % script).strip()
else:
pre_regex = r'(?:%(preregex1)s|%(preregex2)s)' % locals()
not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]')
not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_norm_sub = _re.compile((
r'(%(strings)s)'
r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))'
r'|(%(space)s)+'
r'|(?:(%(newline)s)%(space)s*)+'
) % locals()).sub
def space_norm_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1].replace('\r', '\n') + groups[2]
elif groups[3]:
return ' '
elif groups[4]:
return '\n'
space_sub1 = _re.compile((
r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)'
r'|\040(%(not_id_literal)s)'
r'|\n(%(not_id_literal_open)s)'
) % locals()).sub
def space_subber1(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2]
space_sub2 = _re.compile((
r'(%(strings)s)\040?'
r'|(%(pre_regex)s%(regex)s)[\040\n]?'
r'|(%(not_id_literal)s)\040'
r'|(%(not_id_literal_close)s)\n'
) % locals()).sub
def space_subber2(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3]
def jsmin(script):
r"""
Minify javascript based on `jsmin.c by <NAME>`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach. The script is minified with three passes:
normalization
                Control characters are mapped to spaces, spaces and newlines
are squeezed and comments are stripped.
space removal 1
Spaces before certain tokens are removed
space removal 2
                Spaces after certain tokens are removed
.. _jsmin.c by <NAME>:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub2(space_subber2,
space_sub1(space_subber1,
space_norm_sub(space_norm_subber,
'\n%s\n' % script)
)
).strip()
return jsmin
jsmin = _make_jsmin()
#####################
# EXAMPLE USAGE #
#####################
#
# import jsmin
# jsmin.jsmin(script)
#
def jsmin_for_posers(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by <NAME>:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regex. It's just for fun here and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
''
)
return _re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-#%-\04'
r'7)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011'
r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-'
r'#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*'
r'+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011'
r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+',
subber, '\n%s\n' % script
).strip()
if __name__ == '__main__':
import sys as _sys
_sys.stdout.write(jsmin(_sys.stdin.read()))
```
#### File: contrib/pysimplesoap/transport.py
```python
import logging
import ssl
import sys
try:
import urllib2
from cookielib import CookieJar
except ImportError:
from urllib import request as urllib2
from http.cookiejar import CookieJar
from . import __author__, __copyright__, __license__, __version__, TIMEOUT
from .simplexml import SimpleXMLElement, TYPE_MAP, Struct
log = logging.getLogger(__name__)
#
# Socket wrapper to enable socket.TCP_NODELAY - this greatly speeds up transactions in Linux
# WARNING: this will modify the standard library socket module, use with care!
# TODO: implement this as a transport facility
# (to pass options directly to httplib2 or pycurl)
# be aware of metaclasses and socks.py (SocksiPy) used by httplib2
if False:
import socket
realsocket = socket.socket
def socketwrap(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
sockobj = realsocket(family, type, proto)
if type == socket.SOCK_STREAM:
sockobj.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return sockobj
socket.socket = socketwrap
#
# We store metadata about what available transport mechanisms we have available.
#
_http_connectors = {} # libname: classimpl mapping
_http_facilities = {} # functionalitylabel: [sequence of libname] mapping
class TransportBase:
@classmethod
def supports_feature(cls, feature_name):
return cls._wrapper_name in _http_facilities[feature_name]
#
# httplib2 support.
#
try:
import httplib2
if sys.version > '3' and httplib2.__version__ <= "0.7.7":
import http.client
# httplib2 workaround: check_hostname needs a SSL context with either
# CERT_OPTIONAL or CERT_REQUIRED
# see https://code.google.com/p/httplib2/issues/detail?id=173
orig__init__ = http.client.HTTPSConnection.__init__
def fixer(self, host, port, key_file, cert_file, timeout, context,
check_hostname, *args, **kwargs):
chk = kwargs.get('disable_ssl_certificate_validation', True) ^ True
orig__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, timeout=timeout, context=context,
check_hostname=chk)
http.client.HTTPSConnection.__init__ = fixer
except ImportError:
TIMEOUT = None # timeout not supported by urllib2
pass
else:
class Httplib2Transport(httplib2.Http, TransportBase):
_wrapper_version = "httplib2 %s" % httplib2.__version__
_wrapper_name = 'httplib2'
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
# httplib2.debuglevel=4
kwargs = {}
if proxy:
import socks
kwargs['proxy_info'] = httplib2.ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, **proxy)
log.info("using proxy %s" % proxy)
# set optional parameters according to supported httplib2 version
if httplib2.__version__ >= '0.3.0':
kwargs['timeout'] = timeout
if httplib2.__version__ >= '0.7.0':
kwargs['disable_ssl_certificate_validation'] = cacert is None
kwargs['ca_certs'] = cacert
httplib2.Http.__init__(self, **kwargs)
_http_connectors['httplib2'] = Httplib2Transport
_http_facilities.setdefault('proxy', []).append('httplib2')
_http_facilities.setdefault('cacert', []).append('httplib2')
import inspect
if 'timeout' in inspect.getargspec(httplib2.Http.__init__)[0]:
_http_facilities.setdefault('timeout', []).append('httplib2')
#
# urllib2 support.
#
class urllib2Transport(TransportBase):
_wrapper_version = "urllib2 %s" % urllib2.__version__
_wrapper_name = 'urllib2'
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
if (timeout is not None) and not self.supports_feature('timeout'):
raise RuntimeError('timeout is not supported with urllib2 transport')
if proxy:
raise RuntimeError('proxy is not supported with urllib2 transport')
if cacert:
            raise RuntimeError('cacert is not supported with urllib2 transport')
handlers = []
if ((sys.version_info[0] == 2 and sys.version_info >= (2,7,9)) or
(sys.version_info[0] == 3 and sys.version_info >= (3,2,0))):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
handlers.append(urllib2.HTTPSHandler(context=context))
if sessions:
handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))
opener = urllib2.build_opener(*handlers)
self.request_opener = opener.open
self._timeout = timeout
def request(self, url, method="GET", body=None, headers={}):
req = urllib2.Request(url, body, headers)
try:
f = self.request_opener(req, timeout=self._timeout)
return f.info(), f.read()
except urllib2.HTTPError as f:
if f.code != 500:
raise
return f.info(), f.read()
_http_connectors['urllib2'] = urllib2Transport
_http_facilities.setdefault('sessions', []).append('urllib2')
if sys.version_info >= (2, 6):
_http_facilities.setdefault('timeout', []).append('urllib2')
#
# pycurl support.
# experimental: pycurl seems faster + better proxy support (NTLM) + ssl features
#
try:
import pycurl
except ImportError:
pass
else:
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class pycurlTransport(TransportBase):
_wrapper_version = pycurl.version
_wrapper_name = 'pycurl'
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
self.timeout = timeout
self.proxy = proxy or {}
self.cacert = cacert
def request(self, url, method, body, headers):
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
if 'proxy_host' in self.proxy:
c.setopt(pycurl.PROXY, self.proxy['proxy_host'])
if 'proxy_port' in self.proxy:
c.setopt(pycurl.PROXYPORT, self.proxy['proxy_port'])
if 'proxy_user' in self.proxy:
c.setopt(pycurl.PROXYUSERPWD, "%(proxy_user)s:%(proxy_pass)s" % self.proxy)
self.buf = StringIO()
c.setopt(pycurl.WRITEFUNCTION, self.buf.write)
#c.setopt(pycurl.READFUNCTION, self.read)
#self.body = StringIO(body)
#c.setopt(pycurl.HEADERFUNCTION, self.header)
if self.cacert:
c.setopt(c.CAINFO, self.cacert)
c.setopt(pycurl.SSL_VERIFYPEER, self.cacert and 1 or 0)
c.setopt(pycurl.SSL_VERIFYHOST, self.cacert and 2 or 0)
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.TIMEOUT, self.timeout)
if method == 'POST':
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, body)
if headers:
hdrs = ['%s: %s' % (k, v) for k, v in headers.items()]
log.debug(hdrs)
c.setopt(pycurl.HTTPHEADER, hdrs)
c.perform()
c.close()
return {}, self.buf.getvalue()
_http_connectors['pycurl'] = pycurlTransport
_http_facilities.setdefault('proxy', []).append('pycurl')
_http_facilities.setdefault('cacert', []).append('pycurl')
_http_facilities.setdefault('timeout', []).append('pycurl')
class DummyTransport:
"""Testing class to load a xml response"""
def __init__(self, xml_response):
self.xml_response = xml_response
def request(self, location, method, body, headers):
log.debug("%s %s", method, location)
log.debug(headers)
log.debug(body)
return {}, self.xml_response
def get_http_wrapper(library=None, features=[]):
# If we are asked for a specific library, return it.
if library is not None:
try:
return _http_connectors[library]
except KeyError:
raise RuntimeError('%s transport is not available' % (library,))
# If we haven't been asked for a specific feature either, then just return our favourite
# implementation.
if not features:
return _http_connectors.get('httplib2', _http_connectors['urllib2'])
# If we are asked for a connector which supports the given features, then we will
# try that.
current_candidates = _http_connectors.keys()
new_candidates = []
for feature in features:
for candidate in current_candidates:
if candidate in _http_facilities.get(feature, []):
new_candidates.append(candidate)
current_candidates = new_candidates
new_candidates = []
# Return the first candidate in the list.
try:
candidate_name = current_candidates[0]
except IndexError:
raise RuntimeError("no transport available which supports these features: %s" % (features,))
else:
return _http_connectors[candidate_name]
def set_http_wrapper(library=None, features=[]):
"""Set a suitable HTTP connection wrapper."""
global Http
Http = get_http_wrapper(library, features)
return Http
def get_Http():
"""Return current transport class"""
global Http
return Http
# define the default HTTP connection class (it can be changed at runtime!):
set_http_wrapper()
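# Illustrative use only: callers can force a specific backend or request
# capabilities, e.g.
#   set_http_wrapper('pycurl')                       # explicit library
#   set_http_wrapper(features=['cacert', 'timeout']) # pick by capability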
```
#### File: pydal/dialects/ingres.py
```python
from .._compat import basestring
from ..adapters.ingres import Ingres, IngresUnicode
from .base import SQLDialect
from . import dialects, sqltype_for
@dialects.register_for(Ingres)
class IngresDialect(SQLDialect):
    INGRES_SEQNAME = 'ii***lineitemsequence'
@sqltype_for('text')
def type_text(self):
return 'CLOB'
@sqltype_for('integer')
def type_integer(self):
return 'INTEGER4'
@sqltype_for('bigint')
def type_bigint(self):
return 'BIGINT'
@sqltype_for('double')
def type_float(self):
return 'FLOAT8'
@sqltype_for('date')
def type_date(self):
return 'ANSIDATE'
@sqltype_for('time')
def type_time(self):
return 'TIME WITHOUT TIME ZONE'
@sqltype_for('datetime')
def type_datetime(self):
return 'TIMESTAMP WITHOUT TIME ZONE'
@sqltype_for('id')
def type_id(self):
return 'int not null unique with default next value for %s' % \
self.INGRES_SEQNAME
@sqltype_for('big-id')
def type_big_id(self):
return 'bigint not null unique with default next value for %s' % \
self.INGRES_SEQNAME
@sqltype_for('reference')
def type_reference(self):
return 'INT, FOREIGN KEY (%(field_name)s) REFERENCES ' + \
'%(foreign_key)s ON DELETE %(on_delete_action)s'
@sqltype_for('big-reference')
def type_big_reference(self):
return 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES ' + \
'%(foreign_key)s ON DELETE %(on_delete_action)s'
@sqltype_for('reference FK')
def type_reference_fk(self):
return ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY ' + \
'(%(field_name)s) REFERENCES %(foreign_key)s ' + \
'ON DELETE %(on_delete_action)s'
@sqltype_for('reference TFK')
def type_reference_tfk(self):
return ' CONSTRAINT FK_%(constraint_name)s_PK FOREIGN KEY ' + \
'(%(field_name)s) REFERENCES %(foreign_table)s' + \
'(%(foreign_key)s) ON DELETE %(on_delete_action)s'
def left_join(self, val, query_env={}):
# Left join must always have an ON clause
if not isinstance(val, basestring):
val = self.expand(val, query_env=query_env)
return 'LEFT OUTER JOIN %s' % val
@property
def random(self):
return 'RANDOM()'
def select(self, fields, tables, where=None, groupby=None, having=None,
orderby=None, limitby=None, distinct=False, for_update=False):
dst, whr, grp, order, limit, offset, upd = '', '', '', '', '', '', ''
if distinct is True:
dst = ' DISTINCT'
elif distinct:
dst = ' DISTINCT ON (%s)' % distinct
if where:
whr = ' %s' % self.where(where)
if groupby:
grp = ' GROUP BY %s' % groupby
if having:
grp += ' HAVING %s' % having
if orderby:
order = ' ORDER BY %s' % orderby
if limitby:
(lmin, lmax) = limitby
fetch_amt = lmax - lmin
if fetch_amt:
limit = ' FIRST %i' % fetch_amt
if lmin:
offset = ' OFFSET %i' % lmin
if for_update:
upd = ' FOR UPDATE'
        return 'SELECT%s%s %s FROM %s%s%s%s%s%s;' % (
dst, limit, fields, tables, whr, grp, order, offset, upd)
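    # Illustrative output only: with fields='a,b', tables='t' and
    # limitby=(20, 30) the method above renders
    # "SELECT FIRST 10 a,b FROM t OFFSET 20;".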
@dialects.register_for(IngresUnicode)
class IngresUnicodeDialect(IngresDialect):
@sqltype_for('string')
def type_string(self):
return 'NVARCHAR(%(length)s)'
@sqltype_for('text')
def type_text(self):
return 'NCLOB'
```
#### File: pydal/dialects/__init__.py
```python
from .._compat import with_metaclass, iteritems
from .._gae import gae
from .._load import OrderedDict
from ..helpers._internals import Dispatcher
from ..objects import Expression
dialects = Dispatcher("dialect")
class sqltype_for(object):
_inst_count_ = 0
def __init__(self, key):
self.key = key
self._inst_count_ = sqltype_for._inst_count_
sqltype_for._inst_count_ += 1
def __call__(self, f):
self.f = f
return self
class register_expression(object):
_inst_count_ = 0
def __init__(self, name):
self.name = name
self._inst_count_ = register_expression._inst_count_
register_expression._inst_count_ += 1
def __call__(self, f):
self.f = f
return self
class ExpressionMethodWrapper(object):
def __init__(self, dialect, obj):
self.dialect = dialect
self.obj = obj
def __call__(self, expression, *args, **kwargs):
return self.obj.f(self.dialect, expression, *args, **kwargs)
class MetaDialect(type):
def __new__(cls, name, bases, attrs):
new_class = type.__new__(cls, name, bases, attrs)
if bases == (object,):
return new_class
#: collect declared attributes
sqltypes = []
expressions = []
for key, value in list(attrs.items()):
if isinstance(value, sqltype_for):
sqltypes.append((key, value))
if isinstance(value, register_expression):
expressions.append((key, value))
sqltypes.sort(key=lambda x: x[1]._inst_count_)
expressions.sort(key=lambda x: x[1]._inst_count_)
declared_sqltypes = OrderedDict()
declared_expressions = OrderedDict()
for key, val in sqltypes:
declared_sqltypes[key] = val
new_class._declared_sqltypes_ = declared_sqltypes
for key, val in expressions:
declared_expressions[key] = val
new_class._declared_expressions_ = declared_expressions
#: get super declared attributes
all_sqltypes = OrderedDict()
all_expressions = OrderedDict()
for base in reversed(new_class.__mro__[1:]):
if hasattr(base, '_declared_sqltypes_'):
all_sqltypes.update(base._declared_sqltypes_)
if hasattr(base, '_declared_expressions_'):
all_expressions.update(base._declared_expressions_)
#: set re-constructed attributes
all_sqltypes.update(declared_sqltypes)
all_expressions.update(declared_expressions)
new_class._all_sqltypes_ = all_sqltypes
new_class._all_expressions_ = all_expressions
return new_class
class Dialect(with_metaclass(MetaDialect)):
def __init__(self, adapter):
self.adapter = adapter
self.types = {}
for name, obj in iteritems(self._all_sqltypes_):
self.types[obj.key] = obj.f(self)
for name, obj in iteritems(self._all_expressions_):
Expression._dialect_expressions_[obj.name] = \
ExpressionMethodWrapper(self, obj)
def expand(self, *args, **kwargs):
return self.adapter.expand(*args, **kwargs)
from .base import SQLDialect
from .sqlite import SQLiteDialect, SpatialiteDialect
from .postgre import PostgreDialect
from .mysql import MySQLDialect
from .mssql import MSSQLDialect
from .mongo import MongoDialect
from .db2 import DB2Dialect
from .firebird import FireBirdDialect
from .informix import InformixDialect
from .ingres import IngresDialect
from .oracle import OracleDialect
from .sap import SAPDBDialect
from .teradata import TeradataDialect
from .couchdb import CouchDBDialect
if gae is not None:
from .google import GoogleDatastoreDialect
```
#### File: pydal/parsers/base.py
```python
import json
from base64 import b64decode
from datetime import datetime, date, time, timedelta
from decimal import Decimal
from .._compat import PY2, integer_types, basestring, to_bytes, to_native
from ..adapters.base import SQLAdapter
from ..helpers.classes import Reference
from ..helpers.methods import bar_decode_string, bar_decode_integer
from . import Parser, parsers, for_type, before_parse
long = integer_types[-1]
class BasicParser(Parser):
@for_type('id')
def _id(self, value):
return long(value)
@for_type('integer')
def _integer(self, value):
return long(value)
@for_type('float')
def _float(self, value):
return float(value)
@for_type('double')
def _double(self, value):
return self.registered['float'](value, 'double')
@for_type('boolean')
def _boolean(self, value):
return value == self.dialect.true or str(value)[:1].lower() == 't'
@for_type('blob')
def _blob(self, value):
decoded = b64decode(to_bytes(value))
try:
decoded = to_native(decoded)
except:
pass
return decoded
@before_parse('reference')
def reference_extras(self, field_type):
return {'referee': field_type[10:].strip()}
@for_type('reference')
def _reference(self, value, referee):
if '.' not in referee:
value = Reference(value)
value._table, value._record = self.adapter.db[referee], None
return value
@before_parse('list:reference')
def referencelist_extras(self, field_type):
return {'field_type': field_type}
@for_type('list:reference')
def _list_references(self, value, field_type):
return [self.registered['reference'](
el, field_type[5:]) for el in value]
@for_type('bigint')
def _bigint(self, value):
return self.registered['integer'](value, 'bigint')
class DateParser(Parser):
@for_type('date')
def _date(self, value):
if isinstance(value, datetime):
return value.date()
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
return date(y, m, d)
class TimeParser(Parser):
@for_type('time')
def _time(self, value):
if isinstance(value, datetime):
return value.time()
time_items = list(map(int, str(value)[:8].strip().split(':')[:3]))
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
return time(h, mi, s)
class DateTimeParser(Parser):
@for_type('datetime')
def _datetime(self, value):
value = str(value)
date_part, time_part, timezone = value[:10], value[11:19], value[19:]
if '+' in timezone:
ms, tz = timezone.split('+')
h, m = tz.split(':')
dt = timedelta(seconds=3600 * int(h) + 60 * int(m))
elif '-' in timezone:
ms, tz = timezone.split('-')
h, m = tz.split(':')
dt = -timedelta(seconds=3600 * int(h) + 60 * int(m))
else:
ms = timezone.upper().split('Z')[0]
dt = None
(y, m, d) = map(int, date_part.split('-'))
time_parts = time_part and time_part.split(':')[:3] or (0, 0, 0)
while len(time_parts) < 3:
time_parts.append(0)
time_items = map(int, time_parts)
(h, mi, s) = time_items
if ms and ms[0] == '.':
ms = int(float('0' + ms) * 1000000)
else:
ms = 0
value = datetime(y, m, d, h, mi, s, ms)
if dt:
value = value + dt
return value
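    # For example, '2015-09-04 12:30:45' parses to
    # datetime(2015, 9, 4, 12, 30, 45); a trailing '+HH:MM' offset is added to
    # (and '-HH:MM' subtracted from) the naive result.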
class DecimalParser(Parser):
@for_type('decimal')
def _decimal(self, value):
return Decimal(value)
class JSONParser(Parser):
@for_type('json')
def _json(self, value):
#if 'loads' not in self.driver_auto_json:
if not isinstance(value, basestring):
raise RuntimeError('json data not a string')
if PY2 and isinstance(value, unicode):
value = value.encode('utf-8')
return json.loads(value)
class ListsParser(BasicParser):
@for_type('list:integer')
def _list_integers(self, value):
return bar_decode_integer(value)
@for_type('list:string')
def _list_strings(self, value):
return bar_decode_string(value)
@for_type('list:reference')
def _list_references(self, value, field_type):
value = bar_decode_integer(value)
return [self.registered['reference'](
el, field_type[5:]) for el in value]
@parsers.register_for(SQLAdapter)
class Commonparser(
ListsParser, DateParser, TimeParser, DateTimeParser, DecimalParser,
JSONParser
):
pass
```
#### File: HandyHouse/scripts/extract_mssql_models.py
```python
__author__ = "<NAME> <<EMAIL>>"
HELP = """
USAGE: extract_mssql_models db host port user passwd
Call with SQL Server database connection parameters,
web2py model will be printed on standard output.
EXAMPLE: python extract_mssql_models.py mydb localhost 3306 kflanaga pass
or
python extract_mssql_models.py mydb localhost 3306 kflanaga pass > db_model.py
"""
# Config options
DEBUG = False # print debug messages to STDERR
SCHEMA = 'dbo'
COMMAND_LINE_MODE = True # running from command prompt. Disable to specify variables and use in IDE
# Only specify values below if not running from command line
DB = None
HOST = None
USER = None
PASSWD = None
PORT = None
# Constant for Field keyword parameter order (and filter):
KWARGS = ('type', 'length', 'default', 'required', 'ondelete',
'notnull', 'unique', 'label', 'comment', 'rname')
import sys
import re
# This is from pydal/helpers/regex.py as of 2016-06-16
# Use this to recognize if a field name need to have an rname representation
REGEX_VALID_TB_FLD = re.compile(r'^[^\d_][_0-9a-zA-Z]*\Z')
# For replacing invalid characters in field names
INVALID_CHARS = re.compile(r'[^a-zA-Z0-9_]')
def get_valid_column_name(field):
"""Return a valid column name that follows Python's rules for identifiers, which is what web2py requires for column
names. Replaces invalid characters with underscores and leading digits with their associated English word."""
if not REGEX_VALID_TB_FLD.match(field):
# If the first character is a digit, replace it with its word counterpart
if re.match(r'^[0-9]', field):
numbers = ['Zero', 'One', 'Two', 'Three', 'Four',
'Five', 'Six', 'Seven', 'Eight', 'Nine']
field = numbers[int(field[0])] + field[1:]
field = INVALID_CHARS.sub('_', field)
return field
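# Illustration (added note, not part of the original script) of how the renaming behaves:
#     get_valid_column_name('2nd-value')  ->  'Twond_value'   (leading digit spelled out, '-' becomes '_')
#     get_valid_column_name('order')      ->  'order'         (already a valid identifier, left unchanged)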
def query(conn, sql, *args):
"Execute a SQL query and return rows as a list of dicts"
cur = conn.cursor()
ret = []
try:
if DEBUG: print >> sys.stderr, "QUERY: ", sql % args
cur.execute(sql % args)
for row in cur:
dic = {}
for i, value in enumerate(row):
field = cur.description[i][0]
dic[field] = value
if DEBUG: print >> sys.stderr, "RET: ", dic
ret.append(dic)
return ret
finally:
cur.close()
def get_tables(conn, schema=SCHEMA):
"List table names in a given schema"
rows = query(conn, """SELECT table_name FROM information_schema.tables
WHERE table_schema = '%s'
ORDER BY table_name""", schema)
return [row['table_name'] for row in rows]
def get_fields(conn, table):
"Retrieve field list for a given table"
if DEBUG: print >> sys.stderr, "Processing TABLE", table
rows = query(conn, """
SELECT column_name, data_type,
is_nullable,
character_maximum_length,
numeric_precision, numeric_precision_radix, numeric_scale,
column_default
FROM information_schema.columns
WHERE table_name='%s'
ORDER BY ordinal_position""", table)
return rows
def define_field(conn, table, field, pks):
"Determine field type, default value, references, etc."
f = {}
ref = references(conn, table, field['column_name'])
if ref:
f.update(ref)
elif field['column_default'] and \
field['column_default'].startswith("nextval") and \
field['column_name'] in pks:
f['type'] = "'id'"
elif field['data_type'].startswith('character'):
f['type'] = "'string'"
if field['character_maximum_length']:
f['length'] = field['character_maximum_length']
elif field['data_type'] in ('text', 'ntext'):
f['type'] = "'text'"
elif field['data_type'] in ('boolean', 'bit'):
f['type'] = "'boolean'"
elif field['data_type'] in ('tinyint', 'smallint', 'bigint', 'int'):
f['type'] = "'integer'"
elif field['data_type'] in ('real', 'float'):
f['type'] = "'double'"
elif field['data_type'] in ('datetime', 'datetime2', 'smalldatetime'):
f['type'] = "'datetime'"
elif field['data_type'] in ('timestamp',):
f['type'] = "'datetime'"
f['default'] = "request.now"
f['update'] = "request.now"
elif field['data_type'] in ('date',):
f['type'] = "'date'"
elif field['data_type'] in ('time',):
f['type'] = "'time'"
elif field['data_type'] in ('numeric', 'money', 'smallmoney', 'decimal'):
f['type'] = "'decimal'"
f['precision'] = field['numeric_precision']
f['scale'] = field['numeric_scale'] or 0
elif field['data_type'] in ('binary', 'varbinary', 'image'):
f['type'] = "'blob'"
elif field['data_type'] in ('point', 'lseg', 'polygon', 'unknown', 'USER-DEFINED', 'sql_variant'):
f['type'] = "" # unsupported?
    elif field['data_type'] in ('varchar', 'char', 'nchar', 'nvarchar', 'uniqueidentifier'):
f['type'] = "'string'"
else:
raise RuntimeError("Data Type not supported: %s " % str(field))
try:
if field['column_default']:
if field['column_default'] == "now()":
d = "request.now"
elif field['column_default'] == "true":
d = "True"
elif field['column_default'] == "false":
d = "False"
else:
d = repr(eval(field['column_default']))
f['default'] = str(d)
except (ValueError, SyntaxError):
pass
except Exception, e:
raise RuntimeError("Default unsupported '%s'" % field['column_default'])
if not field['is_nullable']:
f['notnull'] = "True"
# For field names that are not valid python identifiers, we need to add a reference to their actual name
# in the back end database
if not REGEX_VALID_TB_FLD.match(field['column_name']):
f['rname'] = "'[%s]'" % field['column_name']
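        # Illustration (added note): a source column named "Order Date" fails the identifier check,
        # so it keeps rname '[Order Date]' here while define_table below emits the sanitized
        # field name 'Order_Date'.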
return f
def is_unique(conn, table, field):
"Find unique columns (incomplete support)"
rows = query(conn, """
SELECT c.column_name
FROM information_schema.table_constraints t
INNER JOIN information_schema.constraint_column_usage c
ON (t.CONSTRAINT_CATALOG = c.CONSTRAINT_CATALOG
AND t.CONSTRAINT_NAME = c.CONSTRAINT_NAME
AND t.CONSTRAINT_SCHEMA = c.CONSTRAINT_SCHEMA
AND t.TABLE_CATALOG = c.TABLE_CATALOG
AND t.TABLE_NAME = c.TABLE_NAME
AND t.TABLE_SCHEMA = c.TABLE_SCHEMA)
WHERE t.table_name='%s'
AND c.column_name='%s'
AND t.constraint_type='UNIQUE'
;""", table, field['column_name'])
return rows and True or False
def primarykeys(conn, table):
"Find primary keys"
rows = query(conn, """
SELECT c.column_name
FROM information_schema.table_constraints t
INNER JOIN information_schema.constraint_column_usage c
ON (t.CONSTRAINT_CATALOG = c.CONSTRAINT_CATALOG
AND t.CONSTRAINT_NAME = c.CONSTRAINT_NAME
AND t.CONSTRAINT_SCHEMA = c.CONSTRAINT_SCHEMA
AND t.TABLE_CATALOG = c.TABLE_CATALOG
AND t.TABLE_NAME = c.TABLE_NAME
AND t.TABLE_SCHEMA = c.TABLE_SCHEMA)
WHERE t.table_name='%s'
AND t.constraint_type='PRIMARY KEY'
;""", table)
return [row['column_name'] for row in rows]
def references(conn, table, field):
"Find a FK (fails if multiple)"
rows1 = query(conn, """
SELECT k.table_name, k.column_name, k.constraint_name,
r.update_rule, r.delete_rule, k.ordinal_position
FROM information_schema.key_column_usage k
INNER JOIN information_schema.referential_constraints r
ON (k.CONSTRAINT_CATALOG = r.CONSTRAINT_CATALOG
AND k.CONSTRAINT_NAME = r.CONSTRAINT_NAME
AND k.CONSTRAINT_SCHEMA = r.CONSTRAINT_SCHEMA)
INNER JOIN information_schema.table_constraints t
ON (r.CONSTRAINT_CATALOG = t.CONSTRAINT_CATALOG
AND r.CONSTRAINT_NAME = t.CONSTRAINT_NAME
AND r.CONSTRAINT_SCHEMA = t.CONSTRAINT_SCHEMA)
WHERE k.table_name='%s'
AND k.column_name='%s'
AND t.constraint_type='FOREIGN KEY'
;""", table, field)
if len(rows1) == 1:
rows2 = query(conn, """
SELECT table_name, column_name, *
FROM information_schema.constraint_column_usage
WHERE constraint_name='%s'
""", rows1[0]['constraint_name'])
row = None
if len(rows2) > 1:
row = rows2[int(rows1[0]['ordinal_position']) - 1]
keyed = True
if len(rows2) == 1:
row = rows2[0]
keyed = False
if row:
if keyed: # THIS IS BAD, DON'T MIX "id" and primarykey!!!
ref = {'type': "'reference %s.%s'" % (row['table_name'],
row['column_name'])}
else:
ref = {'type': "'reference %s'" % (row['table_name'],)}
if rows1[0]['delete_rule'] != "NO ACTION":
ref['ondelete'] = repr(rows1[0]['delete_rule'])
return ref
elif rows2:
raise RuntimeError("Unsupported foreign key reference: %s" %
str(rows2))
elif rows1:
raise RuntimeError("Unsupported referential constraint: %s" %
str(rows1))
def define_table(conn, table):
"Output single table definition"
fields = get_fields(conn, table)
pks = primarykeys(conn, table)
print "db.define_table('%s'," % (table,)
for field in fields:
fname = field['column_name']
fdef = define_field(conn, table, field, pks)
if fname not in pks and is_unique(conn, table, field):
fdef['unique'] = "True"
if fdef['type'] == "'id'" and fname in pks:
pks.pop(pks.index(fname))
print " Field('%s', %s)," % (get_valid_column_name(fname),
', '.join(["%s=%s" % (k, fdef[k]) for k in KWARGS
if k in fdef and fdef[k]]))
if pks:
print " primarykey=[%s]," % ", ".join(["'%s'" % pk for pk in pks])
print " migrate=migrate)"
print
def define_db(conn, db, host, port, user, passwd):
"Output database definition (model)"
dal = 'db = DAL("mssql4://%s:%s@%s:%s/%s", pool_size=10, decode_credentials=True)'
print dal % (
user.replace('@', '%40').replace(':', '%3A'), passwd.replace('@', '%40').replace(':', '%3A'), host, port, db)
print
print "migrate = False"
print
for table in get_tables(conn):
define_table(conn, table)
if __name__ == "__main__":
# Parse arguments from command line:
if len(sys.argv) < 6 and COMMAND_LINE_MODE:
print HELP
else:
# Parse arguments from command line:
if COMMAND_LINE_MODE:
db, host, port, user, passwd = sys.argv[1:6]
else:
db = DB
host = HOST
user = USER
passwd = <PASSWORD>
port = PORT
# Make the database connection (change driver if required)
import pyodbc
# cnn = pyodbc.connect(database=db, host=host, port=port,
# user=user, password=<PASSWORD>,
# )
cnn = pyodbc.connect(
r'DRIVER={{SQL Server Native Client 11.0}};SERVER={server};PORT={port};DATABASE={db};UID={user};PWD={passwd}'.format(
server=host, port=port, db=db, user=user, passwd=<PASSWORD>)
)
# Start model code generation:
define_db(cnn, db, host, port, user, passwd)
```
#### File: HandyHouse/scripts/extract_oracle_models.py
```python
__author__ = "<NAME> <<EMAIL>>"
HELP = """
USAGE: extract_oracle_models db host port user passwd
Call with Oracle database connection parameters,
web2py model will be printed on standard output.
EXAMPLE: python extract_oracle_models.py ORCL localhost 1521 user password
"""
# Config options
DEBUG = False # print debug messages to STDERR
# Constant for Field keyword parameter order (and filter):
KWARGS = ('type', 'length', 'default', 'required', 'ondelete',
'notnull', 'unique', 'label', 'comment')
import sys
def query(conn, sql, *args):
"Execute a SQL query and return rows as a list of dicts"
cur = conn.cursor()
ret = []
try:
if DEBUG:
print >> sys.stderr, "QUERY: ", sql , args
cur.execute(sql, args)
for row in cur:
dic = {}
for i, value in enumerate(row):
field = cur.description[i][0]
dic[field] = value
if DEBUG:
print >> sys.stderr, "RET: ", dic
ret.append(dic)
return ret
except cx_Oracle.DatabaseError, exc:
error, = exc.args
print >> sys.stderr, "Oracle-Error-Message:", error.message
finally:
cur.close()
def get_tables(conn):
"List table names in a given schema"
rows = query(conn, """SELECT TABLE_NAME FROM USER_TABLES
ORDER BY TABLE_NAME""")
return [row['TABLE_NAME'] for row in rows]
def get_fields(conn, table):
"Retrieve field list for a given table"
if DEBUG:
print >> sys.stderr, "Processing TABLE", table
rows = query(conn, """
SELECT COLUMN_NAME, DATA_TYPE,
NULLABLE AS IS_NULLABLE,
CHAR_LENGTH AS CHARACTER_MAXIMUM_LENGTH,
DATA_PRECISION AS NUMERIC_PRECISION,
DATA_SCALE AS NUMERIC_SCALE,
DATA_DEFAULT AS COLUMN_DEFAULT
FROM USER_TAB_COLUMNS
WHERE TABLE_NAME=:t
""", table)
return rows
def define_field(conn, table, field, pks):
"Determine field type, default value, references, etc."
f = {}
ref = references(conn, table, field['COLUMN_NAME'])
# Foreign Keys
if ref:
f.update(ref)
# PK & Numeric & autoincrement => id
elif field['COLUMN_NAME'] in pks and \
field['DATA_TYPE'] in ('INT', 'NUMBER') and \
is_autoincrement(conn, table, field):
f['type'] = "'id'"
# Other data types
    elif field['DATA_TYPE'] in ('BINARY_DOUBLE',):
        f['type'] = "'double'"
    elif field['DATA_TYPE'] in ('CHAR', 'NCHAR'):
        f['type'] = "'string'"
        f['comment'] = "'Alternative types: boolean, time'"
    elif field['DATA_TYPE'] in ('BLOB', 'CLOB'):
        f['type'] = "'blob'"
        f['comment'] = "'Alternative types: text, json, list:*'"
    elif field['DATA_TYPE'] in ('DATE',):
        f['type'] = "'datetime'"
        f['comment'] = "'Alternative types: date'"
    elif field['DATA_TYPE'] in ('FLOAT',):
        f['type'] = "'float'"
    elif field['DATA_TYPE'] in ('INT',):
        f['type'] = "'integer'"
    elif field['DATA_TYPE'] in ('NUMBER',):
        f['type'] = "'bigint'"
    elif field['DATA_TYPE'] in ('NUMERIC',):
f['type'] = "'decimal'"
f['precision'] = field['NUMERIC_PRECISION']
f['scale'] = field['NUMERIC_SCALE'] or 0
elif field['DATA_TYPE'] in ('VARCHAR2','NVARCHAR2'):
f['type'] = "'string'"
if field['CHARACTER_MAXIMUM_LENGTH']:
f['length'] = field['CHARACTER_MAXIMUM_LENGTH']
f['comment'] = "'Other possible types: password, upload'"
else:
f['type'] = "'blob'"
f['comment'] = "'WARNING: Oracle Data Type %s was not mapped." % \
str(field['DATA_TYPE']) + " Using 'blob' as fallback.'"
try:
if field['COLUMN_DEFAULT']:
if field['COLUMN_DEFAULT'] == "sysdate":
d = "request.now"
elif field['COLUMN_DEFAULT'].upper() == "T":
d = "True"
elif field['COLUMN_DEFAULT'].upper() == "F":
d = "False"
else:
d = repr(eval(field['COLUMN_DEFAULT']))
f['default'] = str(d)
except (ValueError, SyntaxError):
pass
except Exception, e:
raise RuntimeError(
"Default unsupported '%s'" % field['COLUMN_DEFAULT'])
if not field['IS_NULLABLE']:
f['notnull'] = "True"
return f
def is_unique(conn, table, field):
"Find unique columns"
rows = query(conn, """
SELECT COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
AND CONS.CONSTRAINT_TYPE = 'U'
AND COLS.TABLE_NAME = :t
AND COLS.COLUMN_NAME = :c
""", table, field['COLUMN_NAME'])
return rows and True or False
# Returns True when a "BEFORE EACH ROW INSERT" trigger is found and:
# a) it mentions the "NEXTVAL" keyword (used by sequences)
# b) it operates on the given table and column
#
# On some (inelegant) database designs, SEQUENCE.NEXTVAL is called directly
# from each "insert" statement, instead of using triggers. Such cases cannot
# be detected by inspecting Oracle's metadata tables, as sequences are not
# logically bound to any specific table or field.
def is_autoincrement(conn, table, field):
"Find auto increment fields (best effort)"
rows = query(conn, """
SELECT TRIGGER_NAME
FROM USER_TRIGGERS,
(SELECT NAME, LISTAGG(TEXT, ' ') WITHIN GROUP (ORDER BY LINE) TEXT
FROM USER_SOURCE
WHERE TYPE = 'TRIGGER'
GROUP BY NAME
) TRIGGER_DEFINITION
WHERE TRIGGER_NAME = NAME
AND TRIGGERING_EVENT = 'INSERT'
AND TRIGGER_TYPE = 'BEFORE EACH ROW'
AND TABLE_NAME = :t
AND UPPER(TEXT) LIKE UPPER('%.NEXTVAL%')
AND UPPER(TEXT) LIKE UPPER('%:NEW.' || :c || '%')
""", table, field['COLUMN_NAME'])
return rows and True or False
def primarykeys(conn, table):
"Find primary keys"
rows = query(conn, """
SELECT COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE COLS.TABLE_NAME = :t
AND CONS.CONSTRAINT_TYPE = 'P'
AND CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
""", table)
return [row['COLUMN_NAME'] for row in rows]
def references(conn, table, field):
"Find a FK (fails if multiple)"
rows1 = query(conn, """
SELECT COLS.CONSTRAINT_NAME,
CONS.DELETE_RULE,
COLS.POSITION AS ORDINAL_POSITION
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE COLS.TABLE_NAME = :t
AND COLS.COLUMN_NAME = :c
AND CONS.CONSTRAINT_TYPE = 'R'
AND CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
""", table, field)
if len(rows1) == 1:
rows2 = query(conn, """
SELECT COLS.TABLE_NAME, COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE CONS.CONSTRAINT_NAME = :k
AND CONS.R_CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
ORDER BY COLS.POSITION ASC
""", rows1[0]['CONSTRAINT_NAME'])
row = None
if len(rows2) > 1:
row = rows2[int(rows1[0]['ORDINAL_POSITION']) - 1]
keyed = True
if len(rows2) == 1:
row = rows2[0]
keyed = False
if row:
if keyed: # THIS IS BAD, DON'T MIX "id" and primarykey!!!
ref = {'type': "'reference %s.%s'" % (row['TABLE_NAME'],
row['COLUMN_NAME'])}
else:
ref = {'type': "'reference %s'" % (row['TABLE_NAME'],)}
if rows1[0]['DELETE_RULE'] != "NO ACTION":
ref['ondelete'] = repr(rows1[0]['DELETE_RULE'])
return ref
elif rows2:
raise RuntimeError("Unsupported foreign key reference: %s" %
str(rows2))
elif rows1:
raise RuntimeError("Unsupported referential constraint: %s" %
str(rows1))
def define_table(conn, table):
"Output single table definition"
fields = get_fields(conn, table)
pks = primarykeys(conn, table)
print "db.define_table('%s'," % (table, )
for field in fields:
fname = field['COLUMN_NAME']
fdef = define_field(conn, table, field, pks)
if fname not in pks and is_unique(conn, table, field):
fdef['unique'] = "True"
if fdef['type'] == "'id'" and fname in pks:
pks.pop(pks.index(fname))
print " Field('%s', %s)," % (fname,
', '.join(["%s=%s" % (k, fdef[k]) for k in KWARGS
if k in fdef and fdef[k]]))
if pks:
print " primarykey=[%s]," % ", ".join(["'%s'" % pk for pk in pks])
print " migrate=migrate)"
print
def define_db(conn, db, host, port, user, passwd):
"Output database definition (model)"
dal = 'db = DAL("oracle://%s/%s@%s:%s/%s", pool_size=10)'
print dal % (user, passwd, host, port, db)
print
print "migrate = False"
print
for table in get_tables(conn):
define_table(conn, table)
if __name__ == "__main__":
if len(sys.argv) < 6:
print HELP
else:
# Parse arguments from command line:
db, host, port, user, passwd = sys.argv[1:6]
# Make the database connection (change driver if required)
import cx_Oracle
dsn = cx_Oracle.makedsn(host, port, db)
cnn = cx_Oracle.connect(user, passwd, dsn)
# Start model code generation:
define_db(cnn, db, host, port, user, passwd)
```
|
{
"source": "jessica-dl/2XB3-ML-Training",
"score": 3
}
|
#### File: 2XB3-ML-Training/trainer/mnist_dataset.py
```python
import numpy as np
import keras
from keras.datasets import mnist
class Dataset:
def __init__(self, path, local):
"""
Initialize the MNIST dataset.
Parameters path and local are only included to fit the interface of Dataset
:param path: Ignored
:param local: Ignored
"""
(x, y), (_, _) = mnist.load_data()
# Configure input
x = (x.astype(np.float32) - 127.5) / 127.5
x = np.expand_dims(x, axis=3)
x_padding = np.zeros((x.shape[0], 64, 64, 1)) - 1
x_padding[:, :28, :28, :] = x
x = x_padding
y = keras.utils.np_utils.to_categorical(y, 10)
self.x = x
self.y = y
print('Loaded dataset')
print('X:', self.x.shape)
print('Y:', self.y.shape)
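
# Example usage (illustrative; shapes assume the standard Keras MNIST training split of 60,000 images):
#     dataset = Dataset(path=None, local=False)
#     dataset.x.shape  ->  (60000, 64, 64, 1)   # 28x28 digits padded into 64x64, scaled to [-1, 1]
#     dataset.y.shape  ->  (60000, 10)          # one-hot encoded labels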
```
#### File: 2XB3-ML-Training/trainer/trainer.py
```python
import argparse
from trainer.dataset import Dataset
from trainer.models.cgan_model import CGANModel
def main(job_dir, dataset_path=None, local=False, generator_weights=None, discriminator_weights=None, encoder_weights=None):
dataset = Dataset(dataset_path, local)
aging_model = CGANModel(job_dir + "/", local, generator_weights, discriminator_weights, encoder_weights)
aging_model.train_gpu(dataset, job_dir + "/logs/tensorboard")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path",
help="Path to dataset")
parser.add_argument("--job-dir",
help="GCS relative location within bucket to write checkpoints and export models",
default="out")
parser.add_argument("--local",
help="True if training should be run locally",
type=bool)
parser.add_argument("--generator_weights",
help="Path to generator weights")
parser.add_argument("--discriminator_weights",
help="Path to discriminator weights")
parser.add_argument("--encoder_weights",
help="Path to encoder weights")
arguments = parser.parse_args()
arguments = arguments.__dict__
main(**arguments)
```
|
{
"source": "jessicaengel451/BiblioPixelAnimations",
"score": 4
}
|
#### File: BiblioPixelAnimations/base/Twinkle.py
```python
from bibliopixel.colors import COLORS
from bibliopixel.colors.arithmetic import color_scale
import random
# Base class to be used by any display type
class TwinkleBase:
def __init__(self, layout, colors=[COLORS.Red, COLORS.Green, COLORS.Blue],
density=20, speed=2, max_bright=255):
self.layout = layout
self.colors = colors
self.density = density
self.speed = speed
self.max_bright = max_bright
# Make sure speed, density & max_bright are in sane ranges
self.speed = min(self.speed, 100)
self.speed = max(self.speed, 2)
self.density = min(self.density, 100)
self.density = max(self.density, 2)
self.max_bright = min(self.max_bright, 255)
self.max_bright = max(self.max_bright, 5)
def pre_run(self):
self._step = 0
# direction, color, level
self.pixels = [(0, COLORS.Off, 0)] * self.layout.numLEDs
def pick_led(self, speed):
idx = random.randrange(0, self.layout.numLEDs)
p_dir, p_color, p_level = self.pixels[idx]
if random.randrange(0, 100) < self.density:
if p_dir == 0: # 0 is off
p_level += speed
p_dir = 1 # 1 is growing
p_color = random.choice(self.colors)
self.layout._set_base(idx, color_scale(p_color, p_level))
self.pixels[idx] = p_dir, p_color, p_level
def step(self, amt=1):
self.layout.all_off()
self.pick_led(self.speed)
for i, val in enumerate(self.pixels):
p_dir, p_color, p_level = val
if p_dir == 1:
p_level += self.speed
if p_level > 255:
p_level = 255
p_dir = 2 # start dimming
self.layout._set_base(i, color_scale(p_color, p_level))
elif p_dir == 2:
p_level -= self.speed
if p_level < 0:
p_level = 0
p_dir = 0 # turn off
self.layout._set_base(i, color_scale(p_color, p_level))
self.pixels[i] = (p_dir, p_color, p_level)
self._step += amt
```
#### File: BiblioPixelAnimations/circle/arc_clock.py
```python
import time
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import COLORS
class ArcClock(Circle):
def __init__(self, layout, **kwds):
super().__init__(layout, **kwds)
self.hands = [
{
'rings': [0, 1],
'color': COLORS.Red,
'segments': 60,
'key': 'tm_sec'
},
{
'rings': [2, 3],
'color': COLORS.Green,
'segments': 60,
'key': 'tm_min'
},
{
'rings': [4, 5],
'color': COLORS.Blue,
'segments': 12,
'key': 'tm_hour'
}
]
def step(self, amt=1):
self.layout.all_off()
t = time.localtime()
for h in self.hands:
segs = h['segments']
end = (360 / segs) * (getattr(t, h['key']) % segs)
if end:
for i in h['rings']:
self.layout.fillRing(
i, h['color'], startAngle=0, endAngle=end)
```
#### File: BiblioPixelAnimations/circle/circle_clock.py
```python
import time
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import COLORS
class CircleClock(Circle):
def __init__(self, layout, **kwds):
super().__init__(layout, **kwds)
last = self.lastRing
self.hands = [
{
'radius': last - 0,
'color': COLORS.Red,
'segments': 60,
'key': 'tm_sec'
},
{
'radius': last - 2,
'color': COLORS.Green,
'segments': 60,
'key': 'tm_min'
},
{
'radius': last - 4,
'color': COLORS.Blue,
'segments': 12,
'key': 'tm_hour'
}
]
def step(self, amt=1):
self.layout.all_off()
t = time.localtime()
for h in self.hands:
segs = h['segments']
angle = (360 / segs) * (getattr(t, h['key']) % h['segments'])
self.layout.drawRadius(angle, h['color'], endRing=h['radius'])
```
#### File: BiblioPixelAnimations/circle/fireflies.py
```python
import random
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import COLORS
class FireFlies(Circle):
COLOR_DEFAULTS = ('colors', [COLORS.Red, COLORS.Green, COLORS.Blue]),
def __init__(self, layout, count=10, **kwds):
super().__init__(layout, **kwds)
self._count = count
def pre_run(self):
self._step = 0
def step(self, amt=1):
amt = 1 # anything other than 1 would be just plain silly
if self._step > self.layout.numLEDs:
self._step = 0
self.layout.all_off()
for i in range(self._count):
pixel = random.randint(0, self.layout.numLEDs - 1)
color = random.choice(self.palette)
self.layout._set_base(pixel, color)
self._step += amt
```
#### File: BiblioPixelAnimations/circle/swirl.py
```python
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import palettes
class Swirl(Circle):
COLOR_DEFAULTS = ('palette', palettes.get('three_sixty')),
def __init__(self, layout, angle=12, **kwds):
super().__init__(layout, **kwds)
self.angle = angle
def pre_run(self):
self._step = 0
def step(self, amt=1):
for a in range(0, 360, self.angle):
c = self.palette(self._step)
for i in range(self.ringCount):
self.layout.set(i, a, c)
self._step += amt
```
#### File: BiblioPixelAnimations/cube/wave_spiral.py
```python
from bibliopixel.animation.cube import Cube
def spiralOrder(matrix):
return matrix and list(matrix.pop(0)) + spiralOrder(list(zip(*matrix))[::-1])
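# Illustration (added note): spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) returns
# [1, 2, 3, 6, 9, 8, 7, 4, 5], i.e. the entries visited in clockwise spiral order.
# Note that it consumes (pops rows from) the list it is given.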
class WaveSpiral(Cube):
def __init__(self, layout, offset=1, dir=True, **kwds):
super().__init__(layout, **kwds)
self.offset = offset
self._dir = dir
self.spiral_len = self.x * self.y
self.matrix = []
for x in range(self.x):
col = []
for y in range(self.y):
col.append((x, y))
self.matrix.append(col)
self.spiral = spiralOrder(self.matrix)
def pre_run(self):
self._step = 0
def step(self, amt=1):
if self._dir:
s = 255 - self._step
else:
s = self._step
offset_total = 0
for z in range(self.z):
for i in range(self.spiral_len):
x, y = self.spiral[i]
index = i * 255 / self.spiral_len + s + offset_total
self.layout.set(x, y, z, self.palette(index))
offset_total += self.offset
self._step += amt
if(self._step >= 255):
self._step = 0
```
#### File: BiblioPixelAnimations/matrix/ImageDissolve.py
```python
from bibliopixel.animation.matrix import Matrix
from bibliopixel.util.image import load_image
from random import shuffle
import os
class ImageDissolve(Matrix):
def __init__(self, layout, imageFiles=None, pixelRate=10, waitFrames=30,
**kwds):
super().__init__(layout, **kwds)
self.pixelRate = pixelRate
if imageFiles is None:
imageFiles = []
cur_dir = os.path.dirname(os.path.realpath(__file__))
imageFiles.append(os.path.abspath(os.path.join(cur_dir, '../../Graphics/ml_logo.bmp')))
imageFiles.append(os.path.abspath(os.path.join(cur_dir, '../../Graphics/rainbow.jpg')))
self.imageFiles = imageFiles
self.imgIndex = 0
self.waitFrames = waitFrames
self.waitCount = 0
def pre_run(self):
self.resetAndLoad()
self.imgIndex = 0
self.waitCount = 0
def resetAndLoad(self):
self.layout.setTexture(load_image.loadImage(self.layout, imagePath=self.imageFiles[self.imgIndex]))
self.map = [(x, y) for x in range(self.width) for y in range(self.height)]
shuffle(self.map)
def step(self, amt):
if self.waitCount == 0:
for i in range(self.pixelRate):
x, y = self.map.pop()
self.layout.set(x, y)
if len(self.map) == 0:
if len(self.imageFiles) == 1:
self.layout.all_off()
self.animComplete = True
else:
self.imgIndex += 1
if self.imgIndex >= len(self.imageFiles):
self.animComplete = True
self.imgIndex = 0
self.waitCount = self.waitFrames
self.resetAndLoad()
break
else:
self.waitCount -= 1
```
#### File: BiblioPixelAnimations/matrix/Mainframe.py
```python
from bibliopixel.animation.matrix import Matrix
from bibliopixel.colors import COLORS
import os
class Mainframe(Matrix):
COLOR_DEFAULTS = ('bgcolor', COLORS.Off), ('color', COLORS.Red)
def __init__(self, layout, scroll=True, **kwds):
super().__init__(layout, **kwds)
self.scroll = scroll
self.rand_bytes_rows = (self.height // 8) + 1
self.__genBytes()
def __genBytes(self):
self.bytes = [[x for x in bytearray(os.urandom(self.width))]
for y in range(self.rand_bytes_rows)]
def step(self, amt=8):
if self.scroll:
new_bytes = [i for i in bytearray(
os.urandom(self.rand_bytes_rows))]
for y in range(self.rand_bytes_rows):
self.bytes[y].pop(0)
self.bytes[y].append(new_bytes[y])
else:
self.__genBytes()
for y in range(self.height):
for x in range(self.width):
b = self.bytes[y // 8][x]
bit = bool(b & (1 << (y % 8)))
color = self.palette(int(bit))
self.layout.set(self.width - x - 1, y, color)
```
#### File: BiblioPixelAnimations/strip/Alternates.py
```python
from bibliopixel.animation.strip import Strip
class Alternates(Strip):
COLOR_DEFAULTS = ('color1', (255, 255, 255)), ('color2', (0, 0, 0))
def __init__(self, layout, max_led=-1, **kwds):
super().__init__(layout, 0, -1, **kwds)
self._current = 0
self._minLed = 0
self._maxLed = max_led
if self._maxLed < 0 or self._maxLed < self._minLed:
self._maxLed = self.layout.numLEDs - 1
self._positive = True
def pre_run(self):
self._step = 0
def step(self, amt=1):
while self._current < self._maxLed:
odd = bool(self._current % 2)
color = self.palette(odd == self._positive)
self.layout.fill(color, self._current, self._current)
self._current += amt
self._current = self._minLed
self._positive = not self._positive
```
#### File: BiblioPixelAnimations/strip/ColorChase.py
```python
from bibliopixel.animation.strip import Strip
class ColorChase(Strip):
"""Chase one pixel down the strip."""
COLOR_DEFAULTS = ('color', [255, 0, 0]),
def __init__(self, layout, width=1, start=0, end=-1, **kwds):
super().__init__(layout, start, end, **kwds)
self._width = width
def pre_run(self):
self._step = 0
def step(self, amt=1):
self.layout.all_off() # because I am lazy
for i in range(self._width):
self.layout.set(self._start + self._step + i, self.palette(0))
self._step += amt
overflow = (self._start + self._step) - self._end
if overflow >= 0:
self._step = overflow
```
#### File: BiblioPixelAnimations/strip/PixelPingPong.py
```python
from bibliopixel.animation.strip import Strip
class PixelPingPong(Strip):
COLOR_DEFAULTS = ('color', (255, 255, 255)),
def __init__(self, layout, max_led=None, total_pixels=1, fade_delay=1, **kwds):
super().__init__(layout, 0, -1, **kwds)
self._current = 0
self._minLed = 0
self._maxLed = max_led
if self._maxLed is None or self._maxLed < self._minLed:
self._maxLed = self.layout.numLEDs - 1
self._additionalPixels = total_pixels - 1
self._positive = True
self._fade_delay = fade_delay if fade_delay >= 1 else 1
color = self.palette(0)
self._fade_increment = tuple(x / self._fade_delay for x in color)
def pre_run(self):
self._step = 0
def step(self, amt=1):
# fade last frame's pixels
for i in range(0, self._maxLed + 1):
faded_color = tuple(x - self._fade_increment[i] if x > self._fade_increment[i] else 0 for (i, x) in enumerate(self.layout.get(i)))
self.layout.fill(faded_color, i, i)
self.layout.fill(
self.palette(0), self._current,
self._current + self._additionalPixels)
if self._positive:
self._current += 1
else:
self._current -= 1
if self._current + self._additionalPixels == self._maxLed:
self._positive = False
if self._current == self._minLed:
self._positive = True
```
#### File: BiblioPixelAnimations/strip/Twinkle.py
```python
from .. base.Twinkle import TwinkleBase
from bibliopixel.animation.strip import Strip
class Twinkle(Strip):
def __init__(self, layout, **kwds):
super().__init__(layout)
self.base = TwinkleBase(layout, **kwds)
self.pre_run = self.base.pre_run
self.step = self.base.step
```
|
{
"source": "jessicagarrison/py-junos-eznc",
"score": 2
}
|
#### File: unit/factory/test_to_json.py
```python
__author__ = "<NAME>"
try:
import unittest2 as unittest
except ImportError:
import unittest
from nose.plugins.attrib import attr
from mock import patch
import os
import json
from jnpr.junos import Device
from jnpr.junos.factory.to_json import PyEzJSONEncoder, TableJSONEncoder, TableViewJSONEncoder
from jnpr.junos.op.routes import RouteSummaryTable
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from ncclient.operations.rpc import RPCReply
@attr('unit')
class TestToJson(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='<PASSWORD>',
gather_facts=False)
self.dev.open()
def test_pyez_encoder_default(self):
with self.assertRaises(TypeError):
PyEzJSONEncoder.default(PyEzJSONEncoder(), 'test')
def test_table_encoder_default(self):
with self.assertRaises(TypeError):
TableJSONEncoder.default(TableJSONEncoder(), 'test')
def test_view_encoder_default(self):
with self.assertRaises(TypeError):
TableViewJSONEncoder.default(TableViewJSONEncoder(), 'test')
@patch('jnpr.junos.Device.execute')
def test_table_json(self, mock_execute):
mock_execute.side_effect = self._mock_manager
rst = RouteSummaryTable(self.dev)
rst.get()
resp = rst.to_json()
j = {'ISP-1.inet.0': {'proto': {'Local': {'count': 1, 'active': 1}, 'Direct': {'count': 3, 'active': 3}},
'dests': 4, 'holddown': 0, 'active': 4, 'hidden': 0, 'total': 4},
'ISP-2.inet.0': {'proto': {'Local': {'count': 1, 'active': 1}, 'Direct': {'count': 3, 'active': 3}},
'dests': 4, 'holddown': 0, 'active': 4, 'hidden': 0, 'total': 4},
'inet.0': {'proto': {'Local': {'count': 4, 'active': 4}, 'Static': {'count': 1, 'active': 1},
'Direct': {'count': 4, 'active': 3}}, 'dests': 8, 'holddown': 0, 'active': 8,
'hidden': 0, 'total': 9}}
self.assertEqual(eval(resp), j)
@patch('jnpr.junos.Device.execute')
def test_view_json(self, mock_execute):
mock_execute.side_effect = self._mock_manager
rst = RouteSummaryTable(self.dev)
rst.get()
resp = rst["ISP-1.inet.0"].to_json()
j = {"ISP-1.inet.0": {"proto": {"Local": {"count": 1, "active": 1}, "Direct": {"count": 3, "active": 3}},
"dests": 4, "holddown": 0, "active": 4, "hidden": 0, "total": 4}}
self.assertEqual(eval(resp), j)
@patch('jnpr.junos.Device.execute')
def test_json_rpc(self, mock_execute):
mock_execute.side_effect = self._mock_manager
resp = self.dev.rpc.get_software_information()
j = {'package-information': {'comment': 'JUNOS Software Release [12.1X46-D15.3]', 'name': 'junos'},
'host-name': 'firefly', 'product-model': 'firefly-perimeter', 'product-name': 'firefly-perimeter'}
self.assertEqual(eval(json.dumps(resp)), j)
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
reply = RPCReply(foo)
reply.parse()
rpc_reply = NCElement(reply, self.dev._conn.
_device_handler.transform_reply())\
._NCElement__doc[0]
return rpc_reply
def _mock_manager(self, *args, **kwargs):
if kwargs:
if args and ('normalize' in kwargs or 'filter_xml' in kwargs):
return self._read_file(args[0].tag + '.xml')
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
if args:
return self._read_file(args[0].tag + '.xml')
```
#### File: tests/unit/test_decorators.py
```python
try:
import unittest2 as unittest
except ImportError:
import unittest
from nose.plugins.attrib import attr
from lxml.etree import XML
from jnpr.junos.device import Device
from jnpr.junos.utils.config import Config
from jnpr.junos.exception import RpcError, ConfigLoadError
from jnpr.junos.decorators import timeoutDecorator, normalizeDecorator
from jnpr.junos.decorators import ignoreWarnDecorator
from mock import patch, MagicMock, PropertyMock, call
from ncclient.operations.rpc import RPCError
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from ncclient.xml_ import qualify
__author__ = "<NAME>"
@attr('unit')
class Test_Decorators(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager_setup
self.dev = Device(host='1.1.1.1', user='rick', password='<PASSWORD>',
gather_facts=False)
self.dev.open()
def test_timeout(self):
with patch('jnpr.junos.Device.timeout',
new_callable=PropertyMock) as mock_timeout:
mock_timeout.return_value = 30
def function(x):
return x
decorator = timeoutDecorator(function)
decorator(self.dev, dev_timeout=10)
calls = [call(), call(10), call(30)]
mock_timeout.assert_has_calls(calls)
def test_timeout_except(self):
with patch('jnpr.junos.Device.timeout',
new_callable=PropertyMock) as mock_timeout:
mock_timeout.return_value = 30
def function(*args, **kwargs):
raise Exception()
decorator = timeoutDecorator(function)
# test to ensure the exception is raised
with self.assertRaises(Exception):
decorator(self.dev, dev_timeout=10)
calls = [call(), call(10), call(30)]
# verify timeout was set/reset
mock_timeout.assert_has_calls(calls)
# Test default of true and passing true keyword
def test_normalize_true_true(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
self.dev._normalize = True
def function(x):
return x
decorator = normalizeDecorator(function)
decorator(self.dev, normalize=True)
self.assertFalse(mock_transform.called)
# Test default of true and passing true keyword and a func exception
def test_normalize_true_true_except(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
self.dev._normalize = True
def function(*args, **kwargs):
raise Exception()
decorator = normalizeDecorator(function)
with self.assertRaises(Exception):
decorator(self.dev, normalize=True)
self.assertFalse(mock_transform.called)
# Test default of True and passing false keyword
def test_normalize_true_false(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
mock_transform.return_value = 'o.g.'
self.dev._normalize = True
def function(x):
return x
decorator = normalizeDecorator(function)
decorator(self.dev, normalize=False)
calls = [call(), call(self.dev._nc_transform), call('o.g.')]
mock_transform.assert_has_calls(calls)
# Test default of True and passing false keyword and a func exception
def test_normalize_true_false_except(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
mock_transform.return_value = 'o.g.'
self.dev._normalize = True
def function(*args, **kwargs):
raise Exception()
decorator = normalizeDecorator(function)
with self.assertRaises(Exception):
decorator(self.dev, normalize=False)
calls = [call(), call(self.dev._nc_transform), call('o.g.')]
mock_transform.assert_has_calls(calls)
# Test default of false and passing true keyword
def test_normalize_false_true(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
mock_transform.return_value = 'o.g.'
self.dev._normalize = False
def function(x):
return x
decorator = normalizeDecorator(function)
decorator(self.dev, normalize=True)
calls = [call(), call(self.dev._norm_transform), call('o.g.')]
# print mock_transform.call_args_list
mock_transform.assert_has_calls(calls)
# Test default of false and passing true keyword and a func exception
def test_normalize_false_true_except(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
mock_transform.return_value = 'o.g.'
self.dev._normalize = False
def function(*args, **kwargs):
raise Exception()
decorator = normalizeDecorator(function)
with self.assertRaises(Exception):
decorator(self.dev, normalize=True)
calls = [call(), call(self.dev._norm_transform), call('o.g.')]
# print mock_transform.call_args_list
mock_transform.assert_has_calls(calls)
# Test default of false and passing false keyword
def test_normalize_false_false(self):
with patch('jnpr.junos.Device.transform',
new_callable=PropertyMock) as mock_transform:
self.dev._normalize = False
def function(x):
return x
decorator = normalizeDecorator(function)
decorator(self.dev, normalize=False)
self.assertFalse(mock_transform.called)
# Test default with ignore_warning not present.
def test_ignore_warning_missing(self):
def method(self, x):
return x
decorator = ignoreWarnDecorator(method)
response = decorator(self.dev, 'foo')
self.assertEqual('foo', response)
# Test default with ignore_warning=False.
def test_ignore_warning_false(self):
def method(self, x):
return x
decorator = ignoreWarnDecorator(method)
response = decorator(self.dev, 'foo', ignore_warning=False)
self.assertEqual('foo', response)
# Test with ignore_warning=True and only warnings.
def test_ignore_warning_true_3snf_warnings(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3snf_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protocols ospf
delete policy-options prefix-list foo
"""
self.assertTrue(cu.load(config, ignore_warning=True))
# Test with ignore_warning='statement not found' and 3 snf warnings.
def test_ignore_warning_string_3snf_warnings(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3snf_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protocols ospf
delete policy-options prefix-list foo
"""
self.assertTrue(cu.load(config, ignore_warning='statement not found'))
# Test with ignore_warning='statement not found', 1 snf warning,
# and 1 error.
def test_ignore_warning_string_1snf_warning_1err(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_1snf_warning_1err)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
"""
with self.assertRaises(ConfigLoadError):
cu.load(config, ignore_warning='statement not found')
# Test with ignore_warning=True, RpcError with no errs attribute.
# I haven't seen this from an actual device, so this is a very contrived
# test.
    def test_ignore_warning_true_rpcerror_no_errs(self):
def method(self, x):
rpc_error = RPCError(XML('<foo/>'), errs=None)
raise rpc_error
decorator = ignoreWarnDecorator(method)
with self.assertRaises(RPCError):
decorator(self.dev, 'foo', ignore_warning=True)
# Test with ignore_warning=['foo', 'statement not found'] and
# three statement not found warnings.
def test_ignore_warning_list_3snf_warnings(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3snf_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protocols ospf
delete policy-options prefix-list foo
"""
self.assertTrue(cu.load(config,
ignore_warning=['foo', 'statement not found']))
# Test with ignore_warning='foo', and three statement not found warnings.
def test_ignore_warning_string_3snf_no_match(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3snf_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
"""
with self.assertRaises(ConfigLoadError):
cu.load(config, ignore_warning='foo')
# Test with ignore_warning=['foo', 'bar], and
# three statement not found warnings.
def test_ignore_warning_list_3snf_no_match(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3snf_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
"""
with self.assertRaises(ConfigLoadError):
cu.load(config, ignore_warning=['foo', 'bar'])
# Test with ignore_warning=['foo', 'bar], and
# three warnings which are 'foo boom', 'boom bar', and 'foo bar'
def test_ignore_warning_list_3warn_match(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3foobar_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
"""
self.assertTrue(cu.load(config,
ignore_warning=['foo', 'bar']))
# Test with ignore_warning=['foo', 'foo bar], and
# three warnings which are 'foo boom', 'boom bar', and 'foo bar'
def test_ignore_warning_list_3warn_no_match(self):
self.dev._conn.rpc = MagicMock(side_effect=
self._mock_manager_3foobar_warnings)
cu = Config(self.dev)
config = """
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
"""
with self.assertRaises(ConfigLoadError):
cu.load(config, ignore_warning=['foo', 'foo bar'])
def _mock_manager_setup(self, *args, **kwargs):
if kwargs:
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
def _mock_manager_3snf_warnings(self, *args, **kwargs):
cmd = """
<load-configuration action="set" format="text">
<configuration-set>
delete interfaces ge-0/0/0
delete protocols ospf
delete policy-options prefix-list foo
</configuration-set>
</load-configuration>
"""
rsp_string = """
<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/16.1R4/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:1f3dfa00-3434-414a-8aa8-0073590c5812">
<load-configuration-results>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<ok/>
</load-configuration-results>
</rpc-reply>
"""
rsp = XML(rsp_string)
errors = []
for err in rsp.findall('.//'+qualify('rpc-error')):
errors.append(RPCError(err))
raise RPCError(rsp, errs=errors)
def _mock_manager_3foobar_warnings(self, *args, **kwargs):
cmd = """
<load-configuration action="set" format="text">
<configuration-set>
delete interfaces ge-0/0/0
delete protocols ospf
delete policy-options prefix-list foo
</configuration-set>
</load-configuration>
"""
rsp_string = """
<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/16.1R4/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:1f3dfa00-3434-414a-8aa8-0073590c5812">
<load-configuration-results>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
foo boom
</error-message>
</rpc-error>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
boom bar
</error-message>
</rpc-error>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
foo bar
</error-message>
</rpc-error>
<ok/>
</load-configuration-results>
</rpc-reply>
"""
rsp = XML(rsp_string)
errors = []
for err in rsp.findall('.//'+qualify('rpc-error')):
errors.append(RPCError(err))
raise RPCError(rsp, errs=errors)
def _mock_manager_1snf_warning_1err(self, *args, **kwargs):
cmd = """
<load-configuration action="set" format="text">
<configuration-set>
delete interfaces ge-0/0/0
delete protcols ospf
delete policy-options prefix-list foo
</configuration-set>
</load-configuration>
"""
rsp_string = """
<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/16.1R4/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:1f3dfa00-3434-414a-8aa8-0073590c5812">
<load-configuration-results>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<rpc-error>
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
<error-message>syntax error</error-message>
<error-info>
<bad-element>protcols</bad-element>
</error-info>
</rpc-error>
<ok/>
</load-configuration-results>
</rpc-reply>
"""
rsp = XML(rsp_string)
errors = []
for err in rsp.findall('.//'+qualify('rpc-error')):
errors.append(RPCError(err))
raise RPCError(rsp, errs=errors)
```
|
{
"source": "JessicaGarson/MovieSentiment",
"score": 3
}
|
#### File: JessicaGarson/MovieSentiment/bagginglargerrange.py
```python
import pandas as pd
import numpy as np
from ggplot import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import auc_score
train = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/train.csv')
test = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/test.csv')
def bagmodel(s):
vectorizer = CountVectorizer()
X_dict = vectorizer.fit_transform(s.Phrase)
choices = np.random.choice(range(len(s)), len(s), replace = True)
s = s.ix[choices,:]
X_train = vectorizer.transform(s.Phrase)
model = LogisticRegression().fit(X_train, list(s.Sentiment))
return model
models = []
for i in range(100):
print i
models.append(bagmodel(train))
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(train.Phrase)
X_test = vectorizer.transform(test.Phrase)
# results = [x.predict(X_test) for x in models
result = [x.predict(X_test) for x in models]
from collections import Counter
def combination(s):
thing = Counter(s)
return thing.most_common(1)[0]
combination([3,3,2,3,3,])
result_final = []
for i in range(len(test)):
a, b = combination([x[i] for x in result])
result_final.append(a)
result_final[0]
solution = pd.DataFrame({'PhraseId': test.PhraseId, 'Sentiment': result_final})
solution.to_csv('submissionbaggedagain.csv', index=False)
plotout = ggplot(aes(x = 'Sentiment'), data=solution)
plotout + geom_histogram()
```
|
{
"source": "jessicagtz/WorkingFolder",
"score": 3
}
|
#### File: WorkingFolder/ReviewSession_FullStackApp/app.py
```python
import os
from flask import (
Flask,
render_template,
jsonify,
request,
redirect)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
from flask_sqlalchemy import SQLAlchemy
@app.route("/")
def index():
    # original body was incomplete ("return:"); return a placeholder template (assumed name)
    return render_template("index.html")

@app.route("/send", methods=["GET", "POST"])
def send_data():
    if request.method == "POST":
        ## HTTP verb, " hey I want to give you some data"
        pass  # the original snippet ends here; the POST handler was not yet written
```
#### File: working/crime_vis/crime.py
```python
from flask import Flask, jsonify, render_template, request, redirect
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
import pandas as pd
import numpy as np
import datetime as dt
# database setup using automap
engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
AllCrime = Base.classes.all_crime
# Create our session (link) from Python to the DB
session = Session(engine)
# initialize Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///chi_db.sqlite"
@app.route("/crimehistory")
def crime_dict():
    results = session.query(AllCrime.id, AllCrime.crimeGroup, AllCrime.year, AllCrime.nunCrimes).all()
    crime_list = []
    for result in results:
        record = {}
        record["year"] = result.year
        record["id"] = result.id
        record["crimeGroup"] = result.crimeGroup
        record["nunCrimes"] = result.nunCrimes
        crime_list.append(record)
    return jsonify(crime_list)
if __name__ == "__main__":
app.run(debug=True)
```
|
{
"source": "JessicaHamilton/PHYS-3210",
"score": 4
}
|
#### File: PHYS-3210/Exam 01/exam01.py
```python
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rand
#first function for old version of game
def old_cherry(N):
#initialize values for tree,basket, and turns
Cherry_tree = 10
Basket = 0
spins = 1
#set loop for number of spins
for turn in range(N):
#setup if statement to randomly generate a value between 0 and 1 for the weight of the spin
weight = rand.random()
#If lower - generate spin to increase basket
if weight <= 0.60:
spin = rand.randint(0,4)
#determine what happens with each spin
if spin == 0:
Cherry_tree = Cherry_tree - 1
Basket = Basket + 1
if Basket == 10:
spins = spins + 1
break
elif spin == 1:
Cherry_tree = Cherry_tree - 2
Basket = Basket + 2
if Basket == 10:
spins = spins + 1
break
elif spin == 2:
Cherry_tree = Cherry_tree - 3
Basket = Basket + 3
if Basket == 10:
spins = spins + 1
break
elif spin == 3:
Cherry_tree = Cherry_tree - 4
Basket = Basket + 4
if Basket == 10:
spins = spins + 1
break
#if higher, generate spin to decrease basket
elif weight > 0.60:
#determine what happens with each spin
spin = rand.randint(4,7)
if spin == 4 or spin == 5:
if Basket < 2 and Basket > 0:
Cherry_tree = Cherry_tree +1
Basket = Basket -1
spins = spins + 1
elif Basket >= 2:
Cherry_tree = Cherry_tree +2
Basket = Basket - 2
spins = spins + 1
else:
spins = spins + 1
continue
elif spin == 6:
Cherry_tree = Cherry_tree + Basket
Basket = 0
spins = spins + 1
return spins
def new_cherry(N):
#initialize values for the tree and the basket and set range for turns
Cherry_tree = 10
Basket = 0
spins2 = 0
for t in range(N):
#randomly generate integers between 0 and 7 to simulate the different options the spinner can land on
spinnn = rand.randint(0,7)
#Setup if statement to clarify what happens in each spin, may not actually need for loop!!
if spinnn == 0:
Cherry_tree = Cherry_tree - 1
Basket = Basket + 1
spins2 = spins2 + 1
if Basket == 10:
break
elif spinnn == 1:
Cherry_tree = Cherry_tree - 2
Basket = Basket + 2
spins2 = spins2 + 1
if Basket == 10:
break
elif spinnn == 2:
Cherry_tree = Cherry_tree - 3
Basket = Basket + 3
spins2 = spins2 + 1
if Basket == 10:
break
elif spinnn == 3:
Cherry_tree = Cherry_tree - 4
Basket = Basket + 4
spins2 = spins2 + 1
if Basket == 10:
break
elif spinnn == 4 or spinnn == 5:
if Basket < 2 and Basket >=0:
Cherry_tree = Cherry_tree + 1
Basket = Basket - 1
spins2 = spins2 + 1
elif Basket >= 2:
Cherry_tree = Cherry_tree + 2
Basket = Basket - 2
spins2 = spins2 + 1
else:
spins2 = spins2 + 1
continue
elif spinnn == 6:
Cherry_tree = Cherry_tree + Basket
Basket = 0
spins2 = spins2 + 1
return spins2
old_array = []
new_array = []
for each_num in range(10):
test1 = old_cherry(30)
old_array.append(test1)
test2 = new_cherry(30)
new_array.append(test2)
average1 = np.average(old_array)
average2 = np.average(new_array)
print("Average number of Turns for old version:",average1)
print("Average number of Turns for new version:",average2)
```
#### File: PHYS-3210/Exam 02/pendulum.py
```python
import numpy as np
import matplotlib.pyplot as plt
def pend(m,l,theta):
g = 9.8
omega = np.sqrt(g/l)
h = 0.001
tnew = np.radians(10)
tdotnew = 0
time = np.arange(0,5, h)
tnew_array = []
tdotnew_array = []
x_array = []
y_array = []
xdot_array = []
ydot_array = []
for t in time:
tnew_1 = tnew + tdotnew*h
tdotnew_1 = tdotnew - ((g/l)*np.sin(tnew))*h
tnew_array.append(tnew_1)
tdotnew_array.append(tdotnew_1)
x = l*np.sin(tnew_1)
y = l - l*np.cos(tnew_1)
x_dot = l*np.sin(tdotnew_1)
y_dot = l-l*np.cos(tdotnew_1)
x_array.append(x)
y_array.append(y)
xdot_array.append(x_dot)
ydot_array.append(y_dot)
tnew = tnew_1
tdotnew = tdotnew_1
period = 2*np.pi*(np.sqrt(l/g))
return tnew_array, tdotnew_array, time, period, x_array, y_array, xdot_array, ydot_array
a1, a2, a3, a4, a5, a6, a7, a8 = pend(5, 1.5, np.radians(15))
plt.plot(a1, a3)
plt.title('Theta vs. Time')
plt.show()
print('Period:', a4)
plt.plot(a7, a8)
plt.title('Velocity X vs. Velocity Y')
plt.show()
plt.plot(a3, a5)
plt.title('Position X over Time')
plt.show()
plt.plot(a5,a6)
plt.title('Position X by Position Y')
plt.show()
plt.plot(a1,a2)
plt.title('Theta vs. D_Theta')
plt.show()
#Change initial thetas for periods and for theta and d_theta graphs
b1,b2,b3,b4,b5,b6,b7,b8 = pend(1,1.5,np.radians(10))
c1,c2,c3,c4,c5,c6,c7,c8 = pend(1,1.5,np.radians(30))
d1,d2,d3,d4,d5,d6,d7,d8 = pend(1,1.5,np.radians(45))
print('Periods for changing Thetas:', b4,c4,d4)
#Changing rod lengths for periods
e1,e2,e3,e4,e5,e6,e7,e8 = pend(1, 2, np.radians(15))
f1,f2,f3,f4,f5,f6,f7,f8 = pend(1, 5, np.radians(15))
g1,g2,g3,g4,g5,g6,g7,g8 = pend(1, 7, np.radians(15))
print('Periods for changing Rod lengths:', e4,f4,g4)
#plot the tetha vs theta dot for changing thetas
plt.plot(b1,b2)
plt.title('Theta vs D_theta: 10')
plt.show()
plt.plot(c1,c2)
plt.title('Theta vs D_theta: 30')
plt.show()
plt.plot(d1,d2)
plt.title('Theta vs D_theta: 45')
plt.show()
```
#### File: PHYS-3210/Week 03/exercise05.py
```python
import numpy as np
import matplotlib.pyplot as plt
def up_harmonic(value_n):
H_up = 0.0
summ_array1 = []
new_x = value_n + 1
x_array1 = np.arange(1,new_x)
for each_value in x_array1:
numm1 = 1/each_value
H_up = H_up + numm1
summ_array1.append(H_up)
return H_up, summ_array1
#test = up_harmonic(20)
#up_sum = test[0]
#up_values = test[1]
#print("Sum up value is:", test[0])
#print("The Up values are:", up_values)
def down_harmonic(value_n):
H_down = 0.0
summ_array2 = []
new_x = value_n + 1
x_array = np.arange(1,new_x)
x_array2 = x_array[::-1]
for each_value in x_array2:
numm2 = 1/each_value
H_down = H_down + numm2
summ_array2.append(H_down)
return H_down, summ_array2
#test1 = down_harmonic(20)
#down_sum = test1[0]
#down_values = test1[1]
#print("Sum down value is:", test1[0])
#print("The down values are:", down_values)
fraction_array = []
x_values = np.arange(1,50)
for new_value in x_values:
test1 = up_harmonic(new_value)
test2 = down_harmonic(new_value)
up_sum = test1[0]
down_sum = test2[0]
up_array = test1[1]
down_array = test2[1]
print("The up sum is:", up_sum)
print("The down sum is:", down_sum)
sub = up_sum - down_sum
abs_add = np.abs(up_sum) + np.abs(down_sum)
fraction = sub / abs_add
fraction_array.append(fraction)
plt.plot(x_values,fraction_array)
# When comparing the sum-up and sum-down results, the sum-down version is more precise. Floating-point
#numbers can only store a limited number of significant digits, so once the running total is large,
#adding a much smaller term changes nothing: the term is rounded away and the total stays the same.
#Summing downward starts with the smallest terms, letting them accumulate into something non-negligible
#before the larger terms are added, so less precision is lost.
```
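The precision effect described in the comments above can be checked directly. A minimal sketch (independent of the functions in this file, and assuming nothing beyond numpy) that accumulates the same harmonic series in single precision in both directions and compares against a double-precision reference:
```python
import numpy as np

# Sum 1/k for k = 1..N in float32, forward and reverse, against a float64 reference.
N = 10**6
terms = 1.0 / np.arange(1, N + 1, dtype=np.float64)
reference = terms.sum()

terms32 = terms.astype(np.float32)
forward = np.float32(0.0)
for t in terms32:            # largest terms first: later small terms get rounded away
    forward += t
backward = np.float32(0.0)
for t in terms32[::-1]:      # smallest terms first: they accumulate before the large ones
    backward += t

print("float64 reference:", reference)
print("forward  float32: ", forward, " error:", abs(float(forward) - reference))
print("backward float32: ", backward, " error:", abs(float(backward) - reference))
```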
#### File: PHYS-3210/Week 03/Exercise_06.py
```python
import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
# Initially we need to choose a name for the function, tell Python that we
#are defining a function, and decide which inputs are required or optional
def walk(N):
""" Function to compute an N-step random walk
Input:
N :: Total number of steps
Output:
x :: Array of all x positions
y :: Array of all y positions
"""
#Now within the function we need to determine what occurs
# seed random number generator
rand.seed()
# initialize x, y
#This is where we define and create the arrays for x and y and initiate them
x = [0.0]
y = [0.0]
# step in x-y space N times
    #This for loop runs once for each of the N steps and performs each task
    #listed within the loop
for n in range(N):
        #Each step takes the last value in the x (or y) array and adds (rand.random() - 0.5)*2.0,
        #i.e. a random displacement uniformly distributed on [-1, 1]
        #Once computed, the new position is appended to the array
        #The loop repeats this N times, updating x and y once per step
x.append(x[-1] + (rand.random() - 0.5)*2.0) #random grabs a random number between 0 and 1
y.append(y[-1] + (rand.random() - 0.5)*2.0)
#Once the above for loop is completed, the function will return (give back) the two arrays, x and y
return np.array(x), np.array(y)
# Example simulation for three separate walkers
walker_1 = walk(1000) # compute path for 1000 steps
walker_2 = walk(1000)
walker_3 = walk(1000)
# Example plot of (x, y) pairs from example simulation
plt.plot(walker_1[0], walker_1[1], '-')
plt.plot(walker_2[0], walker_2[1], '-')
plt.plot(walker_3[0], walker_3[1], '-')
plt.title("Three Random Walkers")
plt.xlabel("X Direction")
plt.ylabel("Y Direction")
plt.show()
#With the three random walkers, we can see that each walker took a different path.
#Even though each walker took 1000 steps, the random number generator does not produce
#the same numbers for each walker, so each path is different. You can see this in
#the graph: the three colored lines do not follow each other and in fact end up
#in different parts of the grid. I do find it interesting that a walker sometimes seems to remain
#in a specific area. This does not mean it sits in the same spot for an extended time; rather, it moves
#around that area instead of heading in a more straight or "forward" direction.
#Calculate for a single walker how far the walker moved from the origin (not total distance traveled).
#No, I do not expect each walker to end up at the same distance from the origin. A walker may end up
#doubling back on itself or wandering in rough circles.
x_dir = walker_2[0]
y_dir = walker_2[1]
final_x = x_dir[-1]
final_y = y_dir[-1]
dist_origin = np.sqrt(final_x**2+final_y**2)
print("Distance from Origin for walker 2:", dist_origin)
#Run 100 simulations for a walker that takes N steps, calculate final distance input into array
final_distances = []
for each_walker in range(100):
new_walk = walk(1500)
x_direction = new_walk[0]
y_direction = new_walk[1]
final_x_dir = x_direction[-1]
final_y_dir = y_direction[-1]
distance_orig = np.sqrt(final_x_dir**2+final_y_dir**2)
final_distances.append(distance_orig)
plt.plot(x_direction, y_direction, '-')
print(final_distances)
#According to the array of final distances, the walkers do not all end up at the same distance
#from the origin, which is what I expected.
#Makes a histogram of the final distances and find average distance, standard deviation, median distance
#hist, bins = np.histogram(final_distances)
plt.show()
hist = np.histogram(final_distances)
max_value = np.max(hist[0])
#print("This is the median value according to Hist:", max_value)
plt.hist(final_distances)
std_div = np.std(final_distances)
median_value = np.median(final_distances)
average_value = np.average(final_distances)
print("Standard Deviation is:", std_div)
print("Average distance is:", average_value)
print("Median distance is:", median_value)
#The distribution of distances is not quite Gaussian, as I was expecting, but there is a typical distance,
#and the median distance is usually not too far from the average. This indicates that there is not
#as much randomness in the values as you might think, since there is somewhat of a trend. Each time the code
#runs, the histogram has a Gaussian-like or Poisson-like shape. This is not quite what I expected. On the
#surface it all seems random and the numbers could be all over the place, but that is not quite the case:
#statistically speaking, there is a typical distance a walker will travel. This type of simulation could be
#used by park or city developers to determine the best flow of traffic, and where to build (or not build)
#structures.
```
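The "typical distance" observation above can be compared against the diffusive square-root scaling. A short sketch, assuming the same step distribution as walk() (each coordinate increment uniform on [-1, 1], variance 1/3):
```python
import numpy as np
import numpy.random as rand

def final_distance(N):
    """Distance from the origin after N steps with uniform(-1, 1) increments in x and y."""
    steps = (rand.random((N, 2)) - 0.5) * 2.0
    end = steps.sum(axis=0)
    return np.hypot(end[0], end[1])

N = 1500
distances = np.array([final_distance(N) for _ in range(500)])
# E[R^2] = 2 * N * Var(step) = 2 * N / 3 for this step distribution,
# so the root-mean-square distance should be close to sqrt(2N/3).
print("RMS final distance over 500 walkers:", np.sqrt(np.mean(distances**2)))
print("Predicted sqrt(2N/3):", np.sqrt(2.0 * N / 3.0))
```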
#### File: PHYS-3210/Week 04/saWalk.py
```python
class SAWalker(object):
def __init__(self, initial_x = 0, initial_y = 0, initial_z = 0, kind='2d'):
""" Initialize self-avoiding random walker """
# set initial / current position of walker (DEFAULT: origin)
if kind == '2d':
self.current_position = [initial_x, initial_y]
elif kind == '3d':
self.current_position = [initial_x, initial_y, initial_z]
else:
raise ValueError("Invalid Walker Kind")
# initialize walker chain
self.chain = [self.current_position] # list of [x,y] coordinates
self.monomer_type = ["blue"]
# whether the walker can step in some direction
self.is_stuck = False
# initialize the chain energy
self.energy = 0.0
def step(self):
""" Take a random step on a fixed lattice """
import numpy.random as nprand
# seed our random number generator
nprand.seed()
# check whether the chain can move
if self.stuck():
return
else:
pass
# make an X or Y step
proposed_position = self.current_position
while proposed_position in self.chain:
# select x or y direction and step
if nprand.random() < 0.5:
dx = nprand.randint(-1, 2)
dy = 0
else:
dx = 0
dy = nprand.randint(-1, 2)
proposed_position = [self.current_position[0] + dx,
self.current_position[1] + dy]
# choose polar (r) vs nonpolar (b)
if nprand.random() < 0.7:
c = "blue"
else:
c = "red"
self.chain.append(proposed_position)
self.monomer_type.append(c)
self.current_position = proposed_position
def stuck(self):
from numpy import array
dx_p1 = [self.current_position[0] + 1, self.current_position[1]]
dx_m1 = [self.current_position[0] - 1, self.current_position[1]]
dy_p1 = [self.current_position[0], self.current_position[1] + 1]
dy_m1 = [self.current_position[0], self.current_position[1] - 1]
if (dx_p1 in self.chain) and (dx_m1 in self.chain) and (dy_p1 in self.chain) \
and (dy_m1 in self.chain):
self.is_stuck = True
self.chain = array(self.chain)
else:
self.is_stuck = False
return self.is_stuck
def change_energy(self):
""" """
```
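A minimal usage sketch for the class above (2-d walker, with change_energy left unimplemented as in the file): keep stepping until the chain traps itself, then plot it.
```python
import matplotlib.pyplot as plt

walker = SAWalker(kind='2d')
while not walker.is_stuck:
    walker.step()               # step() calls stuck(), which flips is_stuck once the chain is trapped
chain = walker.chain            # stuck() converts the chain to an array when the walker gets trapped
plt.plot(chain[:, 0], chain[:, 1], '-o', markersize=3)
plt.title("Self-avoiding walk with {} monomers".format(len(chain)))
plt.show()
```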
#### File: PHYS-3210/Week 05/exercise09.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy import integrate
#Create function to take the integral of / area under the curve
x_values = np.arange(0,10,0.01)
y_values = x_values**2
#Define function that will use definition of the sum of the rectangle areas underneath curve
def integral_funct(h, initial, final):
#determine the rectangle and area using left side y-value of rectangle
area_array = []
y_values = []
x_values = np.arange(initial,final,0.01)
for numm in x_values:
new_value = numm**2
y_values.append(new_value)
    y_values = y_values[:-1]  # left-endpoint rule: drop the final y-value
for each_num in y_values:
area = h*each_num
area_array.append(area)
total_sum = np.sum(area_array)
return area_array, total_sum
#Test the integral function for x**2 from x=1 to x=10
test1 = integral_funct(0.01,0,10)
test1_area = test1[0]
#print("This is the individual area array:",test1_area)
test1_sum = test1[1]
print("Total area calculated by my function:",test1_sum)
#This is not as close to the true value as I thought it would be. Analytically, the value
#for the example test is about 333.33.
#Now compare to the trapz function
test_trapz = np.trapz(y_values,x_values, dx=0.01)
print("Total integral calculated by NumPy's trapz funct:",test_trapz)
#This compares better with the analytical value than my function does; the value
#provided by trapz lies between the other two values.
#Now use different functions from scipy to compute the integral
attempt1 = scipy.integrate.trapz(y_values,x_values, dx=0.01)
attempt2 = scipy.integrate.simps(y_values,x_values, dx=0.01)
def x_squared(x):
y = x**2
return y
attempt3 = scipy.integrate.romberg(x_squared, a=0, b=10)
#sum_attempt3 = np.sum(attempt3)
print("The integral calculated by the Scipy trapz funct:", attempt1)
print("The integral calculated by the Scipy simps funct:", attempt2)
print("The integral calculated by the Scipy romberg function:", attempt3)
#Here we can see that the two scipy functions trapz and simps give similar values
#to the numpy trapz, which is not too far off from my function. But the Scipy
#romberg function actually reproduces the analytical value.
```
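For one more point of comparison (not part of the original exercise), scipy.integrate.quad integrates the function adaptively rather than working from pre-sampled points; a brief sketch:
```python
from scipy import integrate

# quad evaluates the integrand directly and returns (value, estimated absolute error).
value, error = integrate.quad(lambda x: x**2, 0, 10)
print("The integral calculated by scipy.integrate.quad:", value)
print("Estimated absolute error:", error)
```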
#### File: PHYS-3210/Week 06/exercise13.py
```python
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rand
#Define a function to calculate the integral by random sampling (pond idea)
def Probint(N, initial, final, limit):
topx = []
topy = []
botx = []
boty = []
N_out = N
#for each iteration, randomly generate the x and y values and calculate actual x**2 value
for n in range(N):
x = rand.uniform(initial,final)
y = rand.uniform(initial,limit)
y2 = x**2
#determine which is greater and append to correct array
if y > y2:
N_out = N_out - 1
topx.append(x)
topy.append(y)
#top_array = [x,y]
elif y < y2:
botx.append(x)
boty.append(y)
#bot_array = [x,y2]
#calculate the total number of points and the results
ratio = N_out/N
area = final*limit
results = area*ratio
return results, topx,topy,botx,boty
test1, topx,topy,botx,boty = Probint(1000000, 0, 10, 100)
print("The estimated value for (x**2) is:", test1)
plt.scatter(topx, topy, s=2, color = 'green')
plt.scatter(botx, boty, s=2, color = 'blue')
plt.title("Distrubition of values for Integral of X**2")
plt.show()
#With 100,000 iterations, my function was able to get a reasonable answer for
#the integral of x**2 from 0 to 10, and an even better result with 1 million iterations.
#This is a reasonable value given that it is
#calculated from randomly generated numbers. It is interesting that you can use the distribution
#of random points to determine the integral of a function, based on the fact that the ratio of points
#corresponds to a ratio of areas.
#Now use the function to calculate the integral for sin(x)
def Probint2(N, initial, final, up_limit, down_limit):
topx = []
topy = []
botx = []
boty = []
N_out = N
for n in range(N):
x = rand.uniform(initial,final)
y = rand.uniform(initial,up_limit)
y2 = np.sin(x)
if (y < y2) and (y >= 0) or (y > y2) and (y <= 0):
topx.append(x)
topy.append(y)
N_out = N - 2
else:
botx.append(x)
boty.append(y)
tot_top = len(topy)
tot_bot = len(boty)
ratio = N/N_out
lowx = np.min(botx)
highx = np.max(botx)
lowy = np.min(boty)
highy = np.max(boty)
xvalue = highx+lowx
yvalue = highy+lowy
area = xvalue*yvalue
result = area*ratio
return result, topx,topy,botx,boty
test2, toppx, toppy, bottx, botty = Probint2(1000000, -2*np.pi, 2*np.pi, 1, -1)
print("The estimated value for sin(x) is:", test2)
plt.scatter(toppx, toppy, s=2, color = 'purple')
plt.scatter(bottx, botty, s=2, color = 'pink')
plt.title("Distrubition of Values for Integral of sin(x)")
plt.ylim(-1,1)
plt.show()
#Comparing the value calculated by my function with the analytical value,
#my result is smaller by a couple of orders of magnitude. It is close, but not quite there. I do not
#believe the function will be able to accurately calculate the integral of sin(x) from 0 to 10.
#With one million iterations already, I do not know how feasible this method would be for precisely
#calculating the value. It is a good estimate, though. But the variation of the results over several runs
#creates too much uncertainty; some of the results are outside of perhaps two standard deviations.
#Now create a new function to calculate the integral of.
def Probint3(N, initial, final, up_limit):
topx = []
topy = []
botx = []
boty = []
N_out = N
for n in range(N):
x = rand.uniform(initial,final)
y = rand.uniform(initial,up_limit)
y2 = np.exp(x**3)+ (2*x**2)+3
if (y < y2) and (y >= 0) or (y > y2) and (y <= 0):
topx.append(x)
topy.append(y)
N_out = N - 2
else:
botx.append(x)
boty.append(y)
tot_top = len(topy)
tot_bot = len(botx)
ratio = N/N_out
area = up_limit*tot_bot
#area = xvalue*yvalue
result = area*ratio
return result, topx,topy,botx,boty
test3, tx, ty, bx, by = Probint3(1000000, -10, 10, 500)
print("The estimated value of (e**(x**3)+ 2x**2 + 3) is:", test3)
plt.scatter(tx, ty, s=2, color = 'pink')
plt.scatter(bx, by, s=2, color = 'blue')
plt.title("Distrubition of Values for Integral of (e**(x**3)+2x**2+3)")
plt.show()
```
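The three functions above all use the hit-or-miss ("points under the curve") approach. A common alternative with the same random-sampling flavor is the mean-value estimator, sketched below: sample x uniformly on [a, b] and average f(x) times the interval length.
```python
import numpy as np
import numpy.random as rand

def mean_value_integral(f, a, b, N):
    """Estimate the integral of f over [a, b] as (b - a) times the average of f
    at N uniformly sampled points."""
    x = rand.uniform(a, b, N)
    return (b - a) * np.mean(f(x))

print("Estimate for x**2 on [0, 10]:", mean_value_integral(lambda x: x**2, 0, 10, 1000000))
print("Estimate for sin(x) on [-2*pi, 2*pi]:", mean_value_integral(np.sin, -2*np.pi, 2*np.pi, 1000000))
```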
|
{
"source": "JessicaHolm/gothello",
"score": 3
}
|
#### File: JessicaHolm/gothello/table.py
```python
import random
class Table(object):
    # Create an array of random 20-bit values for all possible combinations of piece and position.
def __init__(self):
bit_table = [[0] * 2 for _ in range(25)]
for i in range(25):
for j in range(2):
bit_table[i][j] = random.getrandbits(20)
self.bit_table = bit_table
self.table = {}
# Use the Zobrist hash to get a hash of the board position.
def get_hash(self, grid):
h = 0
count = 0
for i in range(5):
for j in range(5):
if grid[i][j] != 0:
k = grid[i][j] - 1
h = h ^ self.bit_table[count][k]
count += 1
return h
# Check to see if a position is in the table.
def ttLookup(self, grid):
key = self.get_hash(grid)
if key in self.table:
return self.table[key]
else:
return Entry()
# Store a position indexed by its hash.
def ttStore(self, grid, ttEntry):
key = self.get_hash(grid)
self.table[key] = ttEntry
class Entry(object):
    # Initialize values for a table entry.
def __init__(self, value = 0, flag = 0, depth = -1):
self.value = value
self.flag = flag
self.depth = depth
```
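A small usage sketch for the transposition table above, assuming a 5x5 Gothello grid with 0 for an empty square and 1 or 2 for the two colors (the grid below is illustrative only):
```python
table = Table()
grid = [[0] * 5 for _ in range(5)]
grid[2][2] = 1                      # illustrative position: one stone of each color
grid[2][3] = 2

entry = table.ttLookup(grid)        # miss: returns a fresh Entry with depth == -1
entry.value, entry.flag, entry.depth = 10, 0, 3
table.ttStore(grid, entry)

cached = table.ttLookup(grid)       # hit: the same position hashes to the stored entry
print(cached.value, cached.flag, cached.depth)
```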
|
{
"source": "JessicaIsri/WebBot",
"score": 2
}
|
#### File: app/controllers/default.py
```python
from flask import render_template
from app import app
@app.route("/index")
@app.route("/")
def index():
return render_template("app.html")
@app.route("/quemsomos")
def quem_somos():
return render_template("quemsomos.html")
@app.route("/contato")
def contato():
return render_template("contato.html")
```
|
{
"source": "JessicaKaechele/Multi-Type-TD-TSR",
"score": 3
}
|
#### File: Multi-Type-TD-TSR/scripts/deskew.py
```python
import numpy as np
import cv2
import argparse
import os
import sys
class Image():
"""Image operations"""
def __init__(self, input_folder, file_name, output_folder) -> None:
self.input_image_path = f"{input_folder}/{file_name}"
self.output_image_path = f"{output_folder}/{file_name}"
self.__read_image()
self.__blur_image()
self.__threshold_image()
self.__get_coordinates()
def __read_image(self) -> None:
self.original_image = cv2.imread(self.input_image_path)
def __blur_image(self) -> None:
self.blur_image = cv2.medianBlur(self.original_image, 5) # MedianBlur used to remove black artefacts from the image
def __threshold_image(self) -> None:
        self.grayscale_image = cv2.bitwise_not(cv2.cvtColor(self.original_image, cv2.COLOR_BGR2GRAY)) # Grayscale conversion to ensure foreground is white and background is black
self.threshold_image = cv2.threshold(
self.grayscale_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] # Set all foreground pixels to 255 and background pixels to 0
def __get_coordinates(self) -> None:
self.coordinates = np.column_stack(np.where(self.threshold_image > 0)) # grab the (x, y) coordinates of all pixel values that are greater than zero and compute a bounding box
def deskew_image(self):
"""
Adapted from:
https://www.pyimagesearch.com/2017/02/20/text-skew-correction-opencv-python/
"""
angle = cv2.minAreaRect(self.coordinates)[-1] # the `cv2.minAreaRect` function returns values in the range [-90, 0); as the rectangle rotates clockwise the returned angle trends to 0 -- in this special case we need to add 90 degrees to the angle
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
# rotate the image to deskew it
height, width = self.blur_image.shape[:2]
center = (width // 2, height // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
self.deskewed_image = cv2.warpAffine(
self.original_image, rotation_matrix, (width, height), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
def write_image(self) -> None:
cv2.imwrite(self.output_image_path, self.deskewed_image)
class Parser():
"""Defining and parsing command-line arguments"""
def __init__(self) -> None:
self.parser = argparse.ArgumentParser('deskew', 'python3 -m deskew --input input_folder --output output_folder', 'Deskew images')
self.__add_arguments()
def __add_arguments(self) -> None:
""" Add arguments to the parser """
self.parser.add_argument("--input", help="Input folder")
self.parser.add_argument("--output", help="Output folder")
return
def parse_arguments(self, args: list) -> argparse.Namespace:
""" Parse arguments """
if args:
return self.parser.parse_args(args)
else:
raise Exception
if __name__ == "__main__":
parser = Parser()
args = parser.parse_arguments(sys.argv[1:])
    files = os.listdir(args.input)
for file_ in files:
print(file_)
        print(args.input)
image = Image(args.input, file_, args.output)
image.deskew_image()
image.write_image()
```
|
{
"source": "jessicakimbril/data-science-medcab",
"score": 3
}
|
#### File: data-science-medcab/web_app/exp.py
```python
import csv
from flask import Flask, render_template
APP = Flask(__name__)
# homepage with full list of strains info
@APP.route("/")
def index():
with open('cannabis.csv') as csv_file:
data = csv.reader(csv_file, delimiter=',')
first_line = True
strains = []
for row in data:
if not first_line:
strains.append({
"strain": row[0],
"type": row[1],
"rating": row[2],
"effects": row[3],
"flavor": row[4],
"description": row[5]
})
else:
first_line = False
return render_template("base.html", strains=strains)
# command line code to run the app:
# FLASK_APP=exp.py flask run
```
#### File: web_app/routes/ParseURL.py
```python
from os import path
import re
import csv
from flask import Flask, Blueprint, render_template, jsonify
# Make Blueprint for __init__.py
ParseURL = Blueprint("ParseURL", __name__)
# Import Leafly csv
file_name = path.join(path.dirname(__file__), "Leafly.csv")
# Route to display dictionary list
@ParseURL.route("/request", methods=['GET', 'POST'])
def request():
'''
    Loops over the cannabis.csv file, appending each row to a list.
    Does not include the first line, since that is the header row of the csv file.
    Returns that list as JSON.
'''
with open(file_name, encoding="utf-8") as csv_file:
data = csv.reader(csv_file, delimiter=',')
first_line = True
strains = []
for row in data:
if not first_line:
strains.append({
"strain": row[0],
"type": row[1],
"rating": row[2],
"effects": row[3],
"flavor": row[4],
"description": row[5]
})
else:
first_line = False
return jsonify(strains)
# Routes to display single dictionary list item as JSON object
@ParseURL.route('/<strain>', methods=['GET', 'POST'])
def strain_url(strain):
'''
Parameters: name of strain from database as a string.
    Loops over the cannabis.csv file, building a dictionary.
    Returns only the strain that was given as a parameter, as JSON.
'''
with open(file_name, encoding="utf8") as csv_file:
data = csv.reader(csv_file, delimiter=',')
dict_strain = {}
for row in data:
if row[0] == strain:
dict_strain = {
"strain": row[0],
"type": row[1],
"rating": row[2],
"effects": row[3],
"flavor": row[4],
"description": row[5]
}
break
return jsonify(dict_strain)
# Route to display single dictionary list item via template
@ParseURL.route("/<strain>/strainmenu", methods=['GET', 'POST'])
def pretty_url(strain):
'''
Parameters: name of strain from database as a string.
    Loops over the cannabis.csv file, appending matching rows to a list.
    Returns only the strain that was given as a parameter, rendered in a template.
'''
with open(file_name, encoding="utf8") as csv_file:
data = csv.reader(csv_file, delimiter=',')
strains = []
for row in data:
if row[0] == strain:
strains.append({
"strain": row[0],
"type": row[1],
"rating": row[2],
"effects": row[3],
"flavor": row[4],
"description": row[5]
})
break
return render_template("strainmenu.html", strains=strains)
```
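The blueprint above is imported from the package's __init__.py (see the "Make Blueprint for __init__.py" comment). A minimal registration sketch; the application factory below is an assumption for illustration, not the project's actual __init__.py:
```python
# web_app/__init__.py (illustrative sketch, not the project's actual file)
from flask import Flask
from .routes.ParseURL import ParseURL

def create_app():
    app = Flask(__name__)
    app.register_blueprint(ParseURL)    # exposes /request, /<strain>, /<strain>/strainmenu
    return app

# command line: FLASK_APP=web_app flask run
```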
#### File: data-science-medcab/wrangling/Leafly_csv_Flavors.py
```python
from os import path
import pandas as pd
# Import Leafly csv
file_name = path.join(path.dirname(__file__), "../data/Leafly.csv")
df = pd.read_csv(file_name)
df = df.dropna()
# Examine the Leafly csv data head
#print(df.head())
# First wrangle for unique flavor
# Check type of Flavor column data
print(type(df.Flavor[1])) # <class 'str'>
# Strip and split the Flavor column string data in order to get unique values
df.Flavor.str.strip('[]').str.split(',')
stripped_flavor = list(set([a for b in df.Flavor.str.strip('[]').str.split(',') for a in b]))
# Verify the Flavor column data had changed from string to set to list
print(type(stripped_flavor))
# Function to get unique values
def unique(flavor):
    # Convert the input list to a set (use the parameter rather than the global list)
    flavor_set = set(flavor)
# Convert the set to the list
unique_list_of_flavor = (list(flavor_set))
for x in unique_list_of_flavor:
print(x)
# Print the unique flavors from the first wrangle (listed below), then move on to the second wrangle
print(unique(stripped_flavor))
# 47 Unique Flavors
# Plum
# Chestnut
# Earthy
# Apricot
# Sage
# Skunk
# Berry
# Strawberry
# Blue
# Tropical
# Blueberry
# Honey
# Coffee
# Tea
# Vanilla
# Peach
# Fruit
# Lime
# Menthol
# Tobacco
# Mint
# Cheese
# Lavender
# Grape
# Chemical
# Minty
# Pepper
# Ammonia
# Grapefruit
# Citrus
# Violet
# Pear
# Mango
# Diesel
# Lemon
# Nutty
# Spicy/Herbal
# Butter
# Woody
# Pine
# Apple
# Flowery
# Tar
# Sweet
# Orange
# Rose
# Pungent
# Pineapple
# Tree
## Second wrangle
#
## Make the stripped, split and replace in Flavor column persist (uses strip and split from Wrangle 1)
#
#df["Flavor"] = df["Flavor"].str.replace(","," ").astype(str)
#
## Check type after strip and split, which is <class 'pandas.core.series.Series'>
#
#print(type(df['Flavor']))
#
## Verify changes with printout to terminal
#
#print(df['Flavor'].head())
#
## Set pandas option to show all columns in final printout verification
#
#pd.set_option('display.max_columns', None)
#
#print(df.head())
#
## Export csv for testing in neural network baseline
#
#file_name = r"C:\Users\johnj\OneDrive\Documents\Lambda\BuildWeek3\data-science\Med_Cabinet\data\Leafly_nolistcommas.csv"
#
#df.to_csv(file_name, sep='\t', encoding='utf-8')
```
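For reference, the first wrangle above can also be done directly in pandas with str.get_dummies; a brief sketch assuming the same comma-separated Flavor column and the same relative data path:
```python
from os import path
import pandas as pd

file_name = path.join(path.dirname(__file__), "../data/Leafly.csv")
df = pd.read_csv(file_name).dropna()
# One indicator column per flavor; the unique flavors are simply the column names.
flavor_dummies = df["Flavor"].str.get_dummies(sep=",")
print(len(flavor_dummies.columns), "unique flavors")
print(sorted(flavor_dummies.columns))
```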
|
{
"source": "JessicaKwon0121/web-scraping-challenge",
"score": 3
}
|
#### File: JessicaKwon0121/web-scraping-challenge/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
@app.route("/")
def home():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
mars_data = scrape_mars.scrape_info()
mars.replace_one({}, mars_data, upsert=True)
return redirect("/")
if __name__ == "__main__":
app.run(port=5500, debug=True)
```
|
{
"source": "jessicalee127/DineCision",
"score": 3
}
|
#### File: DineCision/app/DineCision.py
```python
import argparse
import json
import pprint
import requests
import sys
import urllib
import random
import os
API_KEY = os.environ["DINECISION_API_KEY"]
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
SEARCH_LIMIT = 5
def yelprequest(host, path, api_key, url_params=None):
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
print(u'Querying {0} ...'.format(url))
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
def main():
location_input = input("Please enter the area you want to search for (e.g. 3 Times Square, New York City): ")
rating_input = input("Do you care about ratings (e.g. 4 or 4.5): ")
price_input = input("Do you care about price (e.g. 1 is the lowest, 4 is the highest): ")
url_params = {
'location': location_input.replace(' ', '+'),
'radius': 500,
'is_closed': "false",
'rating': rating_input,
'limit': SEARCH_LIMIT,
'categories': "restaurants, All",
'price': price_input
}
result = yelprequest(API_HOST, SEARCH_PATH, API_KEY, url_params)
business_list = result["businesses"]
random_business = random.choice(business_list)
print("Please go to " + random_business["name"] + " !")
Show_more = input("Do you want to learn more about it (y/n): ")
if Show_more == "y":
print(random_business["name"] + ", located at " + str(random_business["location"]['display_address'][0]) + ", " + str(random_business["location"]['state']) + " " + str(random_business["location"]['zip_code']))
else:
print("enjoy!")
if __name__ == '__main__':
main()
```
#### File: DineCision/app/ui.py
```python
from flask import Flask, request, render_template, flash, redirect, url_for
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
from wtforms.validators import DataRequired
from app.DineCision import yelprequest
from flask_wtf import FlaskForm
import random
import json
import os
API_KEY = os.environ.get("DINECISION_API_KEY") or "Please obtain a Yelp API Key and set it as an environment variable named 'DINECISION_API_KEY'"
SECRET_KEY = os.environ.get("SECRET_KEY") or "my super secret"
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/'
app = Flask(__name__)
app.secret_key = SECRET_KEY
class NameForm(Form):
location = TextField('location:', validators=[validators.required()])
submit = SubmitField('Submit')
@app.route('/', methods=['GET', 'POST'])
def index():
location = None
form = NameForm(request.form)
if request.method == 'POST' and 'location' in request.form:
if form.validate():
return redirect(url_for('confirm', location=request.form.get('location')))
else:
flash("Please enter a location")
return redirect(url_for('error'))
return render_template('index.html')
@app.route('/error')
def error():
return render_template('error.html')
@app.route('/confirm/<location>')
def confirm(location):
random_business = yelp(location)
if random_business is None:
flash("Sadly there is no good restaurant to recommend in this location due to limited data, please choose another location")
return redirect(url_for('error'))
else:
return render_template('confirm.html', random_business=random_business['name'], where_is_it_0=random_business["location"]['display_address'][0], where_is_it_1=random_business["location"]['display_address'][1], number_review=random_business["review_count"], pic_url=random_business["image_url"])
def yelp(location_input):
url_params = {
'location': location_input.replace(' ', '+'),
'radius': 500,
'is_closed': "false",
'rating': 4,
'limit': 5,
'categories': "restaurants, All",
'price': 2
}
result = yelprequest(API_HOST, SEARCH_PATH, API_KEY, url_params)
business_list = result["businesses"]
try:
random_business = random.choice(business_list)
return random_business
except IndexError:
return None
if __name__ == "__main__":
app.debug = True
app.run()
```
|
{
"source": "jessicaleete/numerical_computing",
"score": 2
}
|
#### File: Labs/ARMA_Models/arma_solution.py
```python
from numpy import array, kron, eye, zeros, log, sqrt, inf, mean, std, allclose
from numpy.linalg import inv
from scipy.stats.distributions import norm
from scipy.optimize import fmin
import numpy as np
# note that this implementation is zero indexed (first time is t=0), although
# the lab is not (first time is t=1) to conform in style with the Kalman
# filter lab
# datasets
a = """17.0 16.6 16.3 16.1 17.1 16.9 16.8 17.4 17.1 17.0 16.7 17.4 17.2 17.4
17.4 17.0 17.3 17.2 17.4 16.8 17.1 17.4 17.4 17.5 17.4 17.6 17.4 17.3
17.0 17.8 17.5 18.1 17.5 17.4 17.4 17.1 17.6 17.7 17.4 17.8 17.6 17.5
16.5 17.8 17.3 17.3 17.1 17.4 16.9 17.3 17.6 16.9 16.7 16.8 16.8 17.2
16.8 17.6 17.2 16.6 17.1 16.9 16.6 18.0 17.2 17.3 17.0 16.9 17.3 16.8
17.3 17.4 17.7 16.8 16.9 17.0 16.9 17.0 16.6 16.7 16.8 16.7 16.4 16.5
16.4 16.6 16.5 16.7 16.4 16.4 16.2 16.4 16.3 16.4 17.0 16.9 17.1 17.1
16.7 16.9 16.5 17.2 16.4 17.0 17.0 16.7 16.2 16.6 16.9 16.5 16.6 16.6
17.0 17.1 17.1 16.7 16.8 16.3 16.6 16.8 16.9 17.1 16.8 17.0 17.2 17.3
17.2 17.3 17.2 17.2 17.5 16.9 16.9 16.9 17.0 16.5 16.7 16.8 16.7 16.7
16.6 16.5 17.0 16.7 16.7 16.9 17.4 17.1 17.0 16.8 17.2 17.2 17.4 17.2
16.9 16.8 17.0 17.4 17.2 17.2 17.1 17.1 17.1 17.4 17.2 16.9 16.9 17.0
16.7 16.9 17.3 17.8 17.8 17.6 17.5 17.0 16.9 17.1 17.2 17.4 17.5 17.9
17.0 17.0 17.0 17.2 17.3 17.4 17.4 17.0 18.0 18.2 17.6 17.8 17.7 17.2
17.4"""
b = """-3 -5 7 3 -3 4 16 14 -3 2 6 1 -2 -1 -6 -1 -11 9 4 -4 -5 -3 -1 1 -2 2 -4 4 -3 0
2 1 -2 -1 -1 0 -2 1 0 0 -9 1 1 4 0 -4 6 8 7 2 -1 0 -4 6 1 2 5 -1 2 -3 -3 1 5 4
9 -2 3 -4 -1 6 4 4 -4 4 8 16 4 -4 -6 4 4 -4 4 -2 -4 -3 -1 -7 -15 10 13 2 -4 3 0
6 6 6 -2 0 3 11 0 -2 6 5 4 1 5 9 4 -4 -3 -11 2 -2 -4 2 9 0 4 0 -1 3 0 -3 0 -3
-4 -6 -6 2 11 -4 -5 -1 5 -3 0 -4 0 1 2 6 -3 -2 -5 -5 4 0 -2 4 5 2 -5 -7 5 -6
-11 -11 7 1 6 1 3 -6 -2 -6 0 0 -6 3 -6 -3 -9 -7 17 13 3 -7 0 1 1 4 0 -9 -1 -2 0
-6 0 -4 2 -2 1 1 6 5 -2 1 1 1 4 0 -1 -1 -1 3 1 -3 0 -6 2 0 -10 2 -1 -5 -8 -12
-3 11 0 0 2 -7 -5 7 -1 3 -1 0 0 -1 -5 -14 -14 -31 8 11 9 4 -11 -16 -8 2 -7 9 -3
5 -8 1 -15 -20 -17 1 -38 22 10 -8 -25 4 1 5 4 -15 -24 -12 -17 27 -3 6 -8 -12 4
12 -12 13 11 -5 11 1 -1 -5 5 9 16 4 -3 6 -12 -5 2 5 1 -10 8 -2 7 11 0 -11 9 0
-5 -7 9 -5 -1 3 7 1 -1 3 5 -1 16 2 -2 -1 -15 -2 -3 8 -9 -4 5 -6 2 -2 1 0 5 7 -3
-6 -3 -6 -13 5 -14 -5 3 -13 10 -1 9 2 0 6 -7 -3 -1 12 -10 4 -6 -7 -5 -13 10 -1
-8 14 7 -6 6 5"""
c = """ 101 82 66 35 31 7 20 92 154 125
85 68 38 23 10 24 83 132 131 118
90 67 60 47 41 21 16 6 4 7
14 34 45 43 48 42 28 10 8 2
0 1 5 12 14 35 46 41 30 24
16 7 4 2 8 17 36 50 62 67
71 48 28 8 13 57 122 138 103 86
63 37 24 11 15 40 62 98 124 96
66 64 54 39 21 7 4 23 55 94
96 77 59 44 47 30 16 7 37 74"""
ta = np.fromstring(a, sep=' ',)
ta = ta.reshape((len(ta),1))
tb = np.fromstring(b, sep=' ')
tb = tb.reshape((len(tb),1))
tc = np.fromstring(c, sep=' ')
tc = tc.reshape((len(tc),1))
# fitted models
fit_a = (array([ 0.90867024]), array([-0.57585945]),
17.065262486340927, 0.31253098628150655)
fit_b = (array([ 0.2350456, -0.3839864, -0.6566961]),
array([-0.20234983, 0.41060419, 0.67314649]), -0.2853804404204241,
7.0334525375368138)
fit_c = (array([ 1.22481184, -0.56007884]), array([ 0.38466735]),
48.462278111207979, 14.622537558888457)
def arma_likelihood(time_series, phis=array([]), thetas=array([]), mu=0.,
sigma=1.):
"""
Return the log-likelihood of the ARMA model parameters, given the time
series.
Parameters
----------
time_series : ndarray of shape (n,1)
The time series in question
phis : ndarray of shape (p,)
The phi parameters
thetas : ndarray of shape (q,)
The theta parameters
mu : float
The parameter mu
sigma : float
The parameter sigma
Returns
-------
log_likelihood : float
The log-likelihood of the model
"""
F, Q, H, dim_states, dim_time_series = state_space_rep(phis, thetas, mu,
sigma)
mus, covs = kalman(F, Q, H, time_series - mu)
likelihood = 0.
for i in xrange(len(mus)):
cond_mu = H.dot(mus[i])
cond_sigma = H.dot(covs[i].dot(H.T))
likelihood += log(norm.pdf(time_series[i] - mu, loc=cond_mu,
scale=sqrt(cond_sigma)))
return float(likelihood)
def arma_fit(time_series):
"""
Return the ARMA model that minimizes AICc for the given time series,
subject to p,q <= 3.
Parameters
----------
time_series : ndarray of shape (n,1)
The time series in question
Returns
-------
phis : ndarray of shape (p,)
The phi parameters
thetas : ndarray of shape (q,)
The theta parameters
mu : float
The parameter mu
sigma : float
The parameter sigma
"""
best_aicc = inf
best_params = [], [], 0, 0
emp_mean = mean(time_series)
emp_sigma = std(time_series)
for p in range(4):
for q in range(4):
print "Optimizing for p={}, q={}".format(p, q)
x = array([0]*p + [0]*q + [emp_mean] + [emp_sigma])
def f(x):
return -1*arma_likelihood(time_series, phis=x[:p],
thetas=x[p:p+q], mu=x[-2], sigma=x[-1])
opt_x = fmin(f, x, maxiter=10000, maxfun=10000)
print "Optimal x {}".format(opt_x)
aicc = 2*len(opt_x)*(1 + (len(opt_x) + 1)/(len(time_series) - \
len(opt_x))) + 2*f(opt_x)
print "AICc {}".format(aicc)
if aicc < best_aicc:
print "New best model found with p={}, q={}".format(p, q)
best_aicc = aicc
best_params = opt_x[:p], opt_x[p:p+q], opt_x[-2], opt_x[-1]
return best_params
def arma_forecast(time_series, phis=array([]), thetas=array([]), mu=0.,
sigma=1., future_periods=20):
"""
Return forecasts for a time series modeled with the given ARMA model.
Parameters
----------
time_series : ndarray of shape (n,1)
The time series in question
phis : ndarray of shape (p,)
The phi parameters
thetas : ndarray of shape (q,)
The theta parameters
mu : float
The parameter mu
sigma : float
The parameter sigma
future_periods : int
The number of future periods to return
Returns
-------
evls : ndarray of shape (future_periods,)
The expected values of z for times n + 1, ..., n + future_periods
sigs : ndarray of shape (future_periods,)
The deviations of z for times n + 1, ..., n + future_periods
"""
F, Q, H, dim_states, dim_time_series = state_space_rep(phis, thetas, mu,
sigma)
mus, covs = kalman(F, Q, H, time_series - mu)
fut_covs = zeros((future_periods + 1, dim_states, dim_states))
fut_mus = zeros((future_periods + 1, dim_states))
evls = zeros(future_periods + 1)
sigs = zeros(future_periods + 1)
# forecast using Kalman filter
yk = (time_series[-1] - mu) - H.dot(mus[-1])
Sk = H.dot(covs[-1]).dot(H.T)
Kk = covs[-1].dot(H.T.dot(inv(Sk)))
fut_mus[0] = mus[-1] + Kk.dot(yk)
fut_covs[0] = (eye(covs[-1].shape[0]) - Kk.dot(H)).dot(covs[-1])
evls[0] = H.dot(fut_mus[0]) + mu
sigs[0] = H.dot(fut_covs[0]).dot(H.T)
for i in xrange(1, future_periods + 1):
fut_mus[i] = F.dot(fut_mus[i-1])
fut_covs[i] = F.dot(fut_covs[i-1]).dot(F.T) + Q
evls[i] = H.dot(fut_mus[i]) + mu
sigs[i] = sqrt(H.dot(fut_covs[i]).dot(H.T))
return evls[1:], sigs[1:]
def kalman(F, Q, H, time_series):
dim_time_series = time_series[0].shape[0]
dim_states = F.shape[0]
# covs[i] = P_{i | i-1}
covs = zeros((len(time_series), dim_states, dim_states))
mus = zeros((len(time_series), dim_states))
covs[0] = inv(eye(dim_states**2) - kron(F,F)).dot(Q.flatten()).reshape(
(dim_states,dim_states))
mus[0] = zeros((dim_states,))
for i in xrange(1, len(time_series)):
t1 = inv(H.dot(covs[i-1]).dot(H.T))
t2 = covs[i-1].dot(H.T.dot(t1.dot(H.dot(covs[i-1]))))
covs[i] = F.dot((covs[i-1] - t2).dot(F.T)) + Q
mus[i] = F.dot(mus[i-1]) + F.dot(covs[i-1].dot(H.T.dot(t1))).dot(
time_series[i-1] - H.dot(mus[i-1]))
return mus, covs
def state_space_rep(phis, thetas, mu, sigma):
dim_states = max(len(phis), len(thetas)+1)
dim_time_series = 1 #hardcoded for 1d time_series
F = zeros((dim_states,dim_states))
Q = zeros((dim_states, dim_states))
H = zeros((dim_time_series, dim_states))
F[0][:len(phis)] = phis
F[1:,:-1] = eye(dim_states - 1)
Q[0][0] = sigma**2
H[0][0] = 1.
H[0][1:len(thetas)+1] = thetas
return F, Q, H, dim_states, dim_time_series
def test(time_series_a):
"""Assert that statements made in lab examples are correct"""
assert allclose(arma_likelihood(time_series_a, array([0.9]), array([]),
17., 0.4), -77.603545, atol=1e-4)
    phis, thetas, mu, sigma = fit_a  # fitted parameters for the dataset 'ta' defined above
evls, sigs = arma_forecast(time_series_a, phis, thetas, mu, sigma,
future_periods=4)
assert allclose(evls, (17.3762, 17.3478, 17.322, 17.2986), atol=1e-4)
assert allclose(sigs, (0.3125, 0.3294, 0.3427, 0.3533), atol=1e-4)
fit_test = arma_fit(time_series_a)
for i in xrange(4):
        assert allclose(fit_test[i], fit_a[i], atol=1e-4)
```
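A short usage sketch tying the functions above together, using the embedded dataset ta and its pre-fitted parameters fit_a (forecasting only, since arma_fit re-optimizes sixteen candidate models and is slow):
```python
phis, thetas, mu, sigma = fit_a
print "log-likelihood:", arma_likelihood(ta, phis, thetas, mu, sigma)
evls, sigs = arma_forecast(ta, phis, thetas, mu, sigma, future_periods=4)
print "forecasted values: ", evls
print "forecast deviations:", sigs
```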
#### File: Labs/ArnoldiIteration/plots.py
```python
import numpy as np
from numpy.random import rand
from cmath import sqrt
from scipy.linalg import eig, inv
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt
from solutions import arnoldi
def arnoldi_convergence_plot(A, b, k, view_vals, filename):
difs = np.empty((view_vals, k))
A_eigs = eig(A, right=False)
A_eigs = A_eigs[np.absolute(A_eigs).argsort()[::-1]]
for i in xrange(1, k+1):
H = arnoldi(b, A.dot, i)
H_eigs = eig(H, right=False)
H_eigs = H_eigs[np.absolute(H_eigs).argsort()[::-1]]
difs[:min(view_vals, H_eigs.size),i-1] = np.absolute(H_eigs[:view_vals].real - A_eigs[:min(view_vals,H_eigs.size)].real)
X = np.arange(2, k+2)
difs[difs<1E-16] = 1E-16
for i in xrange(view_vals):
plt.semilogy(X[i:], difs[i,i:] / np.absolute(A_eigs[i].real))
plt.xlim((0, k))
plt.savefig(filename)
plt.clf()
if __name__=='__main__':
m = 500
X = rand(m, m)
A = np.zeros((m, m))
np.fill_diagonal(A, rand(m))
A[:] = X.dot(A).dot(inv(X))
b = rand(m)
arnoldi_convergence_plot(A, b, 300, 15, 'rand_eigs_conv.pdf')
arnoldi_convergence_plot(X, b, 200, 15, 'rand_vals_conv.pdf')
```
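plots.py imports arnoldi from a solutions module that is not reproduced here. A hedged sketch of what such a function could look like, matching the call arnoldi(b, A.dot, k) used above and returning a square upper Hessenberg matrix whose eigenvalues (Ritz values) approximate those of A; the real solutions.arnoldi may differ in details:
```python
import numpy as np

def arnoldi(b, Amul, k, tol=1e-8):
    """k steps of the Arnoldi iteration (sketch).
    b    : starting vector
    Amul : callable returning the matrix-vector product A.dot(x)
    k    : number of iterations
    Returns the square upper Hessenberg matrix H, truncated early if an
    invariant subspace is found."""
    Q = np.empty((b.size, k + 1), dtype=np.complex128)
    H = np.zeros((k + 1, k), dtype=np.complex128)
    Q[:, 0] = b / np.linalg.norm(b)
    for j in xrange(k):
        v = Amul(Q[:, j]).astype(np.complex128)
        for i in xrange(j + 1):                  # modified Gram-Schmidt orthogonalization
            H[i, j] = np.vdot(Q[:, i], v)
            v -= H[i, j] * Q[:, i]
        H[j + 1, j] = np.linalg.norm(v)
        if H[j + 1, j] < tol:                    # breakdown: invariant subspace found
            return H[:j + 1, :j + 1]
        Q[:, j + 1] = v / H[j + 1, j]
    return H[:k, :k]
```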
#### File: Labs/ComplexitySparseMatrices/solutions.py
```python
import numpy as np
from matplotlib import pyplot as plt
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sl
def Problem1():
# the students should have timed the code 4 times.
# their runtimes should be similar to what is below
runtimes = [8.95, 36.7, 144, 557]
inputs = [1000, 2000, 4000, 8000]
plt.plot(inputs, runtimes, 'go')
plt.show()
# now calculate the average ratio of successive runtimes
return ((36.7/8.95)+(144/36.7)+(557.0/144))/3.0
def Problem2(n):
# this solution imitates the example code given in the lab
return np.diagflat([-1]*(n-1), -1) + np.diagflat([2]*n, 0) + np.diagflat([-1]*(n-1),1)
def Problem3(n):
# build the diagonals
diags = np.array([[-1]*n,[2]*n,[-1]*n])
# create and return the sparse array
return sparse.spdiags(diags, [-1,0,1], n, n, format='csr')
def Problem4(n, sparse=False):
b = np.random.rand(n,1)
if sparse:
A = Problem3(n)
return sl.spsolve(A,b)
else:
A = Problem2(n)
return linalg.solve(A,b)
def Problem5(n):
A = Problem3(n)
eig = sl.eigs(A.asfptype(), k=1, which="SM")[0].min()
return eig*(n**2)
#Problem 6 (the %timeit lines below are IPython magics; run them in an IPython session)
A = np.random.rand(500,500)
b = np.random.rand(500)
%timeit A.dot(b)
B = sparse.csc_matrix(A)
%timeit B.dot(b)
```
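A runnable alternative to the IPython magics in Problem 6, using the standard-library timeit module (a sketch; the timings themselves are machine-dependent):
```python
import timeit
import numpy as np
from scipy import sparse

A = np.random.rand(500, 500)
b = np.random.rand(500)
B = sparse.csc_matrix(A)
# timeit.repeat accepts a callable; report the best of the repeated runs.
print "dense A.dot(b): ", min(timeit.repeat(lambda: A.dot(b), number=1000))
print "sparse B.dot(b):", min(timeit.repeat(lambda: B.dot(b), number=1000))
```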
#### File: Labs/ComplexNumbers/complex_intro_solutions_template.py
```python
import numpy as np
def plot_real(f, xbounds=(-1, 1), ybounds=(-1, 1), res=401):
""" Make a surface plot of the real part
of the function 'f' given the bounds and resolution. """
pass
def plot_poly_imag(f, xbounds=(-1, 1), ybounds=(-1, 1), res=401):
""" Plot the imaginary part of the function 'f'
given the bounds and resolution. """
pass
def plot_poly_both(f, xbounds=(-1, 1), ybounds=(-1, 1), res=401):
""" Plot the real and imaginary parts of
the function 'f', given the bounds and resolution. """
pass
def nroot_real(n, res=401):
""" Plot the Riemann surface for the real part
of the n'th root function. """
pass
def nroot_imag(n, res=401):
""" Plot the Riemann surface for the imaginary part
of the n'th root function. """
pass
def contour_int(f, c, t0, t1):
""" Evaluate the integral of the function 'f'
parameterized by the function 'c' with initial
and final parameter values 't0' and 't1'. """
pass
def cauchy_formula(f, c, t0, t1):
""" Compute the integral in Cauchy's Integral formula.
'f' is a callable function parameterized by the contour 'c'.
't0' and 't1' are the initial and final parameter values.
This should return a callable function that evaluates the
integral in the function at any given `z0`. """
pass
```
#### File: Labs/CorrelationCovariance/solutions.py
```python
import numpy as np
from matplotlib import pyplot as plt
def shiftByMean(A):
'''
Shift the columns of the input array by their respective means.
Inputs:
A -- an (m,n) array
Return:
a (m,n) array whose columns are the mean-shifted counterparts to A.
'''
return A - A.mean(axis=0)
def computeVariance(A):
'''
Calculate the variance of each column of the input array.
Inputs:
A -- an (m,n) array, not necessarily mean-shifted.
Return:
a 1-d array with n entries, each giving the variance of the corresponding column.
'''
return (shiftByMean(A)**2).sum(axis=0)/np.float(A.shape[0])
def reportStDev():
'''
Print the answer to the question in problem 1.
You may also include the necessary code to import and process the data, but this
need not necessarily be in the body of this function.
'''
data = np.genfromtxt('weight_age_fat.txt', skip_header=1, usecols=[2,3,4])
col_titles = ['weight', 'age', 'blood fat content']
stds = np.sqrt(computeVariance(data))
ind = np.argmin(stds)
min_std = np.min(stds)
print col_titles[ind], min_std
def corrMatrix(W):
'''
Compute the correlation matrix for the columns of input array W
'''
X = W - W.mean(axis=0)
Y = X/np.sqrt((X**2).sum(axis=0))
return np.dot(Y.T, Y)
def reportCorr():
'''
Print the answers to the questions in problem 3. Ther are three distinct questions,
so there should be three distinct print statements, indicating which question is
being answered. Report the columns as their number in the original data file. The first
column, which we omitted, would be column 0, and the last column (giving the mortality)
would be column 16.
Finally, plot the data as described in the problem statement.
'''
data = np.genfromtxt('mortality.txt', skip_header=17, usecols=range(1,17))
corr_mat = corrMatrix(data)
# find the column whose correlation with the last column is closest to zero
min_col = np.argmin(np.abs(corr_mat[:,-1]))
print "The column most nearly uncorrelated with mortality rate is", min_col + 1
# now find the pair of distinct columns with highest correlation.
# set diagonals to zero, since we want to consider only distinct columns
np.fill_diagonal(corr_mat,0)
max_rows = corr_mat.max(axis=1)
max_row = np.argmax(max_rows)
max_col = np.argmax(corr_mat[max_row,:])
print "The pair of columns with largest correlation is", (max_row+1,max_col+1)
# find column with highest correlation to mortality
min_mort = np.argmin(corr_mat[:,-1])
print "The column that is most negatively correlated with mortality is", min_mort + 1
plt.subplot(311)
plt.scatter(data[:,max_row], data[:, max_col])
plt.subplot(312)
plt.scatter(data[:,min_mort], data[:,-1])
plt.subplot(313)
plt.scatter(data[:,min_col], data[:,-1])
plt.show()
plt.clf()
def covariance():
data = np.genfromtxt('mortality.txt', skip_header=17, usecols=range(1,17))
return np.cov(data, rowvar=0)
```
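A quick consistency check (not part of the lab file) that corrMatrix above agrees with numpy's built-in correlation routine:
```python
import numpy as np

W = np.random.rand(50, 4)
# corrMatrix normalizes the mean-shifted columns, so it should match np.corrcoef
# computed column-wise.
print np.allclose(corrMatrix(W), np.corrcoef(W, rowvar=0))
```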
#### File: Labs/FFT/fft_solutions.py
```python
import scipy as sp
import numpy as np
from scipy.fftpack import fft, ifft
from scipy.io import wavfile
from matplotlib import pyplot as plt
from pyfftw.interfaces import scipy_fftpack as fftw
#=============================================================================
# PROBLEM 1 - Plotting signals, create simple sine signal
#=============================================================================
def plot_signal(filename='pulseramp.wav', verbose=False):
"""Plots the signal of any given .wav file.
Parameters
----------
filename : string, optional
The name of the .wav sound file to be plotted.
Defaults to 'pulseramp.wav'.
verbose : boolean, optional
If True, prints out basic information about the signal.
Defaults to False.
Returns
-------
None
"""
rate, mywave = wavfile.read(filename)
if verbose:
print "file:\t" + filename
print "rate:\t" + str(rate)
print "length:\t" + str(len(mywave))
plt.plot(mywave)
plt.title(filename)
plt.show()
def prob1(freq=60, length=1):
"""Generates a sine wave, saves it as a .wav file, and uses plot_signal()
to plot the signal.
Parameters
----------
freq : integer, optional
The fequency of the sine wave. Defaults to 60.
length : integer, optional
The number of seconds the sine wave lasts. Defaults to 1.
Returns
-------
None
"""
samplerate = 44100
stepsize = freq*2*sp.pi/samplerate
signal = sp.sin(sp.arange(0, stepsize*length*samplerate, stepsize))
scaled_signal = sp.int16(signal/sp.absolute(signal).max() * 32767)
wavfile.write('problem1.wav', samplerate, scaled_signal)
plot_signal('problem1.wav')
#=============================================================================
# PROBLEM 2 - Naive DFT
#=============================================================================
def prob2(vec, verbose=False):
"""A naive implementation of the Discrete Fourier Transform.
Parameters
----------
vec : array_like
The 1 x N-1 vector [f(0),f(1),...,f(N-1)].
verbose : boolean, optional
If True, prints out whether or not the DFT was successful,
comparing with scipy.fft(). Defaults to False.
Returns
-------
c : array_like
The 1 x N-1 vector of the DFT of 'vec'.
"""
vec = sp.array(vec, dtype=sp.complex128)
N = len(vec)
c = sp.zeros(N, dtype=sp.complex128)
for k in xrange(N):
c[k] = 1./N*sp.sum(sp.exp((-2*sp.pi*1j*k*sp.arange(N))/N)*vec)
#c[k] = (vec * sp.exp(-2*sp.pi*1j*k*sp.arange(N)/N)).sum()
if verbose:
if sp.allclose(sp.fft(vec)/float(N), c): print "Success!"
else: print "Failure!"
return c
#=============================================================================
# PROBLEM 3
#=============================================================================
def prob3(filename='pianoclip.wav'):
"""Plots the spectrum of a given .wav file, then calculates the location
and value of the largest spike. For the default value, the exact value is
742.281519994 Hz (f#5 + 5 cents)
Parameters
----------
filename: string, optional
The name of the .wav sound file to be examined.
Defaults to 'pianoclip.wav'.
Returns
-------
None
"""
plot_signal(filename)
rate, signal = wavfile.read(filename)
signal = sp.float32(signal)
fsignal = sp.absolute(fftw.fft(signal.T).T)
# Use if scipy_fftpack is unavailable
#fsignal = sp.absolute(sp.fft(signal, axis=0))
plt.plot(fsignal[0:fsignal.shape[0]/2])
plt.title("Spectrum of " + filename)
plt.show()
loc = fsignal[1:].argmax()
val = fsignal[1:].max()
print "\nSpike location:\t" + str(loc)
print "Spike value:\t" + str(val)
print "Hz:\t\t" + str(float(loc*rate)/signal.shape[0])
#==============================================================================
# Problem 4
#==============================================================================
def prob4(filename='saw.wav', new_rate = 11025, outfile='prob4.wav'):
"""Down-samples a given .wav file to a new rate and saves the resulting
signal as another .wav file.
Parameters
----------
filename : string, optional
The name of the .wav sound file to be down-sampled.
Defaults to 'saw.wav'.
new_rate : integer, optional
The down-sampled rate. Defaults to 11025.
outfile : string, optional
The name of the new file. Defaults to prob4.wav.
Returns
-------
None
"""
old_rate, in_sig = wavfile.read(filename)
fin = fftw.fft(sp.float32(in_sig))
# Use if scipy_fftpack is unavailable
# fin = sp.fft(sp.float32(in_sig))
nsiz = sp.floor(in_sig.size * new_rate / old_rate)
nsizh = sp.floor(nsiz / 2)
fout = sp.zeros(nsiz) + 0j
fout[0:nsizh] = fin[0:nsizh]
fout[nsiz-nsizh+1:] = sp.conj(sp.flipud(fout[1:nsizh]))
out = sp.real(sp.ifft(fout))
out = sp.int16(out/sp.absolute(out).max() * 32767)
plot_signal(filename)
wavfile.write('prob4.wav',new_rate,out)
print ""; plot_signal('prob4.wav')
#===============================================================================
# Problem 5
#==============================================================================
def prob5():
"""Try changing the sampling rate of saw.wav to something other than an
integer factor (36000 Hz).
"""
prob4('saw.wav', 36000, 'prob5.wav')
#===============================================================================
```
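The naive DFT in prob2 does O(N^2) work while the FFT used elsewhere in this file does O(N log N); a small timing sketch using the fft already imported from scipy.fftpack at the top of the file:
```python
import time
import numpy as np

vec = np.random.rand(2048)
start = time.time()
prob2(vec)
print "naive DFT (prob2): %.4f seconds" % (time.time() - start)
start = time.time()
fft(vec) / float(len(vec))      # same 1/N normalization as prob2
print "scipy.fftpack fft: %.6f seconds" % (time.time() - start)
```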
#### File: Labs/FiniteDifferenceMethod/plotting.py
```python
from __future__ import division
# import matplotlib
# matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import matplotlib.colors as mcolors
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
from solution import fd_order2_ode, approx_order
from numpy import cos, sin
from math import pi
from scikits import bvp_solver
def prob1():
def u(x):
arg = (x + pi)**2. -1.
return sin(arg)
def up(x):
arg = (x + pi)**2. -1.
return 2.*(x + pi)*cos(arg)
def upp(x):
arg = (x + pi)**2. -1.
return 2.*cos(arg) - (2*(x + pi))**2.*sin(arg)
I = [0.,1.]
N = 5
h = (I[1]-I[0])/N
x = np.linspace(I[0],I[1],N+1)
D,diags = np.ones((1,N-1)), np.array([0,-1,1])
data = np.concatenate((-2.*D,1.*D,1.*D),axis=0) # This stacks up rows
M2 = h**(-2.)*spdiags(data,diags,N-1,N-1).asformat('csr')
ans2 = M2.dot(u(x[1:-1]))
ans2[0] += u(I[0])*h**(-2.) # - (2.*h)**(-1.) )
ans2[-1] += u(I[1])*h**(-2.) #+ (2.*h)**(-1.) )
D,diags = np.ones((1,N-1)), np.array([-1,1])
data = np.concatenate((-1.*D,1.*D),axis=0) # This stacks up rows
M1 = (2.*h)**(-1.)*spdiags(data,diags,N-1,N-1).asformat('csr')
ans1 = M1.dot(u(x[1:-1]))
ans1[0] += -u(I[0])*(2.*h)**(-1.)
ans1[-1] += u(I[1])*(2.*h)**(-1.)
soln = (.5*upp(x) - up(x))[1:-1]
approx = (.5*ans2 - ans1)
print np.max(np.abs(soln-approx))
plt.plot(x[1:-1],soln,'-k',linewidth=1.5)
plt.plot(x[1:-1],approx,'*r',markersize=5.)
plt.show()
return
def prob2():
def bvp(epsilon, subintervals):
# for figure2.pdf
X,Y = fd_order2_ode(func=lambda x:-1.,a1=lambda x:epsilon,
a2=lambda x:-1.,a3=lambda x:0.,
a=0.,b=1., alpha=1.,beta=3.,N=subintervals)
return X,Y
def AnalyticSolution(x,alpha, beta,epsilon):
out = alpha+x+(beta-alpha-1.)*(np.exp(x/epsilon) -1.)/(np.exp(1./epsilon) -1.)
return out
eps, subintervals = 0.1, 20
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
plt.ylabel('$y$',fontsize=16)
plt.xlabel('$x$',fontsize=16)
# plt.axis([-.1,1.1,1-.1,3+.1])
# plt.savefig('figure2.pdf')
plt.show()
plt.clf()
num_approx = 6 # Number of Approximations
N = 2560*np.array([2**j for j in range(num_approx)])
approx_order(num_approx,N,bvp,eps)
return
def prob3():
def bvp(epsilon, subintervals):
# for figure3.pdf
X,Y = fd_order2_ode(func=lambda x:np.cos(x),a1=lambda x:epsilon,
a2=lambda x: 0.,a3=lambda x:-4.*(np.pi-x**2.),
a=0.,b=np.pi/2., alpha=0.,beta=1.,N=subintervals)
return X,Y
eps, subintervals = 0.1, 400
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
plt.ylabel('$y$',fontsize=16)
plt.xlabel('$x$',fontsize=16)
# plt.axis([-.1,np.pi/2.+.1,-.1,1.5])
# plt.savefig('figure3.pdf')
plt.show()
plt.clf()
num_approx = 6 # Number of Approximations
N = 2560*np.array([2**j for j in range(num_approx)])
approx_order(num_approx,N,bvp,eps)
return
def prob4():
def bvp(epsilon, subintervals):
def g(x):
out = -epsilon*pi**2.*cos(pi*x) - pi*x*sin(pi*x)
return out
X,Y = fd_order2_ode(func=g,a1=lambda x:epsilon,
a2=lambda x: x,a3=lambda x:0.,
a=-1.,b=1., alpha=-2.,beta=0.,N=subintervals)
return X,Y
eps, subintervals = 0.1, 20
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
    # import sys; sys.exit()  # debugging leftover; uncomment to stop after the first plot
eps, subintervals = 0.01, 400
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
eps, subintervals = 0.001, 400
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
plt.ylabel('$y$',fontsize=16)
plt.xlabel('$x$',fontsize=16)
# plt.savefig('figure4.pdf')
plt.show()
plt.clf()
num_approx = 6 # Number of Approximations
N = 2560*np.array([2**j for j in range(num_approx)])
approx_order(num_approx,N,bvp,eps)
return
def prob5():
def bvp(epsilon, subintervals):
# X,Y = fd_order2_ode(func=lambda x: 0.,a1=lambda x:1.,
# a2=lambda x: 4.*x/(epsilon+x**2.),a3=lambda x:2./(epsilon+x**2.),
# a=-1.,b=1., alpha=1./(1.+epsilon),
# beta=1./(1.+epsilon),N=subintervals)
X,Y = fd_order2_ode(func=lambda x: 0.,a1=lambda x:(epsilon+x**2.),
a2=lambda x: 4.*x,a3=lambda x:2.,
a=-1.,b=1., alpha=1./(1.+epsilon),
beta=1./(1.+epsilon),N=subintervals)
return X,Y
eps, subintervals = 0.01, 100
X,Y = bvp(eps, subintervals)
plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
# eps, subintervals = 0.02, 200
# X,Y = bvp(eps, subintervals)
# plt.plot(X,Y,'-k',mfc="None",linewidth=2.0)
plt.ylabel('$y$',fontsize=16)
plt.xlabel('$x$',fontsize=16)
# plt.savefig('figure5.pdf')
plt.show()
plt.clf()
# import sys; sys.exit()
num_approx = 5 # Number of Approximations
N = 2560*np.array([2**j for j in range(num_approx)])
approx_order(num_approx,N,bvp,eps)
return
def prob5_again():
"""
Using scikits.bvp_solver to solve the bvp
"""
epsilon = .05
lbc, rbc = 1./(1.+epsilon), 1./(1.+epsilon)
def function1(x , y):
return np.array([y[1] , -4.*x/(epsilon+x**2.)*y[1]-2./(epsilon+x**2.)*y[0] ])
def boundary_conditions(ya,yb):
return (np.array([ya[0] - lbc]),
np.array([yb[0] - rbc]))
problem = bvp_solver.ProblemDefinition(num_ODE = 2,
num_parameters = 0,
num_left_boundary_conditions = 1,
boundary_points = (-1, 1),
function = function1,
boundary_conditions = boundary_conditions)
solution = bvp_solver.solve(problem,
solution_guess = (1./(1.+epsilon),
0.))
A = np.linspace(-1.,1., 200)
T = solution(A)
plt.plot(A, T[0,:],'-k',linewidth=2.0)
plt.show()
plt.clf()
return
def prob3_again():
"""
Using scikits.bvp_solver to solve the bvp
"""
epsilon = .1
lbc, rbc = 0., 1.
def function1(x , y):
return np.array([y[1] , (4./epsilon)*(pi-x**2.)*y[0] + 1./epsilon*cos(x) ])
def boundary_conditions(ya,yb):
return (np.array([ya[0] - lbc]),
np.array([yb[0] - rbc]))
problem = bvp_solver.ProblemDefinition(num_ODE = 2,
num_parameters = 0,
num_left_boundary_conditions = 1,
boundary_points = (0., pi/2.),
function = function1,
boundary_conditions = boundary_conditions)
solution = bvp_solver.solve(problem,
solution_guess = (1.,
0.))
A = np.linspace(0.,pi/2., 200)
T = solution(A)
plt.plot(A, T[0,:],'-k',linewidth=2.0)
plt.show()
plt.clf()
return
# prob1()
# prob2()
# prob3()
# prob4() # Profile correct; taken from 32 test problems for analysis
prob5()
# A check that fd_order2_ode works for problems 3 and 5.
# Uses bvp_solver from scikits
# prob5_again()
# prob3_again()
```
#### File: Labs/FiniteVolume/solution.py
```python
from __future__ import division
import numpy as np
from matplotlib import animation, rcParams
from matplotlib import pyplot as plt
from matplotlib.artist import setp
plt.switch_backend('tkagg')
rcParams['figure.figsize'] = 12, 8.5
from matplotlib.pyplot import Line2D
import time
a = 0
def math_animation(Data,time_steps,view,wait):
X,Array,Constant = Data
fig = plt.figure()
ax = plt.axes(xlim=tuple(view[0:2]), ylim=tuple(view[2:]) )
line, = ax.plot([], [], lw=2.6,c='k')
lines = [line]
lines.append(ax.plot([], [], lw=2.6,c='r')[0])
def initialize_background():
if Constant==None:
lines[0].set_data([], [])
else:
lines[0].set_data(X, Constant)
# line.set_data([], [])
# line += ax.plot(X, Constant, '-', c='k')
return lines
def animate_function(i):
global a
if i==0:
time.sleep(.3)
if a<time_steps:
lines[1].set_data(X, Array[i,:])
setp(lines[1], linewidth=2.6, color='k')
else:
lines[1].set_data(X, Array[-1,:])
a+=1
return lines
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate_function, init_func=initialize_background,
frames=time_steps, interval=wait)#, blit=True)
# frames must be a generator, an iterable, or a number of frames
# Draws a new frame every interval milliseconds.
plt.show()
return
```
#### File: Labs/FloatingPointIEEE/plots.py
```python
import numpy as np
from numpy.random import rand
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt
from solutions import sqrt64
def invsqrt64(A, reps):
Ac = A.copy()
if 0 < reps:
Ac2 = A.copy()
Ac2 /= - 2
Ac3 = np.empty_like(Ac)
I = Ac.view(dtype=np.int64)
I >>= 1
I *= -1
I += 0x5fe6ec85e7de30da #hexadecimal representation of the constant
for j in xrange(reps):
Ac3[:] = Ac
Ac3 *= Ac
Ac3 *= Ac2
Ac3 += 1.5
Ac *= Ac3
return Ac
def sqrt0():
X = np.linspace(0, 3, 501)
plt.plot(X, sqrt64(X, 0), X, np.sqrt(X))
plt.savefig("sqrt0.pdf")
plt.clf()
def sqrt1():
X = np.linspace(0, 3, 501)
plt.plot(X, sqrt64(X, 1), X, np.sqrt(X))
plt.savefig("sqrt1.pdf")
plt.clf()
def invsqrt0():
X = np.linspace(.1, 3, 291)
plt.plot(X, invsqrt64(X, 0), X, 1./np.sqrt(X))
plt.savefig("invsqrt0.pdf")
plt.clf()
def invsqrt1():
X = np.linspace(.1, 3, 291)
plt.plot(X, invsqrt64(X, 1), X, 1./np.sqrt(X))
plt.savefig("invsqrt1.pdf")
plt.clf()
if __name__ == "__main__":
sqrt0()
sqrt1()
invsqrt0()
invsqrt1()
```
#### File: Labs/FourierExtensions/helperCode.py
```python
import numpy as np
def hamming(n):
"""
Generate a hamming window of n points as a numpy array.
"""
return 0.54 - 0.46 * np.cos(2 * np.pi / n * (np.arange(n) + 0.5))
def melfb(p, n, fs):
"""
Return a Mel filterbank matrix as a numpy array.
Inputs:
p: number of filters in the filterbank
n: length of fft
fs: sample rate in Hz
Returns:
M: a Mel filterbank matrix satisfying the inputs
Ref. http://www.ifp.illinois.edu/~minhdo/teaching/speaker_recognition/code/melfb.m
"""
f0 = 700.0 / fs
fn2 = int(np.floor(n/2))
lr = np.log(1 + 0.5/f0) / (p+1)
CF = fs * f0 * (np.exp(np.arange(1, p+1) * lr) - 1)
bl = n * f0 * (np.exp(np.array([0, 1, p, p+1]) * lr) - 1)
b1 = int(np.floor(bl[0])) + 1
b2 = int(np.ceil(bl[1]))
b3 = int(np.floor(bl[2]))
b4 = min(fn2, int(np.ceil(bl[3]))) - 1
pf = np.log(1 + np.arange(b1,b4+1) / f0 / n) / lr
fp = np.floor(pf)
pm = pf - fp
M = np.zeros((p, 1+fn2))
for c in np.arange(b2-1,b4):
r = fp[c] - 1
M[r,c+1] += 2 * (1 - pm[c])
for c in np.arange(b3):
r = fp[c]
M[r,c+1] += 2 * pm[c]
return M
```
#### File: Labs/GaussianQuadrature/quadrature_solutions_template.py
```python
def shift_function(f, a, b, n=401):
""" 'f' is a callable funciton, 'a' and 'b' are
the limits of the interval you want to consider."""
pass
# plotting for the example in the function shifting problem
def funcplot(f, a, b, n=401):
""" Constructs and plots the example given in the
problem on shifting the domain of a function to [-1, 1].
'n' is the number of points to use to generate the plot."""
pass
# example in the function shifting problem
def shift_example(n=401):
""" Plot the example given in the function shifting problem."""
pass
# integral estimation problem
def estimate_integral(f, a, b, points, weights):
""" Estimate the value of an integral given
the function 'f', the interval bounds 'a' and 'b',
the nodes to use for sampling, and their
corresponding weights."""
pass
# Jacobi construction problem
def construct_jacobi(a, b, c):
""" Construct the Jacobi matrix given the
sequences 'a', 'b', and 'c' from the
three term recurrence relation."""
pass
# points and weights problem
def points_and_weights(n):
""" Find the set of 'n' nodes and their
corresponding weights for the interval [-1, 1]."""
pass
# normal distribution cdf problem
def normal_cdf(x):
"""Compute the CDF of the standard normal
distribution at the point 'x'."""
pass
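# A possible end-to-end check for this template (not part of the original
# file; it assumes numpy and scipy are available and that the stubs above
# have been implemented): the standard normal CDF at 'x' can be estimated
# by integrating the pdf from a point far in the left tail, e.g. -5.
# import numpy as np
# from scipy import stats
# def normal_cdf_check(x, n=20):
#     pts, wts = points_and_weights(n)
#     pdf = lambda t: np.exp(-t**2/2.)/np.sqrt(2*np.pi)
#     return estimate_integral(pdf, -5., x, pts, wts), stats.norm.cdf(x)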
```
#### File: Labs/HeatFlow/solution.py
```python
from __future__ import division
import numpy as np
from matplotlib import animation
from matplotlib import cm
from matplotlib import pyplot as plt
from matplotlib.artist import setp
plt.switch_backend('tkagg')
from mpl_toolkits.mplot3d.axes3d import Axes3D
from scipy.sparse import spdiags, coo_matrix, bmat, identity
from scipy.sparse.linalg import spsolve
def heatexplicit(init_conditions,x_subintervals,t_subintervals,x_interval=[-10,10],T=1.,flag3d="off",nu=1.):
'''
Parameters
nu: diffusive constant
L, T: Solve on the Cartesian rectangle (x,t) in x_interval x [0,T]
x_subintervals: Number of subintervals in spatial dimension
t_subintervals: Number of subintervals in time dimension
CFL condition: nu delta_t/delta_x**2 <= 1/2. Where delta_t = T/t_subintervals,
delta_x = (b-a)/x_subintervals,
a, b = x_interval[0], x_interval[1]
In terms of our constants, this means that t_subintervals >=(2.*nu)*(T*x_subintervals**2/(b-a)**2.) or
equivalently that (nu/4.)*(T/L**2.)*(x_subintervals**2./t_subintervals) <= 1/2
'''
a, b = x_interval[0], x_interval[1]
delta_x, delta_t = (b-a)/x_subintervals, T/t_subintervals
    if nu*delta_t/delta_x**2 > 1/2:
        print "The CFL condition is not satisfied:"
        print "nu*delta_t/delta_x**2 = " + str(nu*delta_t/delta_x**2) + " > 1/2"
        print "Recommend t_subintervals >= " + str((2.*nu)*(T*x_subintervals**2/(b-a)**2.))
K = nu*delta_t/delta_x**2.
# print str(J)+" subintervals of Space domain.\n"
# print str(int(N))+" subintervals of Time domain."
D0,D1,diags = (1.-2.*K)*np.ones((1,(x_subintervals-1))), K*np.ones((1,(x_subintervals-1))), np.array([0,-1,1])
data = np.concatenate((D0,D1,D1),axis=0) # This stacks up rows
A=spdiags(data,diags,(x_subintervals-1),(x_subintervals-1)).asformat('csr')
U = np.zeros((x_subintervals+1,t_subintervals+1)) #[:,0:-1]
U[:,0] = init_conditions #np.maximum(1.-np.linspace(a,b,J+1)**2,0.) # Initial Conditions
for j in range(0,int(t_subintervals)+1):
if j>0: U[1:-1,j] = A*U[1:-1,j-1]
return np.linspace(a,b,x_subintervals+1), U
def heat_Crank_Nicolson(init_conditions,x_subintervals,t_subintervals,x_interval=[-10,10],T=1.,flag3d="off",nu=1.):
'''
Parameters
nu: diffusive constant
L, T: Solve on the Cartesian rectangle (x,t) in x_interval x [0,T]
x_subintervals: Number of subintervals in spatial dimension
t_subintervals: Number of subintervals in time dimension
a, b = x_interval[0], x_interval[1]
'''
a, b = x_interval[0], x_interval[1]
delta_x, delta_t = (b-a)/x_subintervals, T/t_subintervals
K = .5*nu*delta_t/delta_x**2.
D0,D1,diags = (1-2.*K)*np.ones((1,(x_subintervals-1))), K*np.ones((1,(x_subintervals-1))), np.array([0,-1,1])
data = np.concatenate((D0,D1,D1),axis=0) # This stacks up rows
A=spdiags(data,diags,(x_subintervals-1),(x_subintervals-1)).asformat('csr')
# print K
# print A.todense()
D0,D1,diags = (1.+2.*K)*np.ones((1,(x_subintervals-1))), -K*np.ones((1,(x_subintervals-1))), np.array([0,-1,1])
data = np.concatenate((D0,D1,D1),axis=0) # This stacks up rows
B=spdiags(data,diags,(x_subintervals-1),(x_subintervals-1)).asformat('csr')
U = np.zeros((x_subintervals+1,t_subintervals+1))
U[:,0] = init_conditions
for j in range(0,int(t_subintervals)+1):
if j>0: U[1:-1,j] = spsolve(B,A*U[1:-1,j-1] )
return np.linspace(a,b,x_subintervals+1), U
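# Each Crank-Nicolson step above solves the linear system
# (I + K*L) U^{j+1} = (I - K*L) U^j, where L is the tridiagonal
# second-difference matrix with stencil (-1, 2, -1) and
# K = .5*nu*delta_t/delta_x**2, matching the sparse matrices B and A
# constructed in the function.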
a = 0
def math_animation(Data,time_steps,view,interval_length):
X,Array = Data
fig = plt.figure()
ax = plt.axes(xlim=tuple(view[0:2]), ylim=tuple(view[2:]) )
line, = ax.plot([], [], lw=2)
def initialize_background():
line.set_data([], [])
return line,
def animate_function(i):
global a
if a<time_steps:
line.set_data(X, Array[i,:])
setp(line, linewidth=2, color='k')
# line = Line2D(X, Array[:,i], color='red', linewidth=2)
else:
line.set_data(X, Array[-1,:])
a+=1
return line
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate_function, init_func=initialize_background,
frames=time_steps, interval=interval_length)#, blit=True)
# frames must be a generator, an iterable, or a number of frames
# Draws a new frame every interval milliseconds.
plt.show()
# Solving u_t = nu u_{xx}, t \in [0,T], x \in [-L, L]
# import numpy as np
# from solution import heatexplicit, math_animation
# import matplotlib.pyplot as plt
# from matplotlib import cm
def plot3d():
    T, L, J, nu = 1., 10., 320, 1.
    # 1024 time subintervals is an illustrative choice satisfying the CFL
    # condition t_subintervals >= 2*nu*T*J**2/(2*L)**2 = 512.
    N = 1024
    # Initial condition matching the default noted in heatexplicit.
    init_conditions = np.maximum(1. - np.linspace(-L, L, J+1)**2, 0.)
    X, U = heatexplicit(init_conditions, J, N, x_interval=[-L, L], T=T, nu=nu)
    # Produce a 3D plot of the solution on a coarsened grid.
    xv, tv = np.meshgrid(np.linspace(-L, L, J//4+1), np.linspace(0, T, N//8+1))
Z = U[::4,::8].T
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(tv, xv, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_xlabel('Y'); ax.set_ylabel('X'); ax.set_zlabel('Z')
plt.show()
def animate_heat1d():
x_subintervals, t_subintervals = 20,1600
x_interval,t_final = [0,1],2
init_conditions = np.sin(2.*np.pi*(np.linspace(0,1,x_subintervals+1)))
Data = heatexplicit(init_conditions,x_subintervals,t_subintervals,x_interval,T=t_final,flag3d="on",nu=1.)
Data = Data[0], Data[1].T[0:-1,:]
time_steps, view = Data[1].shape[0], [-.1, 1.1,-1.1, 1.1]
interval=40
math_animation(Data,time_steps,view,interval)
def plot_error():
# Compare solutions for various discretizations
X, U, L = [], [], 10.
List, LineStyles = [40,80,160,320,640,1280,2560], ['--k','-.k',':k','-k','-k','-k']
    for points in List:
        init_conditions = np.maximum(1. - np.linspace(-L, L, points+1)**2, 0.)
        # points**2//100 time subintervals is an illustrative choice that
        # satisfies the CFL condition t_subintervals >= 2*nu*T*points**2/(2*L)**2.
        x, u = heatexplicit(init_conditions, points, points**2//100,
                            x_interval=[-L, L], T=1., nu=1.)
        X.append(x)
        U.append(u[:,-1])
        del x, u
        print points
# # Plot of solutions for varying discretizations
# for j in range(0,len(LineStyles)): plt.plot(X[j],U[j],LineStyles[j],linewidth=1)
# plt.axis([-10,10,-.1,.4])
# plt.show()
# plt.clf()
h = [2.*L/item for item in List[:-1]]
def graph(X,U,List,h):
# # Error Chart 1: Plot approximate error for each solution
# for j in range(0,len(List)-1): plt.plot(X[j],abs(U[-1][::2**(6-j)]-U[j]),'-k',linewidth=1.2)
        for j in range(0,len(List)-1): plt.plot(X[j],abs(U[-1][::List[-1]//List[j]]-U[j]),'-k',linewidth=1.2)
plt.savefig("ApproximateError.pdf")
plt.clf()
# # Error Chart 2: Create a log-log plot of the max error of each solution
# MaxError = [max(abs(U[-1][::2**(6-j)]-U[j] )) for j in range(0,len(List)-1)]
        MaxError = [max(abs(U[-1][::List[-1]//List[j]]-U[j] )) for j in range(0,len(List)-1)]
plt.loglog(h,MaxError,'-k',h,MaxError,'ko')
plt.ylabel("Max Error")
plt.xlabel('$\Delta x$')
h, MaxError = np.log(h)/np.log(10),np.log(MaxError)/np.log(10)
print '\n'*2, "Approximate order of accuracy = ", (MaxError[-1]-MaxError[0])/(h[-1]-h[0])
plt.savefig("MaximumError.pdf")
# plt.show()
graph(X,U,List,h)
if __name__=="__main__":
# plot3d()
# plot_error()
animate_heat1d()
```
#### File: Labs/HMM/hmm_solution.py
```python
import numpy as np
from matplotlib import pyplot as plt
import string
class hmm(object):
"""
    Finite state space hidden Markov model.
"""
def __init__(self):
"""
Initialize model parameters.
Parameters
----------
A : ndarray of shape (n,n)
Column-stochastic state transition matrix.
B : ndarray of shape (m,n)
Column-stochastic observation matrix
pi : ndarray of shape (n,)
Initial state distribution
"""
self.A = None
self.B = None
self.pi = None
def _log_prob(self, c):
"""
Calculate the probability of an observation sequence given model parameters,
using the output of the forward pass.
Parameters
----------
c : ndarray of shape (T,)
The scaling numbers from forward pass
Returns
-------
out : float
The log probability of obs given model parameters
"""
return -(np.log(c)).sum()
def _forward(self, obs):
T = len(obs)
n = self.A.shape[0]
alpha = np.zeros((T,n))
c = np.zeros(T)
alpha[0,:] = self.pi*self.B[obs[0],:]
c[0] = 1./(alpha[0,:].sum())
alpha[0,:] *= c[0]
for i in xrange(1,T):
alpha[i,:] = (self.A.dot(alpha[i-1,:]))*self.B[obs[i],:]
c[i] = 1./(alpha[i,:].sum())
alpha[i,:] *= c[i]
return alpha, c
def _backward(self, obs, c):
T = len(obs)
n = self.A.shape[0]
beta = np.zeros((T,n))
beta[-1,:] = c[-1]
for i in xrange(T-2,-1,-1):
beta[i,:] = c[i]*((self.A.T).dot(self.B[obs[i+1],:]*beta[i+1,:]))
return beta
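    # The scaling constants c from _forward keep both alpha and beta from
    # underflowing on long observation sequences; the log likelihood of the
    # observations is recovered as -sum(log(c)), as computed in _log_prob.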
def _delta(self, obs, alpha, beta):
T, n = alpha.shape
delta = np.zeros((T-1,n,n))
gamma = np.zeros((T,n))
for t in xrange(T-1):
delta[t,:,:] = (self.B[obs[t+1],:]*beta[t+1,:])*(self.A[:,:].T)*alpha[t,:].reshape((n,1))
delta[t,:,:] /= delta[t,:,:].sum()
gamma[:-1,:] = delta.sum(axis=2)
gamma[-1,:] = alpha[-1,:]*beta[-1,:]/(alpha[-1,:]*beta[-1,:]).sum()
return delta, gamma
def _estimate(self, obs, delta, gamma):
self.pi = gamma[0,:]
self.A = delta.sum(axis=0).T/gamma[:-1,:].sum(axis=0)
for j in xrange(self.B.shape[0]):
self.B[j,:] = gamma[obs==j].sum(axis=0)
self.B /= gamma.sum(axis=0)
def fit(self, obs, A, B, pi, max_iter=100, tol=1e-3):
"""
Use EM to fit model parameters to a given observation sequence.
Parameters
----------
obs : ndarray of shape (T,)
Observation sequence on which to train the model.
A : stochastic ndarray of shape (N,N)
Initialization of state transition matrix
B : stochastic ndarray of shape (M,N)
Initialization of state-observation matrix
pi : stochastic ndarray of shape (N,)
Initialization of initial state distribution
max_iter : integer
The maximum number of iterations to take
tol : float
The convergence threshold for change in log-probability
"""
self.A = A.copy()
self.B = B.copy()
self.pi = pi.copy()
old_ll = 0
ll = 0
log_liks = []
for i in xrange(max_iter):
alpha, c = self._forward(obs)
beta = self._backward(obs, c)
delta, gam = self._delta(obs, alpha, beta)
self._estimate(obs, delta, gam)
ll = -np.log(c).sum()
log_liks.append(ll)
if abs(ll-old_ll) < tol:
break
else:
old_ll = ll
def trainHMM():
# load and process the data
with open("declaration.txt", 'r') as f:
dec = f.read(-1).lower()
dec = dec.translate(string.maketrans("",""), string.punctuation+"\n")
char_map = list(set(dec))
obs = []
for i in dec:
obs.append(char_map.index(i))
obs = np.array(obs)
# train the HMM on the declaration dataset
N = 2
M = len(char_map)
A = np.random.dirichlet(np.ones(N), size=N).T
B = np.random.dirichlet(np.ones(M), size=N).T
pi = np.random.dirichlet(np.ones(N))
h = hmm()
h.fit(obs,A, B, pi, max_iter=200)
return h
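# Usage sketch (not part of the original file; it assumes 'declaration.txt'
# sits in the working directory, as the lab describes):
# h = trainHMM()
# print np.round(h.B, 3)   # emission probabilities for the two hidden states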
```
#### File: Labs/ImageSegmentation/getNeighbors.py
```python
import numpy as np

def getNeighbors(index, radius, height, width):
'''
Calculate the indices and distances of pixels within radius
of the pixel at index, where the pixels are in a (height, width) shaped
array. The returned indices are with respect to the flattened version of the
array. This is a helper function for adjacency.
Inputs:
index -- denotes the index in the flattened array of the pixel we are
looking at
radius -- radius of the circular region centered at pixel (row, col)
height, width -- the height and width of the original image, in pixels
Returns:
indices -- a flat array of indices of pixels that are within distance r
of the pixel at (row, col)
distances -- a flat array giving the respective distances from these
pixels to the center pixel.
'''
# Find appropriate row, column in unflattened image for flattened index
row, col = index/width, index%width
# Cast radius to an int (so we can use arange)
r = int(radius)
# Make a square grid of side length 2*r centered at index
# (This is the sup-norm)
x = np.arange(max(col - r, 0), min(col + r+1, width))
y = np.arange(max(row - r, 0), min(row + r+1, height))
X, Y = np.meshgrid(x, y)
# Narrows down the desired indices using Euclidean norm
# (i.e. cutting off corners of square to make circle)
R = np.sqrt(((X-np.float(col))**2+(Y-np.float(row))**2))
mask = (R<radius)
# Return the indices of flattened array and corresponding distances
return (X[mask] + Y[mask]*width, R[mask])
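# Small usage example (not part of the original file): neighbors of the
# pixel at flattened index 12 within radius 2 of a 5x5 image.
if __name__ == "__main__":
    indices, distances = getNeighbors(12, 2., 5, 5)
    print indices
    print distances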
```
#### File: Labs/InteriorPoint1/IntPointSolutions.py
```python
import numpy as np
from scipy import linalg as la
from matplotlib import pyplot as plt
def startingPoint(A, b, c):
'''
Calculate an initial guess to the solution of the
linear program min c^T x, Ax = b, x>=0.
Inputs:
A -- array of shape (m,n) with linearly independent rows
b -- array of length m
c -- array of length n
Returns:
x -- array of length n
lam -- array of length m
s -- array of length n
Ref: Nocedal and Wright, p. 410
'''
# first calculate x, lam, s of minimal norm satisfying the primal and dual constraints
B = la.inv(A.dot(A.T))
x = A.T.dot(B.dot(b))
lam = B.dot(A.dot(c))
s = c - A.T.dot(lam)
# perturb x and s so they are nonnegative
dx = max((-3./2)*x.min(), 0)
ds = max((-3./2)*s.min(), 0)
x += dx*np.ones(x.shape)
s += ds*np.ones(s.shape)
# perturb x and s so they are not too close to zero, not too dissimilar
dx = .5*(x*s).sum()/s.sum()
ds = .5*(x*s).sum()/x.sum()
x += dx*np.ones(x.shape)
s += ds*np.ones(s.shape)
return x, lam, s
def interiorPoint(A, b, c, niter=20, verbose=False, starting_point=None, pts=False):
'''
Solve the linear programming problem min c^T x, Ax = b, x>=0
using an Interior Point method. This code is not optimized, but
forms the basis for a common practical approach known as the
Predictor-Corrector Algorithm.
Inputs:
A -- array of shape (m,n) with linearly independent rows
b -- array of length m
c -- array of length n
niter -- positive integer giving the number of iterations
starting_point -- tuple of arrays giving the initial values for x, l, and s.
if unspecified, the function startingPoint is used.
Returns:
x -- the optimal point
val -- the minimum value of the objective function
(pts -- list of points traced by the algorithm, returned if pts=True)
Ref: <NAME> Wright, p. 411
'''
    # The 'pts' argument is a flag; remember it before reusing the name for
    # the list of points traced by the algorithm.
    return_pts = pts
    pts = []
# initialize variables
m,n = A.shape
if starting_point:
x, l, s = starting_point
else:
x,l,s = startingPoint(A,b,c)
pts.append(x)
N = np.zeros((n+m+n, n+m+n))
N[:n, n:n+m] = A.T
N[:n, n+m:] = np.eye(n)
N[n:n+m, :n] = A
sol = np.empty(n+m+n)
for k in xrange(niter):
# finish initializing parts of the step equation
N[n+m:, :n] = np.diag(s)
N[n+m:, n+m:] = np.diag(x)
r_c = (A.T).dot(l)+s-c
r_b = A.dot(x)-b
rhs = np.hstack((-r_c.ravel(), -r_b.ravel(), -x*s))
# solve dx_aff, dl_aff, ds_aff using LU decomposition
lu_piv = la.lu_factor(N)
sol[:] = la.lu_solve(lu_piv, rhs)
dx_aff = sol[:n]
dl_aff = sol[n:n+m]
ds_aff = sol[n+m:]
# calculate a_p, a_d, mu_aff
mask1 = dx_aff < 0
if mask1.sum() > 0:
a_p = min(1, ((-x/dx_aff)[mask1]).min())
else:
a_p = 1
mask2 = ds_aff < 0
if mask2.sum() > 0:
a_d = min(1, (-s/ds_aff)[mask2].min())
else:
a_d = 1
mu_aff = ((x+a_p*dx_aff)*(s+a_d*ds_aff)).sum()/np.float(n)
# calculate mu times the centering parameter sig
mu = (x*s).sum()/n
musig = mu_aff**3/mu**2
# calculate dx, dl, ds
rhs[n+m:] += - dx_aff*ds_aff + musig
sol[:] = la.lu_solve(lu_piv, rhs)
dx = sol[:n]
dl = sol[n:n+m]
ds = sol[n+m:]
# calculate ap, ad
nu = 1-.1/(k+1)
mask3 = dx < 0
if mask3.sum() > 0:
ap_max = (-x/dx)[mask3].min()
ap = min(1, nu*ap_max)
else:
ap = 1
mask4 = ds < 0
if mask4.sum() > 0:
ad_max = (-s/ds)[mask4].min()
ad = min(1, nu*ad_max)
else:
ad = 1
# step to new point
x = x + ap*dx
l = l + ad*dl
s = s + ad*ds
pts.append(x)
if verbose:
print '{0:f} {1:f}'.format((c*x).sum(), mu)
    if return_pts:
        return pts
    else:
        return x, (c*x).sum()
def randomLP(m,n):
'''
Generate a linear program min c^T x s.t. Ax = b, x>=0.
First generate m feasible constraints, then add
slack variables to convert it into the above form.
Inputs:
m -- positive integer >= n, number of desired constraints
n -- dimension of space in which to optimize
Outputs:
A -- array of shape (m,n+m)
b -- array of shape (m,)
c -- array of shape (n+m,), with m trailing 0s
v -- the solution to the LP
'''
# generate random constraints (each row corresponds to the normal vector defining
# a linear constraint)
A = np.random.random((m,n))*20 - 10
# adjust so that the normal vector of each constraint lies in the upper half-space.
# this ensures that the constraints permit a feasible region
A[A[:,-1]<0] *= -1
# adjust so that the solution to the program is a prescribed point v in the first
# quadrant.
v = np.random.random(n)*10
#k = np.random.randint(n,m+1)
k = n
b = np.zeros(m)
b[:k] = A[:k,:].dot(v)
b[k:] = A[k:,:].dot(v) + np.random.random(m-k)*10
# now create the appropriate c vector, a weighted sum of the first k constraints
c = np.zeros(n+m)
c[:n] = A[:k,:].sum(axis=0)/k
# at this point, we should have a program max c^T x s.t. Ax <= b, x >= 0
# we need to convert it to standard equality form by adding slack variables
A = np.hstack((A, np.eye(m)))
# we now have the program min -c^T x s.t. Ax = b, x>=0.
# the optimal solution has x[:n] = v
return A, b, -c, v
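# Quick sanity-check sketch (not part of the original file): the solver
# applied to a random LP should land near the prescribed solution 'v'.
# A, b, c, v = randomLP(7, 5)
# point_list = interiorPoint(A, b, c, niter=20, pts=True)
# print point_list[-1][:5], v   # the two should agree closely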
def leastAbsoluteDeviations():
"""
This code should be fairly close to what the students submit for the least absolute deviations
problem.
"""
data = np.loadtxt('simdata.txt')
m = data.shape[0]
n = data.shape[1] - 1
c = np.zeros(3*m + 2*(n + 1))
c[:m] = 1
y = np.empty(2*m)
y[::2] = -data[:, 0]
y[1::2] = data[:, 0]
x = data[:, 1:]
A = np.ones((2*m, 3*m + 2*(n + 1)))
A[::2, :m] = np.eye(m)
A[1::2, :m] = np.eye(m)
A[::2, m:m+n] = -x
A[1::2, m:m+n] = x
A[::2, m+n:m+2*n] = x
A[1::2, m+n:m+2*n] = -x
A[::2, m+2*n] = -1
A[1::2, m+2*n+1] = -1
A[:, m+2*n+2:] = -np.eye(2*m, 2*m)
    sol = interiorPoint(A, y, c, niter=10, verbose=True, pts=True)[-1]
beta = (sol[m:m+n] - sol[m+n:m+2*n])[0]
b = sol[m+2*n] - sol[m+2*n+1]
dom = np.linspace(0,10,2)
plt.scatter(data[:,1], data[:,0])
plt.plot(dom, beta*dom+b)
plt.show()
print 'Beta:', beta
print 'b:', b
```
#### File: Labs/KalmanFilter/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import solutions
from matplotlib import pyplot as plt
def evolution():
u = np.array([0,0,0,-.98])
H = np.array([[1,0,0,0],[0,1.,0,0]])
Q = 0.1*np.eye(4)
R = 5000*np.eye(2)
F = np.array([[1,0,.1,0],[0,1,0,.1],[0,0,1,0],[0,0,0,1]])
x0 = np.array([0,0,300.,600])
kal = solutions.KalmanFilter(F,Q,H,R,u)
s,o = kal.evolve(x0,1250)
plt.plot(s[0,:],s[1,:])
plt.ylim(0,1.1*(s[1,:].max()))
plt.savefig("states_evolution.pdf")
plt.clf()
plt.plot(o[0,::8],o[1,::8], 'r.')
plt.ylim(0,1.1*(s[1,:].max()))
plt.savefig("obs_evolution.pdf")
plt.clf()
def norms():
u = np.array([0,0,0,-.98])
H = np.array([[1,0,0,0],[0,1.,0,0]])
Q = 0.1*np.eye(4)
R = 5000*np.eye(2)
F = np.array([[1,0,.1,0],[0,1,0,.1],[0,0,1,0],[0,0,0,1]])
x0 = np.array([0,0,300.,600])
kal = solutions.KalmanFilter(F,Q,H,R,u)
s,o = kal.evolve(x0,1250)
ave_vel = np.diff(o[:,200:210], axis=1).mean(axis=1)/.1
x = np.zeros(4)
x[:2] = o[:,200]
x[2:] = ave_vel
P = 10**6*Q
estimates, norms = kal.estimate(x,P,o[:,201:801], return_norms=True)
plt.plot(norms)
plt.savefig("norms.pdf")
plt.clf()
def estimates():
u = np.array([0,0,0,-.98])
H = np.array([[1,0,0,0],[0,1.,0,0]])
Q = 0.1*np.eye(4)
R = 5000*np.eye(2)
F = np.array([[1,0,.1,0],[0,1,0,.1],[0,0,1,0],[0,0,0,1]])
x0 = np.array([0,0,300.,600])
kal = solutions.KalmanFilter(F,Q,H,R,u)
s,o = kal.evolve(x0,1250)
ave_vel = np.diff(o[:,200:210], axis=1).mean(axis=1)/.1
x = np.zeros(4)
x[:2] = o[:,200]
x[2:] = ave_vel
P = 10**6*Q
estimates = kal.estimate(x,P,o[:,201:801])
plt.plot(s[0,:][np.where(s[1,:]>=0)], s[1,:][np.where(s[1,:]>=0)])
plt.plot(o[0,201:801], o[1,201:801], 'r.')
plt.plot(estimates[0,:],estimates[1,:], 'g')
plt.savefig("estimate_macro.pdf")
plt.clf()
S = 250
E = S+50
plt.plot(s[0,S:E], s[1,S:E])
plt.plot(o[0,S:E], o[1,S:E], 'r.')
plt.plot(estimates[0,S-201:E-201],estimates[1,S-201:E-201], 'g')
plt.savefig("estimate_micro.pdf")
plt.clf()
def impact():
u = np.array([0,0,0,-.98])
H = np.array([[1,0,0,0],[0,1.,0,0]])
Q = 0.1*np.eye(4)
R = 5000*np.eye(2)
F = np.array([[1,0,.1,0],[0,1,0,.1],[0,0,1,0],[0,0,0,1]])
x0 = np.array([0,0,300.,600])
kal = solutions.KalmanFilter(F,Q,H,R,u)
s,o = kal.evolve(x0,1250)
ave_vel = np.diff(o[:,200:210], axis=1).mean(axis=1)/.1
x = np.zeros(4)
x[:2] = o[:,200]
x[2:] = ave_vel
P = 10**6*Q
estimates = kal.estimate(x,P,o[:,201:801])
predicted = kal.predict(estimates[:,-1],450)
plt.plot(s[0,:], s[1,:])
plt.plot(predicted[0,:], predicted[1,:], 'y')
plt.ylim(0)
plt.savefig("impact_macro.pdf")
plt.clf()
x1 = s[0,:][np.where(s[1,:]>=0)][-1]
x2 = predicted[0,:][np.where(predicted[1,:]>=0)][-1]
plt.plot(s[0,:], s[1,:])
plt.plot(predicted[0,:], predicted[1,:], 'y')
plt.ylim(0,100)
plt.xlim(min(x1,x2)-50, max(x1,x2)+50)
plt.savefig("impact_micro.pdf")
plt.clf()
def origin():
u = np.array([0,0,0,-.98])
H = np.array([[1,0,0,0],[0,1.,0,0]])
Q = 0.1*np.eye(4)
R = 5000*np.eye(2)
F = np.array([[1,0,.1,0],[0,1,0,.1],[0,0,1,0],[0,0,0,1]])
x0 = np.array([0,0,300.,600])
kal = solutions.KalmanFilter(F,Q,H,R,u)
s,o = kal.evolve(x0,1250)
ave_vel = np.diff(o[:,200:210], axis=1).mean(axis=1)/.1
x = np.zeros(4)
x[:2] = o[:,200]
x[2:] = ave_vel
P = 10**6*Q
estimates = kal.estimate(x,P,o[:,201:801])
rewound = kal.rewind(estimates[:,49],300)
plt.plot(s[0,:],s[1,:])
plt.plot(rewound[0,:],rewound[1,:])
plt.ylim(0)
plt.savefig("origin_macro.pdf")
plt.clf()
x1 = s[0,:][np.where(s[1,:]>=0)][0]
x2 = rewound[0,:][np.where(rewound[1,:]>=0)][0]
plt.plot(s[0,:],s[1,:])
plt.plot(rewound[0,:],rewound[1,:])
plt.ylim(0,100)
plt.xlim(min(x1,x2)-50, max(x1,x2)+50)
plt.savefig("origin_micro.pdf")
plt.clf()
if __name__ == "__main__":
norms()
```
#### File: Labs/LinearRegression/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
housing = np.load('housingprices.npy')
challenger = np.load('challenger.npy')
def raw():
plt.plot(housing[:,1], housing[:,0], 'o')
plt.savefig("california.pdf")
plt.clf()
def linear():
X=np.ones((42,2))
X[:,1]=housing[:,1]
Y = housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq)
plt.savefig("cali-linear.pdf")
plt.clf()
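# Note: forming the normal equations explicitly as above can be
# ill-conditioned; np.linalg.lstsq(X, Y)[0] returns the same least squares
# coefficients more stably.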
def cubic():
X=np.ones((42,4))
X[:,1]=housing[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
Y = housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.savefig("cali-quadratic.pdf")
plt.clf()
def quartic():
X=np.ones((42,5))
X[:,1]=housing[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
X[:,4]=X[:,1]**4
Y=housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq
+betahat[2]*xseq**2+betahat[3]*xseq**3+betahat[4]*xseq**4)
plt.ylim([0,600000])
plt.savefig("cali-quartic.pdf")
def challenger_cubic():
plt.plot(challenger[:,0], challenger[:,1], 'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Cubic Approximation')
X=np.ones((challenger.shape[0],4))
X[:,1] = challenger[:,0]
Y=challenger[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
xseq=np.arange(30,100,.5)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.savefig('cubicthrulogitpoints.pdf')
plt.clf()
def challenger_logistic():
###Logreg plot #2
plt.plot(challenger[:,0], challenger[:,1],'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Logistic Regression Prediction')
#X=np.ones((dat.shape[0],2))
#X[:,1]=dat[:,0]
X=challenger[:,0].reshape((23,1))
Y=challenger[:,1]
logreg = linear_model.LogisticRegression(C=1000000,penalty="l2")
logreg.fit(X,Y)
coef=logreg.coef_[0]
xseq=np.arange(30,100,.5)[:,np.newaxis]
#xseqmat=np.ones((len(xseq),2))
#xseqmat[:,1]=xseq
xB=logreg.intercept_[0]+logreg.coef_[0][0]*xseq
#plt.plot(xseq,1/(np.exp(-xB)+1))
plt.plot(xseq,logreg.predict_proba(xseq)[:,1])
plt.savefig("logreg.pdf")
plt.clf()
if __name__ == "__main__":
raw()
linear()
cubic()
quartic()
challenger_cubic()
challenger_logistic()
```
#### File: Labs/LineSearch/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt
import numpy as np
from scipy import linalg as la
from scipy import optimize as opt
def newtonsMethod1d(f, df, ddf, x, niter=10):
'''
Perform Newton's method to minimize a function from R to R.
Inputs:
f -- objective function (twice differentiable)
df -- first derivative
ddf -- second derivative
x -- initial guess
niter -- integer, giving the number of iterations
Returns:
the approximated minimizer
'''
for i in xrange(niter):
x = x-df(x)/ddf(x)
return x, f(x)
def backtracking(f, slope, x, p, a=1, rho=.9, c=10e-4):
'''
    Perform a backtracking line search to satisfy the sufficient decrease (Armijo) condition.
Return the step length.
Inputs:
f -- the objective function
slope -- equal to grad(f)^T p
x -- current iterate
p -- current direction
        a -- initial step length (set to 1 in Newton and quasi-Newton methods)
rho -- number in (0,1)
c -- number in (0,1)
Returns:
the computed step size
'''
b = f(x)
while f(x+a*p) > b + c*a*slope:
a = rho*a
return a
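# Example sketch (not part of the original file): for f(x) = x**2 at x = 1
# with descent direction p = -2, the slope grad(f)^T p is -4, and
#   backtracking(lambda x: x**2, -4., 1., -2.)
# returns a step length satisfying the sufficient decrease condition.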
def gradientDescent(f, Df, x, niter=10):
'''
Minimize a function using gradient descent.
Inputs:
f -- differentiable real-valued function
Df -- the gradient of the function
x -- initial point
niter -- integer giving the number of iterations to run.
Returns:
a list, the sequence of points generated
'''
pts = []
pts.append(x.copy())
for i in xrange(niter):
p = -Df(x)
slope = (p**2).sum()
a = .2/np.sqrt(slope)
x += a*p
pts.append(x.copy())
return pts
def newtonsMethod(f, Df, DDf, x, niter=10):
'''
Minimize a function using Newton's method.
Inputs:
f -- real-valued, twice-differentiable function
Df -- the gradient of the function
DDf -- the Hessian of the function
x -- initial point
niter -- integer giving the number of iterations
Returns:
a list, the sequence of points generated
'''
pts = [x.copy()]
for i in xrange(niter):
p = la.solve(DDf(x),-Df(x))
slope = (p**2).sum()
a = backtracking(f, slope, x, p)
x += a*p
pts.append(x.copy())
return pts
def myFunc(x):
return 4*x**2 - 13*x + 40 + 6*np.sin(4*x)
def myDFunc(x):
return 8*x - 13+24*np.cos(4*x)
def myDDFunc(x):
return 8-96*np.sin(4*x)
def f(x):
w = np.array([1,4])
return np.exp((x**2*w).sum())
def Df(x):
w = np.array([1,4])
return 2*np.exp((x**2*w).sum())*x*w
def DDf(x):
w = np.array([1,4])
A = np.diag(np.exp((x**2*w).sum())*(2*w+4*(x*w)**2))
A[0,1] = 4*((x*w).prod())*np.exp((x**2*w).sum())
A[1,0] = A[0,1]
return A
def newton():
x1,f1 = newtonsMethod1d(myFunc, myDFunc, myDDFunc, 1, niter=200)
x2,f2 = newtonsMethod1d(myFunc, myDFunc, myDDFunc, 4, niter=200)
dom = np.linspace(-10,10,100)
plt.plot(dom, myFunc(dom))
plt.plot(x1, f1, '*')
plt.plot(x2, f2, '*')
plt.annotate('Global Minimum', xy=(x1, f1), xytext=(-4, 200),
arrowprops=dict(facecolor='black', shrink=0.1),)
plt.annotate('Local Minimum', xy=(x2,f2), xytext=(2, 175),
arrowprops=dict(facecolor='black', shrink=0.1),)
plt.savefig('newton.pdf')
plt.clf()
def comparison():
pts1 = np.array(newtonsMethod(f, Df, DDf, np.array([2.,1.]), niter=10))
pts2 = np.array(gradientDescent(f, Df, np.array([2.,1.]), niter=10))
w = np.array([1,4])
dom = np.linspace(-2, 2, 100)
X, Y = np.meshgrid(dom, dom)
Z = np.exp(w[0]*X**2+w[1]*Y**2)
vals = np.exp((pts2**2*w).sum(axis=1))
plt.contour(X,Y,Z, vals)
plt.plot(pts1[:,0], pts1[:,1], '*-')
plt.plot(pts2[:,0], pts2[:,1], '*-')
plt.savefig('comparison.pdf')
plt.clf()
def gaussNewton(f, Df, Jac, r, x, niter=10, backtrack=True):
'''
Solve a nonlinear least squares problem with Gauss-Newton method.
Inputs:
f -- the objective function
Df -- gradient of f
Jac -- jacobian of residual vector
r -- the residual vector
x -- initial point
niter -- integer giving the number of iterations
Returns:
the minimizer
'''
a=0
for i in xrange(niter):
#print i
J = Jac(x)
g = J.T.dot(r(x))
#print J.T.dot(J)
p = la.solve(J.T.dot(J), -g)
slope = (g*p).sum()
if backtrack:
a = backtracking(f, slope, x, p)
else:
a = opt.line_search(f, Df, x, p)[0]
x += a*p
print x, f(x), a
return x
y = 3*np.sin(0.5*np.arange(10))+ 0.5*np.random.randn(10)
def model(x, t):
return x[0]*np.sin(x[1]*t)
def residual(x):
return model(x, np.arange(10)) - y
def jac(x):
ans = np.empty((10,2))
ans[:,0] = np.sin(x[1]*np.arange(10))
ans[:,1] = x[0]*np.arange(10)*np.cos(x[1]*np.arange(10))
return ans
def objective(x):
return .5*(residual(x)**2).sum()
def grad(x):
return jac(x).T.dot(residual(x))
x0 = np.array([2.5,.6])
x = gaussNewton(objective, grad, jac, residual, x0, niter=10, backtrack=False)
def gaussNewtonPlot():
ax1,= plt.plot(np.arange(10), y, '*')
ax2, = plt.plot(np.linspace(0,10,100), 3*np.sin(.5*np.linspace(0,10,100)), '--')
ax3, =plt.plot(np.linspace(0,10,100), x[0]*np.sin(x[1]*np.linspace(0,10,100)))
plt.legend([ax1, ax2, ax3], ['data', 'generating curve', 'fitted curve'])
plt.savefig('gaussNewton.pdf')
plt.clf()
#newton()
#comparison()
gaussNewtonPlot()
```
#### File: Labs/LineSweep/linesweep_solutions.py
```python
import numpy as np
import scipy.spatial as st
from matplotlib import pyplot as plt
from math import sqrt
import heapq as hq
from edge_intersections import edge_intersections, inside
# optimized metric function for simplified linesweep
# Consider giving this one to them.
def metric(p, X):
# Finds distance between point 'p' and each of the rows of 'X'.
# Works assuming 'p' is either 1-dimensional or a row vector.
# 'X' can be a single 1-dimensional vector, a single row-vector,
# or 2-dimensional array.
dif = (X - p)
return np.sqrt((dif * dif).sum(axis=-1))
# simplified linesweep
def pymindist_simple(Y, metric):
""" Run the simple minimum distance algorithm explained in the lab.
'Y' is the array of points. One point for each row.
'metric' is a distance function."""
# Sort by first coordinate.
X = Y.take(Y[:,0].argsort(), axis=0)
r = metric(X[0], X[1])
# Use indices to track which points in the list are "active".
low = 0
for i in range(2, len(X)):
# Update the 'low' index to reflect which points
# still need further processing.
while X[low,0] < X[i,0] - r:
low += 1
# If there really are any points to process,
# update the minimum accordingly.
if low < i:
r = min(r, np.min(metric(X[i], X[low:i])))
return r
# full linesweep
def pymindist(Y):
""" Run the full minimum distance line sweep algorithm.
'Y' is an array of points. One point for each row."""
# Sort by first coordinate.
X = Y.take(Y[:,0].argsort(), axis=0)
# Use indices to track which points in the list are "active".
low = 0
dim = X.shape[1]
n = X.shape[0]
# Compute the starting distance.
r = 0.
for i in xrange(dim):
dif = X[0,i] - X[1,i]
r += dif * dif
r = sqrt(r)
# Process the rest of the points.
for i in xrange(2, n):
# Update the 'low' index to reflect which points
# still need further processing.
while X[low,0] + r < X[i,0]:
low += 1
# Process each point, rejecting it as soon as possible.
for k in xrange(low, i):
# Set a flag so the first coordinate is processed.
# Don't process it at the beginning of the for-loop
# since we already know those coordinates are close enough.
proc = True
# Start computing the distance.
d = 0.
for j in xrange(1, dim):
# Compute absolute difference, then add in the
# square of the difference if it is still in-bounds.
dif = abs(X[k,j] - X[i,j])
# Reject the point if it is already too far.
if r < dif:
proc = False
break
d += dif * dif
# Finish processing the point if it hasn't been rejected yet.
if proc:
dif = X[k,0] - X[i,0]
r = min(r, sqrt(d + dif * dif))
return r
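# Consistency check sketch (not part of the original file): on random points
# both line sweeps should agree with a brute-force minimum distance, e.g.
#   pts = np.random.rand(100, 2)
#   brute = min(metric(p, np.delete(pts, i, axis=0)).min() for i, p in enumerate(pts))
#   print pymindist_simple(pts, metric), pymindist(pts), brute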
# farthest point problem
def farthest(pts, xlims, ylims, n):
""" Find the 'n' points that lie farthest from the points given
in the region bounded by 'xlims' and 'ylims'.
'pts' is an array of points.
    'xlims' and 'ylims' are tuples storing the maximum and minimum
values to consider along the x and y axes."""
# There are a ton of ways to do this, this is a shorter one.
# The 'inside' function tests whether or not a point is on
# the interior of the given square.
ins = lambda pt: inside(pt, xlims, ylims)
# Construct the Voronoi diagram.
V = st.Voronoi(pts)
# Construct the KD Tree.
KD = st.cKDTree(pts)
# Now we'll construct a list of tuples where the first
# entry is the distance from a point to the nearest node
# and the second entry is a tuple with the coordinates for the point.
# Process the vertices of the Voronoi diagram.
Q = [(KD.query(pt)[0], pt) for pt in V.vertices if ins(pt)]
# Process the intersections of the edges of the
# Voronoi diagram and the edges of the box.
Q += [(KD.query(pt)[0], pt) for pt in edge_intersections(V, xlims, ylims)[0]]
# Process the corners of the box.
Q += [(KD.query(pt)[0], (x, y)) for x in xlims for y in ylims]
# Return the 'n' points with farthest distance from the points
# used to generate the Voronoi diagram.
return np.array([pair[1] for pair in hq.nlargest(n, Q)])
# triangulation of the unit square problem
def triangulate(n):
""" Triangulate the square [0,1]x[0,1] using a grid with
'n' equispaced points along each of its edges."""
# Generate a grid of points.
X = np.linspace(0, 1, n)
Y = X.copy()
X, Y = np.meshgrid(X, Y, copy=False)
# Restructure the points generated so you can pass them
# to the Delaunay class constructor.
A = np.column_stack((X.flat, Y.flat))
# Make a Delaunay triangulation.
D = st.Delaunay(A)
# Plot it.
plt.triplot(A[:,0], A[:,1], D.simplices.copy())
plt.show()
```
#### File: Labs/LineSweep/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt
import numpy as np
from numpy.random import rand
import bisect as bs
# This generates the plots for the simplified linsweep.
# It generates more plots than are actually used in the lab.
# I just picked the plots that were useful in illustrating the algorithm.
def multidist(p0, p1):
l = len(p0)
return (sum([(p0[i] - p1[i])**2 for i in range(l)]))**(.5)
def mindist_simple_plot(Y):
X = Y.take(Y[:,0].argsort(), axis=0)
n = len(X)
actives = []
pt = tuple(X[0])
actives.append(pt)
pt = tuple(X[1])
actives.append(pt)
r = multidist(actives[0], actives[1])
for i in xrange(2, len(X)):
pt = tuple(X[i])
l = len(actives)
while l > 0:
if actives[0][0] > pt[0] + r:
actives.pop(0)
l -= 1
else:
break
plt.scatter(X[:,0], X[:,1])
res = 15
T = np.linspace(-.2, 1.2, res)
res2 = 201
theta = np.linspace(np.pi/2, 3*np.pi/2, res2)
plt.plot([pt[0]]*res, T, color='r')
plt.plot([pt[0]-r]*res, T, color='r')
X0 = np.array([pt + r * np.array([np.cos(t), np.sin(t)]) for t in theta])
plt.plot(X0[:,0], X0[:,1], color='g')
plt.xlim((-.2, 1.2))
plt.ylim((-.2, 1.2))
plt.show()
for k in xrange(len(actives)):
d = multidist(pt, actives[k])
if d < r:
r = d
actives.append(pt)
return r
# This generates the plots for the full version.
# It generates more plots than are actually used in the lab.
# I just picked the plots that were useful in illustrating the algorithm.
def mindist_plot(Y):
X = Y.take(Y[:,0].argsort(), axis=0)
n = len(X)
actives = []
pt = X[0]
actives.insert(bs.bisect_left(actives, tuple(reversed(tuple(pt)))), tuple(reversed(tuple(pt))))
pt = X[1]
actives.insert(bs.bisect_left(actives, tuple(reversed(tuple(pt)))), tuple(reversed(tuple(pt))))
r = multidist(actives[0], actives[1])
for i in xrange(2, n):
plt.scatter(X[:,0], X[:,1])
pt = tuple(X[i])
res = 1401
x = np.linspace(-.2, 1.2, res)
plt.plot(x, [pt[1] - r] * res, color='r')
plt.plot(x, [pt[1] + r] * res, color='r')
plt.plot([pt[0]] * res, x, color='b')
plt.plot([pt[0] - r] * res, x, color='b')
T = np.linspace(np.pi / 2, 3 * np.pi / 2, res)
pt = np.array(pt)
X0 = np.array([pt + r * np.array([np.cos(t), np.sin(t)]) for t in T])
plt.plot(X0[:,0], X0[:,1], color='g')
block = actives[bs.bisect_left(actives, (pt[1] - r, pt[0] - r)): bs.bisect_right(actives, (pt[1] + r, pt[0]))]
for k in xrange(len(block)):
d = multidist(tuple(reversed(tuple(pt))), block[k])
if d < r:
r = d
removalidx = 0
while removalidx < len(actives):
if abs(actives[removalidx][1] - pt[0]) > r:
actives.pop(removalidx)
else:
removalidx += 1
if len(actives) > 0:
plt.scatter(np.fliplr(np.array(actives))[:,0], np.fliplr(np.array(actives))[:,1])
if len(block) > 0:
plt.scatter(np.fliplr(np.array(block))[:,0], np.fliplr(np.array(block))[:,1])
plt.show()
actives.insert(bs.bisect_left(actives, tuple(reversed(tuple(pt)))), tuple(reversed(tuple(pt))))
return r
def pnorm(pt, X, p=2):
# Take the p-norm distance between a point 'pt'
# and an array of points 'X'.
if p == "inf":
return np.absolute(pt - X).max(axis=-1)
return (np.absolute(pt - X)**p).sum(axis=-1)**(1./p)
def brute_force_voronoi(n, res, p=2, filename=None):
# Generates a grid of points and tests to find the nearest
# neighbor for each of them.
pts = rand(n, 2)
X = np.linspace(0, 1, res)
# Make an array to store the indices of the nearest points.
indices = np.zeros((res, res))
for i in xrange(res):
for j in xrange(res):
indices[i, j] = pnorm(np.array([X[j], X[i]]), pts, p).argmin()
# Make a colorplot of the results.
X, Y = np.meshgrid(X, X, copy=False)
plt.pcolormesh(X, Y, indices)
plt.scatter(pts[:,0], pts[:,1])
plt.xlim((0,1))
plt.ylim((0,1))
if filename is None:
plt.show()
else:
plt.savefig(filename)
plt.clf()
if __name__=="__main__":
# Generate the plots for the simplified algorithm.
X = rand(10, 2)
    mindist_simple_plot(X)
# Generate the plots for the full algorithm.
X = rand(25, 2)
    mindist_plot(X)
# The 1-norm voronoi diagram.
brute_force_voronoi(10, 401, 1, "voronoi_1norm.png")
# The oo-norm voronoi diagram.
brute_force_voronoi(10, 401, "inf", "voronoi_supnorm.png")
```
#### File: Labs/LorenzEquations/solutions.py
```python
import numpy as np
from mayavi import mlab
from matplotlib import pyplot as plt
from numpy.random import rand, seed
from scipy.integrate import odeint
from scipy.stats import linregress
# ODE used in other functions.
def lorenz_ode((x, y, z), t, sigma=10., beta=8./3, rho=28.0):
return sigma * (y - x), x * (rho - z) - y, x * y - beta * z
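# The right-hand side above is the classical Lorenz system:
#   x' = sigma*(y - x),   y' = x*(rho - z) - y,   z' = x*y - beta*z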
def lorenz_plot(N=10, res=2000, t=10, seed_=120, atol=1E-15, rtol=1E-13,
sigma=10., beta=8./3, rho=28.):
""" Plot the trajectories given by the Lorenz equations for 'N' starting points.
Choose random x, y, and z values between -15 and 15.
Seed the random number generator with 'seed_'.
Use a resolution of 'res' for the points in the plot.
    Plot the time values between 0 and 't'.
When computing the trajectories, pass the tolerance 'atol' to the ODE solver.
Use different colors for each trajectory.
Use the values of 'sigma', 'beta', and 'rho' in the Lorenz ODE. """
# Get initial conditions.
seed(seed_)
x0 = -15 + 30 * rand(N, 3)
# Solve for the trajectories.
t = np.linspace(0, t, res)
pts = np.empty((N, res, 3))
for i, x in enumerate(x0):
pts[i] = odeint(lorenz_ode, x, t,
args=(sigma, beta, rho), atol=atol, rtol=rtol)
# Select the colors for the different curves.
colors = np.zeros((N, 3))
colors[:,1] = np.linspace(0, 1, N)
colors = map(tuple, colors.tolist())
# Plot the different trajectories.
for x, color in zip(pts, colors):
mlab.plot3d(x[:,0], x[:,1], x[:,2], tube_radius=.2, color=color)
# Position the view for the plot.
mlab.gcf().scene.camera.position = [127.23761585, -108.28736806, 6.35191272]
mlab.gcf().scene.camera.focal_point = [-1.7792501449584961, -3.6287221908569336, 23.397351264953613]
mlab.gcf().scene.camera.view_up = [-0.078467260964232038, -0.20339450183237351, 0.97594752194015633]
mlab.gcf().scene.camera.clipping_range = [128.64624663718814, 328.22549479639167]
# Show the plot.
mlab.show()
def lorenz_animation(N=10, res=1000, step=2, t=10, seed_=120, atol=1E-15,
rtol=1E-13, delay=10, sigma=10., beta=8./3, rho=28.):
""" Animate the trajectories given by the Lorenz equations for 'N' starting points.
Choose random x, y, and z values between -15 and 15.
Seed the random number generator with 'seed_'.
Use a resolution of 'res' for the points in the plot.
    Plot the time values between 0 and 't'.
When computing the trajectories, pass the tolerances
'atol' and 'rtol' to the ODE solver.
At each update, add 'step' points to the plot.
Use a delay of 'delay' at each update in the animation.
Use different colors for each trajectory.
Use the values of 'sigma', 'beta', and 'rho' in the Lorenz ODE. """
# Get initial conditions.
seed(seed_)
x0 = -15 + 30 * rand(N, 3)
# Solve for the trajectories.
t = np.linspace(0, t, res)
pts = np.empty((N, res, 3))
for i, x in enumerate(x0):
pts[i] = odeint(lorenz_ode, x, t,
args=(sigma, beta, rho), rtol=rtol, atol=atol)
# Select the colors for the different curves.
colors = np.zeros((N, 3))
colors[:,1] = np.linspace(0, 1, N)
colors = map(tuple, colors.tolist())
# Plot the different trajectories.
contours = [mlab.plot3d(x[:1,0], x[:1,1], x[:1,2], tube_radius=.15, color=color)
for x, color in zip(pts, colors)]
# Position the view for the plot.
mlab.gcf().scene.camera.position = [127.23761585, -108.28736806, 6.35191272]
mlab.gcf().scene.camera.focal_point = [-1.7792501449584961, -3.6287221908569336, 23.397351264953613]
mlab.gcf().scene.camera.view_up = [-0.078467260964232038, -0.20339450183237351, 0.97594752194015633]
mlab.gcf().scene.camera.clipping_range = [128.64624663718814, 328.22549479639167]
# Define the animation.
@mlab.show
@mlab.animate(delay=delay)
def trace_curve():
for i in xrange(step, res, step):
for c, x, color in zip(contours, pts, colors):
c.mlab_source.reset(x=x[:i,0], y=x[:i,1], z=x[:i,2])
yield
# Run the animation.
trace_curve()
def lorenz_tolerance_change(res=10000, step=5, t=50, seed_=120, atol1=1E-14,
atol2=1E-15, rtol1=1E-12, rtol2=1E-13, delay=10,
sigma=10., beta=8./3, rho=28.):
""" Animate the trajectories given by the Lorenz equations.
Plot two trajectories, one computed using the tolerances 'atol1' and 'rtol1',
and one computed using the tolerances 'atol2' and 'rtol2'.
Choose random x, y, and z values between -15 and 15.
Seed the random number generator with 'seed_'.
Use a resolution of 'res' for the points in the plot.
    Plot the time values between 0 and 't'.
At each update, add 'step' points to the plot.
Use a delay of 'delay' at each update in the animation.
Use different colors for each trajectory.
Use the values of 'sigma', 'beta', and 'rho' in the Lorenz ODE. """
# Get initial conditions.
seed(seed_)
x = -15 + 30 * rand(3)
# Solve for the trajectories.
# Plot them.
t = np.linspace(0, t, res)
y1 = odeint(lorenz_ode, x, t, args=(sigma, beta, rho), rtol=rtol1, atol=atol1)
c1 = mlab.plot3d(y1[:1,0], y1[:1,1], y1[:1,2], tube_radius=.2, color=(1, 0, 0))
y2 = odeint(lorenz_ode, x, t, args=(sigma, beta, rho), rtol=rtol2, atol=atol2)
c2 = mlab.plot3d(y2[:1,0], y2[:1,1], y2[:1,2], tube_radius=.2, color=(0, 0, 1))
# Position the view for the plot.
mlab.gcf().scene.camera.position = [127.23761585, -108.28736806, 6.35191272]
mlab.gcf().scene.camera.focal_point = [-1.7792501449584961, -3.6287221908569336, 23.397351264953613]
mlab.gcf().scene.camera.view_up = [-0.078467260964232038, -0.20339450183237351, 0.97594752194015633]
mlab.gcf().scene.camera.clipping_range = [128.64624663718814, 328.22549479639167]
# Define the animation.
@mlab.show
@mlab.animate(delay=delay)
def trace_curve():
for i in xrange(step, res, step):
c1.mlab_source.reset(x=y1[:i,0], y=y1[:i,1], z=y1[:i,2])
c2.mlab_source.reset(x=y2[:i,0], y=y2[:i,1], z=y2[:i,2])
yield
# Run the animation.
trace_curve()
def lorenz_perturbed(N=10, res=10000, step=5, t=50, seed_=120, atol=1E-15,
rtol=1E-13, epsilon=2.2e-16, delay=10,
sigma=10., beta=8./3, rho=28.):
""" Animate the trajectories given by the Lorenz equations.
Plot two trajectories, one with the initial value given by the
random number generator after you seed it,
and another that is equal to (1 + epsilon) times the other initial value.
Choose random x, y, and z values between -15 and 15.
Seed the random number generator with 'seed_'.
Use a resolution of 'res' for the points in the plot.
    Plot the time values between 0 and 't'.
Pass the tolerances 'atol' and 'rtol' to the ODE solver.
At each update, add 'step' points to the plot.
Use a delay of 'delay' at each update in the animation.
Use different colors for each trajectory.
Use the values of 'sigma', 'beta', and 'rho' in the Lorenz ODE. """
# Get initial conditions.
seed(seed_)
x1 = -15 + 30 * rand(3)
x2 = x1 * (1. + epsilon)
# Solve for the trajectories.
# Plot them.
t = np.linspace(0, t, res)
y1 = odeint(lorenz_ode, x1, t, args=(sigma, beta, rho), atol=atol, rtol=rtol)
c1 = mlab.plot3d(y1[:1,0], y1[:1,1], y1[:1,2], tube_radius=.2, color=(1, 0, 0))
y2 = odeint(lorenz_ode, x2, t, args=(sigma, beta, rho), atol=atol, rtol=rtol)
c2 = mlab.plot3d(y2[:1,0], y2[:1,1], y2[:1,2], tube_radius=.2, color=(0, 0, 1))
# Position the view for the plot.
mlab.gcf().scene.camera.position = [127.23761585, -108.28736806, 6.35191272]
mlab.gcf().scene.camera.focal_point = [-1.7792501449584961, -3.6287221908569336, 23.397351264953613]
mlab.gcf().scene.camera.view_up = [-0.078467260964232038, -0.20339450183237351, 0.97594752194015633]
mlab.gcf().scene.camera.clipping_range = [128.64624663718814, 328.22549479639167]
# Define the animation.
@mlab.show
@mlab.animate(delay=delay)
def trace_curve():
for i in xrange(2, res, step):
c1.mlab_source.reset(x=y1[:i,0], y=y1[:i,1], z=y1[:i,2])
c2.mlab_source.reset(x=y2[:i,0], y=y2[:i,1], z=y2[:i,2])
yield
# Run the animation.
trace_curve()
def lyapunov_plot(res=10001, initial_time=10., t=10, seed_=5,
epsilon=1E-8, atol=1E-15, rtol=1E-13,
sigma=10., beta=8./3, rho=28.):
""" Plot the separation between two trajectories through the Lorenz system.
Use a logarithmic scale on the y-axis.
Seed the random number generator with 'seed_'.
Run the ODE solver through 'initial_time' using the given tolerances and resolution.
    Run the ODE solver an additional 't' units of time on two new sets of initial conditions.
One should be the final value of the previous computation.
The other should be (1 + epsilon) times the other point.
Use the resolutions 'res' and tolerances 'atol' and 'rtol' again
when solving using the new initial values.
Plot a fitting exponential curve through the points.
On the log-scale, it will look like a line.
Show the plot, and return the resulting approximation to the Lyapunov exponent.
Use the values of 'sigma', 'beta', and 'rho' in the Lorenz ODE. """
# Get starting points.
seed(seed_)
x1 = -15 + 30 * rand(3)
# Run till the point is already in the attractor.
x1 = odeint(lorenz_ode, x1, np.linspace(0, initial_time, res),
args=(sigma, beta, rho), atol=atol, rtol=rtol)[-1]
# Change it slightly.
x2 = x1 * (1. + epsilon)
# Find the trajectories.
t = np.linspace(0, t, res)
y1 = odeint(lorenz_ode, x1, t, atol=atol, rtol=rtol, args=(sigma, beta, rho))
y2 = odeint(lorenz_ode, x2, t, atol=atol, rtol=rtol, args=(sigma, beta, rho))
# Plot the separation.
plt.semilogy(t, np.sqrt(((y1 - y2)**2).sum(axis=1)))
# Compute the regression.
slope, intercept, r_value, p_value, std_err = linregress(t, np.log(np.sqrt(((y1 - y2)**2).sum(axis=1))))
# Compute the approximation.
yapprox = slope * t + intercept
# Plot the line.
plt.semilogy(t, np.exp(yapprox))
# Label the axes.
plt.xlabel('Time')
plt.ylabel('Separation')
# Show it.
plt.show()
return slope
```
#### File: Labs/MST/new_mst_solutions.py
```python
import numpy as np
import networkx as nx
import scipy.ndimage
from scipy import linalg as la
from operator import itemgetter
from collections import Counter
from matplotlib import pyplot as plt
def make_edges(n):
A = la.triu(np.random.randint(1,50,(n,n))*(np.random.rand(n,n)>.5))
S = []
for index, x in np.ndenumerate(A):
if x != 0:
S.append((str(index[0]), str(index[1]), x))
return S
def formChanger(oldData):
newData = []
for i in oldData:
newData.append((i[0],i[1],int(i[2])))
return newData
# Problem 1
def kruskal(edges):
# Empty list of edges for MST
tree = []
# Dictionary that points each node towards its root, initially itself
nodes = {node:node for node in ({edge[0] for edge in edges} | {edge[1] for edge in edges})}
# Set number of nodes to be processed to n-1
remaining = len(nodes)-1
# Find the root of the given node
def track(node):
# Node whose root we are finding
temp = node
# While temp does not point to itself in the dictionary
while nodes[temp] is not temp:
# Update temp to be the node it currently points to in nodes
temp = nodes[temp]
return temp
for n1, n2, weight in sorted(edges, key=itemgetter(2)):
# Root node of n1
root = track(n1)
# Root node of n2
remove = track(n2)
if root is not remove:
# Add the edge to the tree
tree.append((n1, n2, weight))
# Lower remaining by 1
remaining -= 1
if remaining == 0:
return tree
# Change the value associated with remove to root
nodes[remove] = root
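# Usage sketch (not part of the original file): build a small random graph
# and compare this MST's total weight against networkx. Note that make_edges
# may occasionally return a disconnected graph, in which case kruskal
# returns None.
# edges = make_edges(6)
# tree = kruskal(edges)
# print sum(weight for _, _, weight in tree)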
# Problem 2
oldData = np.load('MSTdata.npy')
data = formChanger(oldData)
# Timing for kruskal(data): 338 microseconds per loop
G = nx.Graph()
for i in data:
G.add_edge(i[0], i[1], weight=int(i[2]))
# Timing for nx.minimum_spanning_tree(G): 2.4 milliseconds per loop
# Problem 3
def convert(filename):
    picture = scipy.ndimage.imread(filename)
    # Use the first channel, cast to int so intensity differences do not
    # wrap around as unsigned bytes.
    A = picture[:,:,0].astype(int)
    height, width = A.shape
    edges = []
    # Nodes are flattened pixel indices (rather than raw pixel values) so
    # that distinct pixels with equal intensities are not merged.
    for (i, j), value in np.ndenumerate(A):
        node = i*width + j
        # Only add the edges to the right and below; the graph is undirected.
        if j < width - 1:
            edges.append((node, node + 1, abs(value - A[i,j+1])))
        if i < height - 1:
            edges.append((node, node + width, abs(value - A[i+1,j])))
    return edges
# Problem 4
def modifiedkruskal(edges, div):
# Create dictionary that points each node towards its root, initially itself
nodes = {node:node for node in ({edge[0] for edge in edges} | {edge[1] for edge in edges})}
# Set number of nodes to be processed to n-div
end = len(nodes)-div
# Tracking function- same as in original Kruskal
def track(node):
temp = node
while nodes[temp] is not temp:
temp = nodes[temp]
return temp
for n1, n2, weight in sorted(edges, key=itemgetter(2)):
root = track(n1)
remove = track(n2)
if root is not remove:
end -=1
if end == 0:
# Makes sure you get the right number of divisions
nodes[remove] = root
# Return dict with nodes as keys and their roots as values
return {node:track(node) for node in nodes}
# Change the value associated with remove to root
nodes[remove] = root
def segment(filename, div):
    # Read in the image
    image = scipy.ndimage.imread(filename)[:,:,0]
    # Create the list of edges
    edges = convert(filename)
    # Divide the pixels into segments; nodes_dict maps each flattened pixel
    # index to the root node of its segment
    nodes_dict = modifiedkruskal(edges, div)
    # Count the roots and get the ten most common roots
    d = Counter(nodes_dict.values())
    segments = d.most_common(10)
    # Build an array of roots with the same shape as the image, then mask the
    # image so that image1, image2, and image3 keep only the pixels belonging
    # to the largest, second largest, and third largest segments.
    roots = np.array([nodes_dict[node] for node in xrange(image.size)])
    roots = roots.reshape(image.shape)
    image1 = np.where(roots == segments[0][0], image, 0)
    image2 = np.where(roots == segments[1][0], image, 0)
    image3 = np.where(roots == segments[2][0], image, 0)
    # Plot the images
    plt.subplot(221)
    plt.imshow(image)
    plt.gray()
    plt.subplot(222)
    plt.imshow(image1)
    plt.gray()
    plt.subplot(223)
    plt.imshow(image2)
    plt.gray()
    plt.subplot(224)
    plt.imshow(image3)
    plt.gray()
    plt.show()
```
#### File: Labs/NearestNeighbor/graphs.py
```python
from collections import deque
def bfs(G, v, d, track=True):
Q = deque([v])
marked = set([v])
visited = list()
while len(Q) > 0:
t = Q.popleft()
visited.append(t)
if t == d:
return t, visited
else:
for e in adjedges_list(G, t):
if track:
if e not in marked:
marked.add(e)
Q.append(e)
else:
Q.append(e)
def dfs(G, v, d, track=True):
Q = list([v])
marked = set([v])
visited = list()
while len(Q) > 0:
t = Q.pop()
visited.append(t)
if t == d:
return t, visited
else:
for e in adjedges_list(G, t):
if track:
if e not in marked:
marked.add(e)
Q.append(e)
else:
Q.append(e)
def adjedges_mat(M, e):
for i, n in enumerate(M[e]):
if n == 1:
yield i
def adjedges_list(M, e):
for n in M[e]:
yield n
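# Small usage example (not part of the original file): breadth-first search
# on an adjacency-list graph, looking for node 'D' starting from 'A'.
if __name__ == "__main__":
    G = {'A': ['B', 'C'], 'B': ['A', 'D'], 'C': ['A'], 'D': ['B']}
    print bfs(G, 'A', 'D')   # ('D', ['A', 'B', 'C', 'D'])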
```
#### File: Labs/NetworkFlow/NetFlow.py
```python
import numpy as np
from scipy import optimize
A = np.array([[-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, -1, 1, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0, -1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, -1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1]])
#[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
b = np.array([[-7669,],[-16680],[7593],[9358],[19929],[0],
[0],[-15089],[-5136],[8080],[-5379],[4993]])#,[1000]])
A = np.array([[-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, -1, 1, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0, -1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, -1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
B = np.array([[-7669,],[-16680],[7593],[9358],[19929],[0],
[0],[-15089],[-5136],[8080],[-5379],[4993],[13400],[3050],[4200],
[1200],[2300]])
b = np.array([[-7669,],[-16680],[7593],[9358],[19929],[0],
[0],[-15089],[-5136],[8080],[-5379],[4993]])
#dependent 4,9,10,15,16,17
c = np.linalg.lstsq(A,B)[0]
#print np.dot(A,c)
#c = c[0:12][:]
A = np.zeros((12,17))
A[0][0] = 1
A[1][0] = -1
A[1][1] = 1
A[2][1] = -1
A[2][2] = 1
A[3][2] = -1
A[0][3] = -1
A[4][3] = 1
A[1][4] = 1
A[5][4] = -1
A[2][5] = -1
A[6][5] = 1
A[3][6] = 1
A[7][6] = -1
A[4][7] = -1
A[5][7] = 1
A[5][8] = -1
A[6][8] = 1
A[6][9] = -1
A[7][9] = 1
A[4][10] = -1
A[8][10] = 1
A[5][11] = 1
A[9][11] = -1
A[6][12] = -1
A[10][12] = 1
A[7][13] = 1
A[11][13] = -1
A[8][14] = 1
A[9][14] = -1
A[9][15] = 1
A[10][15] = -1
A[10][16] = 1
A[11][16] = -1
#print A
x0 = np.array([[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]])
x0 = np.array([1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
print x0.shape, A.shape, b.shape, x0.T.shape
def con1(x):
return np.subtract(np.dot(A,x),b.squeeze())[0]
def con2(x):
x = x.T
return np.subtract(b,np.dot(A,x))
x = x0.T
c = c.T
print np.dot(c,x)
d = np.subtract(np.dot(A,x0),b.squeeze())[0]
print d
def opt(x):
x = x.T
return np.dot(c,x)
print optimize.fmin_cobyla(opt, x0, con1)
'''
#print c
x = np.dot(A,c)
d = c
for i in range(0,17):
d[i][0] = d[i][0] + 11000
print c
#print c
#print d
#print np.dot(A,d), x
-1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
1 -1 0 0 -1 0 0 0 0 0 0 0 0 0 0 0 0
0 1 -1 0 0 1 0 0 0 0 0 0 0 0 0 0 0
0 0 1 0 0 0 -1 0 0 0 0 0 0 0 0 0 0
0 0 0 -1 0 0 0 1 0 0 1 0 0 0 0 0 0
0 0 0 0 1 0 0 -1 1 0 0 -1 0 0 0 0 0
0 0 0 0 0 -1 0 0 -1 1 0 0 1 0 0 0 0
0 0 0 0 0 0 1 0 0 -1 0 0 0 -1 0 0 0
0 0 0 0 0 0 0 0 0 0 -1 0 0 0 -1 0 0
0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 -1 0
0 0 0 0 0 0 0 0 0 0 0 0 -1 0 0 1 -1
0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1
-7669
-16680
7593
9358
19929
0
0
-15089
-5136
8080
-5379
4993
'''
```
#### File: Labs/NewtonsMethod/newton.py
```python
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
from scipy.misc import derivative
# Derivative function from the numerical derivative lab.
def der(fc, x, h=.0001, degree=1, type='centered', accuracy=2):
""" Computes the numerical of the callable function 'fc at all the
points in array 'x'. 'degree' is the degree of the derivative to be
computed. 'type' can be 'centered', 'forward', or 'backward'.
'accuracy' is the desired order of accuracy. For forward and backward
differences it can take a value of 1, 2, or 3. For centered differences
it can take a value of 2, 4, or 6."""
# Use these lists to manage the different coefficient options.
A = np.array([[[0., 0., -.5, 0., .5, 0., 0.],
[0., 1/12., -2/3., 0., 2/3., -1/12., 0.],
[-1/60., 3/20., -3/4., 0., 3/4., -3/20., 1/60.]],
[[0., 0., 1., -2., 1., 0., 0.],
[0., -1/12., 4/3., -5/2., 4/3., -1/12., 0.],
[1/90., -3/20., 3/2., -49/18., 3/2., -3/20., 1/90.]]])
B = np.array([[[-1., 1., 0., 0., 0.],
[-1.5, 2., -.5, 0., 0.],
[-11/6., 3., -1.5, 1/3., 0.]],
[[1., -2., 1., 0., 0.],
[2., -5., 4., -1., 0.],
[35/12., -26/3., 19/2., -14/3., 11/12.]]])
if type == "centered":
acc = int(accuracy/2) - 1
else:
acc = int(accuracy) - 1
if int(degree) not in [1, 2]:
raise ValueError ("Only first and second derivatives are supported")
if acc not in [0, 1, 2]:
raise ValueError ("Invalid accuracy")
if type == 'centered':
xdifs = np.array([fc(x+i*h) for i in xrange(-3, 4)])
return np.inner(A[degree-1,acc], xdifs.T) / h**degree
elif type == 'forward':
xdifs = np.array([fc(x+i*h) for i in xrange(5)])
return np.inner(B[degree-1,acc], xdifs.T) / h**degree
elif type == 'backward':
xdifs = np.array([fc(x-i*h) for i in xrange(5)])
return np.inner(B[degree-1,acc], xdifs.T) / (-h)**degree
else:
raise ValueError ("invalid type")
# Partial derivative function used in the Jacobian function.
def partial(fc, x, i, h=.0001, ty="centered", ac=2):
""" Computes a partial derivative with respect to index 'i'.
The rest of the options are the same as the numerical derivative function."""
def fcpart(y):
add = np.zeros(x.shape[0])
add[i] = y
return fc(x+add)
return der(fcpart, 0., h=h, type=ty, accuracy=ac)
# Numerical Jacobian function from the MultiDeriv lab.
def jac(fc, x, ty="centered", ac=2, h=.0001):
"""Compute the Jacobian matrix of a function.
'fc' is a callable function that operates on a 1D array.
'x' is where to evaluate the Jacobian matrix.
    Dimensions of the domain and range are inferred from 'x'
and the output of 'fc'."""
return np.array([partial(fc, x, [i], h=h, ty=ty, ac=ac) for i in xrange(x.size)]).T
# Newton's method for an array of points.
def newton(G, f, f1=None, maxiters=100, tol=1E-8, h=1E-7):
""" Perform Newton's method for function 'f' at the points
in the array 'G'.
'f1' is an optional derivative function.
'maxiters' is the maximum number of iterations.
'tol' is the tolerance used as a stopping criterion.
'h' is the difference used for the numerical derivatives."""
A = np.array(G, order='C')
C = np.zeros_like(G, dtype=bool, order='C')
convergence = False
if f1 is not None:
fder = f1
else:
fder = lambda x: (f(x+h) - f(x)) / h
for index, value in np.ndenumerate(A):
if maxiters > 0:
previous = value
value -= f(value) / fder(value)
if abs(value - previous) < tol:
convergence = True
for i in xrange(maxiters-1):
previous = value
value -= f(value) / fder(value)
if abs(previous - value) < tol:
C[index] = True
break
A[index] = value
return A, C
# Multi-dimensional Newton's method
def multinewton(v, f, jacobian=None, maxiters=5, tol=1E-5, h=1E-7):
""" Performs Newton's method in multiple dimensions.
'v' is the starting vector.
'f' is the function that accepts 'v' as an argument.
'jacobian' is an optional function that computes the Jacobian matrix.
'maxiters' is the maximum number of iterations.
'tol' is the tolerance used as a stopping criterion.
'h' is the difference used for the numerical derivatives."""
arr = v.copy()
prev = np.empty_like(v)
convergence = False
if jacobian is not None:
j = jacobian
else:
j = lambda v: jac(f, v, h=h)
for i in xrange(maxiters):
prev[:] = arr
arr -= la.solve(j(arr), f(arr))
prev -= arr
prev *= prev
print f(arr)
if prev.max() < tol:
convergence=True
break
return arr, convergence
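# Usage sketch (added, illustrative only): run the scalar Newton iteration on a
# small grid of starting points for f(x) = x**2 - 2. Entries converge to +sqrt(2)
# or -sqrt(2) depending on the starting sign; the boolean array flags convergence.
def _example_newton():
    grid = np.array([-3., -1., 1., 3.])
    roots, converged = newton(grid, lambda x: x**2 - 2, f1=lambda x: 2*x)
    return roots, converged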
# Julia set problem.
def polyjulia(p, xmin, xmax, ymin, ymax, res=401, iters=100, tol=1E-12):
""" Plot the Julia set of a polynomial.
Use a 'res'x'res' grid of complex numbers with real part
ranging from 'xmin' to 'xmax' and imaginary part
ranging from 'ymin' to 'ymax'.
'p' is assumed to be a numpy poly1d object, or
at least some callable object with a 'deriv' method that
returns its derivative and a 'roots' attribute that
    contains an array with the values of all the function's roots.
'iters' is the number of iterations to perform.
'tol' is the tolerance used to distinguish between
the roots of the polynomial."""
x = np.linspace(xmin, xmax, res)
y = np.linspace(ymin, ymax, res)
X, Y = np.meshgrid(x, y, copy=False)
Z = X + 1.0j * Y
p2 = p.deriv()
for i in xrange(500):
Z -= p(Z) / p2(Z)
colors = np.zeros_like(Z)
for index, root in np.ndenumerate(p.roots):
colors[np.absolute(Z-root)<tol] = index
colors[np.isnan(Z)] = p.roots.size
plt.pcolormesh(X, Y, colors, cmap=plt.get_cmap('winter'))
plt.show()
# Examples from Julia set problem.
def polyplot():
""" Plot the examples in the lab."""
for coefs, xmin, xmax, ymin, ymax in [
([1, -2, -2, 2], -.5, 0, -.25, .25),
([3, -2, -2, 2], -1, 1, -1, 1),
([1, 3, -2, -2, 2], -1, 1, -1, 1),
([1, 0, 0, -1], -1, 1, -1, 1)]:
polyjulia(np.poly1d(coefs), xmin, xmax, ymin, ymax)
# Mandelbrot set problem.
def mandelbrot(xmin=-1.5, xmax=.5, ymin=-1, ymax=1, guess=complex(0,0), res=401, iters=200):
""" Plot the Mandelbrot set."""
x = np.linspace(xmin, xmax, res)
y = np.linspace(ymin, ymax, res)
X, Y = np.meshgrid(x, y, copy=False)
Z = X + 1.0j * Y
vals = np.empty_like(Z)
vals[:] = guess
for i in xrange(iters):
vals[:] = vals**2 + Z
vals[np.isnan(vals)] = 1
vals[np.absolute(vals)>1] = 1
vals[np.absolute(vals)<1] = 0
plt.pcolormesh(X, Y, np.absolute(vals), cmap=plt.get_cmap('winter'))
plt.show()
# Show the plots if the script is run.
if __name__=='__main__':
polyplot()
mandelbrot()
```
#### File: Labs/PCA_LSI/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn import decomposition
from scipy import linalg as la
iris = load_iris()
def iris_base():
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, aspect='equal')
plt.plot(iris.data[50:150,0], iris.data[50:150,2], 'k.')
plt.xlim([2, 8])
plt.ylim([2, 8])
plt.xlabel(r"Sepal Length (cm)")
plt.ylabel(r"Petal Length (cm)")
return fig
def ibase():
iris_base()
plt.savefig('iris0.pdf')
plt.clf()
def iris1():
fig = iris_base()
pca = decomposition.PCA(n_components=2)
pca.fit(iris.data[50:150, np.array([0, 2])])
mean = np.mean(iris.data[50:150, np.array([0, 2])], 0)
stds = np.std(iris.data[50:150, np.array([0, 2])], 0)
components = pca.components_
plt.quiver(mean[0], mean[1], 1.5 * stds[0], 0, scale_units='xy', angles='xy', scale=1)
plt.quiver(mean[0], mean[1], 0, 1.5 * stds[1], scale_units='xy', angles='xy', scale=1)
plt.savefig('iris1.pdf')
plt.clf()
def iris2():
fig = iris_base()
pca = decomposition.PCA(n_components=2)
pca.fit(iris.data[50:150, np.array([0, 2])])
mean = np.mean(iris.data[50:150, np.array([0, 2])], 0)
stds = np.std(iris.data[50:150, np.array([0, 2])], 0)
components = pca.components_
variance_ratio = pca.explained_variance_ratio_
plt.quiver(mean[0], mean[1],
-2 * variance_ratio[0] * components[0,0],
-2 * variance_ratio[0]*components[0,1],
scale_units='xy', angles='xy', scale=1)
plt.quiver(mean[0], mean[1],
5 * variance_ratio[1] * components[1,0],
5 * variance_ratio[1] * components[1,1],
scale_units='xy', angles='xy', scale=1)
plt.savefig('iris2.pdf')
plt.clf()
def iris_pca():
X = iris.data
# pre-process
Y = X - X.mean(axis=0)
# get SVD
U,S,VT = la.svd(Y,full_matrices=False)
# project onto the first two principal components
Yhat = U[:,:2].dot(np.diag(S[:2]))
# plot results
setosa = iris.target==0
versicolor = iris.target==1
virginica = iris.target==2
p1, p2 = Yhat[:,0], Yhat[:,1]
plt.scatter(p1[setosa],p2[setosa], marker='.', color='blue', label='Setosa')
plt.scatter(p1[versicolor],p2[versicolor], marker='.', color='red', label='Versicolor')
plt.scatter(p1[virginica],p2[virginica], marker='.', color='green', label='Virginica')
plt.legend(loc=2)
plt.ylim([-4,5])
plt.xlim([-4,4])
plt.xlabel("First Principal Component")
plt.ylabel("Second Principal Component")
plt.savefig('iris_pca.pdf')
plt.clf()
def iris_scree():
X = iris.data
# pre-process
Y = X - X.mean(axis=0)
# get SVD
U,S,VT = la.svd(Y,full_matrices=False)
L = S**2
plt.plot(L/L.sum(dtype=float), 'o-')
plt.xlabel("Principal Components")
plt.ylabel("Percentage of Variance")
plt.savefig('iris_scree.pdf')
plt.clf()
if __name__ == "__main__":
iris_scree()
iris_pca()
```
#### File: Labs/PoissonEquation/plots.py
```python
from __future__ import division
import matplotlib
# matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from solution import general_secondorder_ode_fd, poisson_square
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import matplotlib.colors as mcolors
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
from solution import poisson_square
def ExercisePoisson():
from numpy import sin, cos, pi
# Domain: [0,1]x[0,1]
a1,b1 = 0.,1.
c1,d1 = 0.,1.
n=100
# Example1: Laplace's equation (Poisson with no source)
def bcs(x,y):
return x**3.
def source(x,y):
return 0.
# # Example2: Poisson's equation
# def bcs(x,y): return sin(pi*x)*cos(2.*pi*y)
# def source(x,y): return -5.*(pi**2.)*bcs(x,y)
# # Example3: Poisson's equation
# def bcs(x,y): return sin(2.*pi*y)*cos(pi*x)
# def source(x,y): return -5.*(pi**2.)*bcs(x,y)
# # Example4: Poisson's equation
# def bcs(x,y): return 1.-x +x*y + (1./2)*sin(pi*x)*sin(pi*y)
#
# def source(x,y): return -(pi**2)*sin(pi*x)*sin(pi*y)
z=poisson_square(a1,b1,c1,d1,n,bcs,source)
print '---------------'
print "Computation successful"
print '---------------'
# Plotting data
fig = plt.figure()
#---- First subplot: Numerical Solution
# ax = fig.add_subplot(121, projection='3d')
ax = fig.gca(projection='3d')
ax.set_xlabel('X'); ax.set_ylabel('Y'); ax.set_zlabel('Z')
x, y = np.linspace(a1,b1,n+1), np.linspace(c1,d1,n+1)
xv, yv = np.meshgrid(x, y)
xv, yv = xv.T, yv.T
surf = ax.plot_surface(xv, yv, z, rstride=2, cstride=2, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# #---- Second subplot: Exact Solution
# ax2 = fig.add_subplot(122, projection='3d')
# ax2.set_xlabel('X'); ax2.set_ylabel('Y'); ax2.set_zlabel('Z')
# surf2 = ax2.plot_surface(xv, yv, bcs(xv,yv), rstride=2, cstride=2, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
print "Maximum Error = \n", np.max(np.abs( z-bcs(xv,yv) ) )
# plt.savefig('Laplace.png',dpi=100)
# plt.clf()
plt.show()
# if True: return
#
# num_approx = 7 # Number of Approximations
# N = np.array([10*2**(j) for j in range(num_approx)])
# h, max_error = (b1-a1)/N[:-1], np.ones(num_approx-1)
#
# num_sol_best = poisson_square(a1,b1,c1,d1,N[-1],bcs,source)
# for j in range(len(N)-1):
# num_sol = poisson_square(a1,b1,c1,d1,N[j],bcs,source)
# max_error[j] = np.max(np.abs( num_sol- num_sol_best[::2**(num_approx-j-1), ::2**(num_approx-j-1)] ) )
# plt.loglog(h,max_error,'.-r',label="$E(h)$")
# plt.loglog(h,h**(2.),'-k',label="$h^{\, 2}$")
# plt.xlabel("$h$")
# plt.legend(loc='best')
# print "The order of the finite difference approximation is about ", ( (np.log(max_error[0]) -
# np.log(max_error[-1]) )/( np.log(h[0]) - np.log(h[-1]) ) ), "."
# plt.savefig('./Poisson_Error.pdf')
# plt.show()
return
def plotRhos():
def source(X,Y):
"""
Takes arbitrary arrays of coordinates X and Y and returns an array of the same shape
representing the charge density of nested charged squares
"""
src = np.zeros(X.shape)
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-1.5) < .1,abs(X+1.5) < .1) ,abs(Y) < 1.6),
np.logical_and( np.logical_or(abs(Y-1.5) < .1,abs(Y+1.5) < .1) ,abs(X) < 1.6))] = 1
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-0.9) < .1,abs(X+0.9) < .1) ,abs(Y) < 1.0),
np.logical_and( np.logical_or(abs(Y-0.9) < .1,abs(Y+0.9) < .1) ,abs(X) < 1.0))] = -1
return src
#Generate a color dictionary for use with LinearSegmentedColormap
#that places red and blue at the min and max values of data
#and white when data is zero
def genDict(data):
zero = 1/(1 - np.max(data)/np.min(data))
cdict = {'red': [(0.0, 1.0, 1.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
return cdict
a1 = -2.
b1 = 2.
c1 = -2.
d1 = 2.
n =100
X = np.linspace(a1,b1,n)
Y = np.linspace(c1,d1,n)
X,Y = np.meshgrid(X,Y)
rho= source(X,Y)
plt.imshow(rho,cmap = mcolors.LinearSegmentedColormap('cmap', genDict(rho)))
plt.colorbar(label="Relative Charge")
# plt.savefig("./pipesRho.pdf",dpi=100)
plt.show()
plt.clf()
return
def plotVs():
#
# def poisson_square(a1,b1,c1,d1,n,bcs, source):
# #n = number of subintervals
# # We discretize in the x dimension by a1 = x_0 < x_1< ... < x_n=b1, and
# # we discretize in the y dimension by c1 = y_0 < y_1< ... < y_n=d1.
# # This means that we have interior points {x_1, ..., x_{n-1}}\times {y_1, ..., y_{n-1}}
# # or {x_1, ..., x_m}\times {y_1, ..., y_m} where m = n-1.
# # In Python, this is indexed as {x_0, ..., x_{m-1}}\times {y_0, ..., y_{m-1}}
# # We will have m**2 pairs of interior points, and m**2 corresponding equations.
# # We will organize these equations by their y coordinates: all equations centered
# # at (x_i, y_0) will be listed first, then (x_i, y_1), and so on till (x_i, y_{m-1})
# delta_x, delta_y, h, m = (b1-a1)/n, (d1-c1)/n, (b1-a1)/n, n-1
#
# #### Here we construct the matrix A ####
# ############################## Slow #################################
# # D, diags = np.ones((1,m**2)), np.array([-m,m])
# # data = np.concatenate((D, D),axis=0)
# # A = h**(-2)*spdiags(data,diags,m**2,m**2).asformat('lil')
# # D = np.ones((1,m))
# # diags, data = np.array([0,-1,1]), np.concatenate((-4.*D,D,D),axis=0)
# # temp = h**(-2)*spdiags(data,diags,m,m).asformat('lil')
# # for i in xrange(m): A[i*m:(i+1)*m,i*m:(i+1)*m] = temp
#
# ############################## Much Faster ################################
# D1,D2,D3 = -4*np.ones((1,m**2)), np.ones((1,m**2)), np.ones((1,m**2))
# Dm1, Dm2 = np.ones((1,m**2)), np.ones((1,m**2))
# for j in range(0,D2.shape[1]):
# if (j%m)==m-1: D2[0,j]=0
# if (j%m)==0: D3[0,j]=0
# diags = np.array([0,-1,1,-m,m])
# data = np.concatenate((D1,D2,D3,Dm1,Dm2),axis=0) # This stacks up rows
# A = 1./h**2.*spdiags(data, diags, m**2,m**2).asformat('csr') # This appears to work correctly
#
# #### Here we construct the vector b ####
# b, Array = np.zeros(m**2), np.linspace(0.,1.,m+2)[1:-1]
# # In the next line, source represents the inhomogenous part of Poisson's equation
# for j in xrange(m): b[j*m:(j+1)*m] = source(a1+(b1-a1)*Array, c1+(j+1)*h*np.ones(m) )
#
# # In the next four lines, bcs represents the Dirichlet conditions on the boundary
# # y = c1+h, d1-h
# b[0:m] = b[0:m] - h**(-2.)*bcs(a1+(b1-a1)*Array,c1*np.ones(m))
# b[(m-1)*m : m**2] = b[(m-1)*m : m**2] - h**(-2.)*bcs(a1+(b1-a1)*Array,d1*np.ones(m))
# # x = a1+h, b1-h
# b[0::m] = b[0::m] - h**(-2.)*bcs(a1*np.ones(m),c1+(d1-c1)*Array)
# b[(m-1)::m] = b[(m-1)::m] - h**(-2.)*bcs(b1*np.ones(m),c1+(d1-c1)*Array)
#
# #### Here we solve the system A*soln = b ####
# soln = spsolve(A,b) # Using the conjugate gradient method: (soln, info) = cg(A,b)
#
# z = np.zeros((m+2,m+2) )
# for j in xrange(m): z[1:-1,j+1] = soln[j*m:(j+1)*m]
#
# x, y = np.linspace(a1,b1,m+2), np.linspace(c1,d1,m+2)
# z[:,0], z[:,m+1] = bcs(x,c1*np.ones(len(x)) ), bcs(x,d1*np.ones(len(x)) )
# z[0,:], z[m+1,:] = bcs(a1*np.ones(len(x)),y), bcs(b1*np.ones(len(x)),y)
# return z
#
#
def source(X,Y):
"""
Takes arbitrary arrays of coordinates X and Y and returns an array of the same shape
representing the charge density of nested charged squares
"""
src = np.zeros(X.shape)
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-1.5) < .1,abs(X+1.5) < .1) ,abs(Y) < 1.6),
np.logical_and( np.logical_or(abs(Y-1.5) < .1,abs(Y+1.5) < .1) ,abs(X) < 1.6))] = 1
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-0.9) < .1,abs(X+0.9) < .1) ,abs(Y) < 1.0),
np.logical_and( np.logical_or(abs(Y-0.9) < .1,abs(Y+0.9) < .1) ,abs(X) < 1.0))] = -1
return src
#Generate a color dictionary for use with LinearSegmentedColormap
#that places red and blue at the min and max values of data
#and white when data is zero
def genDict(data):
zero = 1/(1 - np.max(data)/np.min(data))
cdict = {'red': [(0.0, 1.0, 1.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
return cdict
a1 = -2.
b1 = 2.
c1 = -2.
d1 = 2.
n = 5
# X = np.linspace(a1,b1,n)
# Y = np.linspace(c1,d1,n)
# X,Y = np.meshgrid(X,Y)
#
# rho= source(X,Y)
V = poisson_square(a1,b1,c1,d1,100,lambda x, y:0, lambda X,Y: source(X,Y))
cdict = genDict(V)
plt.imshow(V,cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict))
plt.colorbar(label="Voltage")
# plt.savefig("./pipesV.png",dpi=100)
plt.show()
plt.clf()
return
if __name__ == "__main__":
# example()
# Exercise1()
# ExercisePoisson()
plotRhos()
# plotVs()
```
#### File: Labs/PolicyFunctionIteration/plots.py
```python
import scipy as sp
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import numpy as np
import math
from discretelognorm import discretelognorm
def reservation_wage():
m = 20
v = 200
N = 500
Wmax = 100
Wmin = 0
gamma = .10
alpha = .5
beta = .9
e_params = (m, v)
u = lambda c: np.sqrt(c)
w = np.linspace(Wmin, Wmax, N)
uaw = u(alpha*w).reshape((N,1))
uw = u(w)
f = discretelognorm(w, *e_params)
VE = np.zeros(N)
EVU = np.zeros(N)
VU = np.zeros((N,N))
MVE = np.empty((N,N)) #tiled version of VE
MEVU = np.empty((N,N)) #tiled version of EVU
delta = 1.
i = 0
while delta >= 1e-9:
i+=1
#update tiled value functions
MVE[:,:] = VE.reshape((1,N))
MEVU[:,:] = EVU.reshape((N,1))
#calculate new value functions
VU1 = uaw + beta*np.max(np.dstack([MEVU, MVE]), axis=2)
VE1 = uw + beta*((1-gamma)*VE + gamma*EVU)
#test for convergence
d1 = ((VE1-VE)**2).sum()
d2 = ((VU1-VU)**2).sum()
delta = max(d1,d2)
#update
VU = VU1
VE = VE1
EVU = np.dot(VU,f).ravel()
#calculate policy function
PSI = np.argmax(np.dstack([MEVU,MVE]), axis=2)
#calculate and plot reservation wage function
wr_ind = np.argmax(np.diff(PSI), axis = 1)
wr = w[wr_ind]
plt.plot(w,wr)
plt.savefig('reservation_wage.pdf')
plt.clf()
#plot discrete policy function
def disc_policy():
#First compute policy function...==========================================
N = 500
w = sp.linspace(0,100,N)
w = w.reshape(N,1)
u = lambda c: sp.sqrt(c)
util_vec = u(w)
alpha = 0.5
alpha_util = u(alpha*w)
alpha_util_grid = sp.repeat(alpha_util,N,1)
m = 20
v = 200
f = discretelognorm(w,m,v)
VEprime = sp.zeros((N,1))
VUprime = sp.zeros((N,N))
EVUprime = sp.zeros((N,1))
psiprime = sp.ones((N,1))
gamma = 0.1
beta = 0.9
m = 15
tol = 10**-9
delta = 1+tol
it = 0
while (delta >= tol):
it += 1
psi = psiprime.copy()
arg1 = sp.repeat(sp.transpose(VEprime),N,0)
arg2 = sp.repeat(EVUprime,N,1)
arg = sp.array([arg2,arg1])
psiprime = sp.argmax(arg,axis = 0)
for j in sp.arange(0,m):
VE = VEprime.copy()
VU = VUprime.copy()
EVU = EVUprime.copy()
VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
arg = arg1+arg2
VUprime = alpha_util_grid + beta*arg
EVUprime = sp.dot(VUprime,f)
delta = sp.linalg.norm(psiprime -psi)
wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
wr = w[wr_ind]
print w[250],wr[250]
#Then plot=================================================================
plt.plot(w,psiprime[250,:])
plt.ylim([-.5,1.5])
plt.xlabel(r'$w\prime$')
plt.yticks([0,1])
plt.savefig('disc_policy.pdf')
plt.clf()
if __name__ == "__main__":
reservation_wage()
disc_policy()
```
#### File: Labs/PolicyFunctionIteration/policy_solutions.py
```python
import numpy as np
import scipy as sp
from scipy import sparse
from scipy.sparse import linalg
import math
from matplotlib import pyplot as plt
from scipy import linalg as la
def u(x):
return np.sqrt(x).flatten()
def policyIter(beta, N, Wmax=1.):
"""
Solve the infinite horizon cake eating problem using policy function iteration.
Inputs:
beta -- float, the discount factor
N -- integer, size of discrete approximation of cake
Wmax -- total amount of cake available
Returns:
values -- converged value function (Numpy array of length N)
psi -- converged policy function (Numpy array of length N)
"""
W = np.linspace(0,Wmax,N) #state space vector
I = sparse.identity(N, format='csr')
#precompute u(W-W') for all possible inputs
actions = np.tile(W, N).reshape((N,N)).T
actions = actions - actions.T
actions[actions<0] = 0
rewards = np.sqrt(actions)
rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
psi_ind = np.arange(N)
rows = np.arange(0,N)
tol = 1.
while tol >= 1e-9:
columns = psi_ind
data = np.ones(N)
Q = sparse.coo_matrix((data,(rows,columns)),shape=(N,N))
Q = Q.tocsr()
values = linalg.spsolve(I-beta*Q, u(W-W[psi_ind])).reshape(1,N)
psi_ind1 = np.argmax(rewards + beta*values, axis=1)
tol = math.sqrt(((W[psi_ind] - W[psi_ind1])**2).sum())
psi_ind = psi_ind1
return values.flatten(), W[psi_ind]
def modPolicyIter(beta, N, Wmax=1., m=15):
"""
Solve the infinite horizon cake eating problem using modified policy function iteration.
Inputs:
beta -- float, the discount factor
N -- integer, size of discrete approximation of cake
Wmax -- total amount of cake available
Returns:
values -- converged value function (Numpy array of length N)
psi -- converged policy function (Numpy array of length N)
"""
W = np.linspace(0,Wmax,N) #state space vector
#precompute u(W-W') for all possible inputs
actions = np.tile(W, N).reshape((N,N)).T
actions = actions - actions.T
actions[actions<0] = 0
rewards = np.sqrt(actions)
rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
psi_ind = np.arange(N)
values = np.zeros(N)
tol = 1.
while tol >= 1e-9:
for i in xrange(m):
values = u(W - W[psi_ind]) + beta*values[psi_ind]
psi_ind1 = np.argmax(rewards + beta*values.reshape(1,N), axis=1)
tol = math.sqrt(((W[psi_ind] - W[psi_ind1])**2).sum())
psi_ind = psi_ind1
return values.flatten(), W[psi_ind]
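# Usage sketch (added; the parameter values are arbitrary): both solvers should
# return essentially the same policy, since modified policy iteration only changes
# how the policy-evaluation step is carried out.
def _compare_solvers(beta=0.9, N=100):
    v1, psi1 = policyIter(beta, N)
    v2, psi2 = modPolicyIter(beta, N)
    return np.max(np.abs(psi1 - psi2))    # expected to be very small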
```
#### File: Labs/PolicyFunctionIteration/solutionstester.py
```python
import numpy as np
import scipy as sp
from scipy.sparse.linalg import spsolve
from matplotlib import pyplot as plt
def Problem2Real():
beta = 0.95
N = 1000
u = lambda c: sp.sqrt(c)
psi_ind = sp.arange(0,N)
W = sp.linspace(0,1,N)
X, Y = sp.meshgrid(W,W)
Wdiff = Y-X
index = Wdiff <0
Wdiff[index] = 0
util_grid = u(Wdiff)
I = sp.sparse.identity(N)
delta = 1
z = 0
while (delta > 10**-9):
z = z+1
#print(z)
psi_prev = psi_ind.copy()
rows = sp.arange(0,N)
columns = psi_ind
data = sp.ones(N)
Q = sp.sparse.coo_matrix((data,(rows,columns)),shape = (N,N))
Q = Q.tocsr()
#Solve for Value Function
V = spsolve(I-beta*Q,u(W-W[psi_ind]))
#Find Policy Function
arg = util_grid + beta*V
arg[index] = -10**10
psi_ind = sp.argmax(arg,axis = 1)
delta = sp.amax(sp.absolute(W[psi_ind]-W[psi_prev]))
return W[psi_ind]
def Problem3Real():
beta = 0.95
N = 1000
u = lambda c: sp.sqrt(c)
W = sp.linspace(0,1,N)
W = W.reshape(N,1)
X, Y = sp.meshgrid(W,W)
Wdiff = Y-X
index = Wdiff <0
Wdiff[index] = 0
util_grid = u(Wdiff)
V = sp.zeros((N,1))
z = 0
r = 15
delta =1
while (delta > 10**-9):
z += 1
#print(z)
#Update Policy Function
arg = util_grid + beta*sp.transpose(V)
arg[index] = -10**10
psi_ind = sp.argmax(arg,axis = 1)
V_prev = V
#Iterate on Value Function
for j in sp.arange(0,r):
V = u(W-W[psi_ind]) + beta*V[psi_ind]
delta = sp.dot(sp.transpose(V_prev - V),V_prev-V)
return W[psi_ind]
import solutions as sol
prob2=Problem2Real()
prob3=Problem3Real()
x=sol.Problem2()
if(np.allclose(prob2,x)):
print("Problem2 Passed")
else:
print("Problem2 Falied")
x=sol.Problem3()
if(np.allclose(prob3,x)):
print("Problem3 Passed")
else:
print("Problem3 Falied")
sol.Problem4()
import numpy as np
import scipy as sp
from discretelognorm import discretelognorm
from matplotlib import pyplot as plt
def Problem5Real():
N = 500
w = sp.linspace(0,100,N)
w = w.reshape(N,1)
u = lambda c: sp.sqrt(c)
util_vec = u(w)
alpha = 0.5
alpha_util = u(alpha*w)
alpha_util_grid = sp.repeat(alpha_util,N,1)
m = 20
v = 200
f = discretelognorm(w,m,v)
VEprime = sp.zeros((N,1))
VUprime = sp.zeros((N,N))
EVUprime = sp.zeros((N,1))
gamma = 0.1
beta = 0.9
tol = 10**-9
delta1 = 1+tol
delta2 = 1+tol
it = 0
while ((delta1 >= tol) or (delta2 >= tol)):
it += 1
VE = VEprime.copy()
VU = VUprime.copy()
EVU = EVUprime.copy()
VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
arg1 = sp.repeat(sp.transpose(VE),N,0)
arg2 = sp.repeat(EVU,N,1)
arg = sp.array([arg2,arg1])
VUprime = alpha_util_grid + beta*sp.amax(arg,axis = 0)
psi = sp.argmax(arg,axis = 0)
EVUprime = sp.dot(VUprime,f)
delta1 = sp.linalg.norm(VEprime - VE)
delta2 = sp.linalg.norm(VUprime - VU)
#print(delta1)
wr_ind = sp.argmax(sp.diff(psi), axis = 1)
wr = w[wr_ind]
return wr
def Problem6Real():
N = 500
w = sp.linspace(0,100,N)
w = w.reshape(N,1)
u = lambda c: sp.sqrt(c)
util_vec = u(w)
alpha = 0.5
alpha_util = u(alpha*w)
alpha_util_grid = sp.repeat(alpha_util,N,1)
m = 20
v = 200
f = discretelognorm(w,m,v)
VEprime = sp.zeros((N,1))
VUprime = sp.zeros((N,N))
EVUprime = sp.zeros((N,1))
psiprime = sp.ones((N,1))
gamma = 0.1
beta = 0.9
m = 15
tol = 10**-9
delta = 1+tol
it = 0
while (delta >= tol):
it += 1
psi = psiprime.copy()
arg1 = sp.repeat(sp.transpose(VEprime),N,0)
arg2 = sp.repeat(EVUprime,N,1)
arg = sp.array([arg2,arg1])
psiprime = sp.argmax(arg,axis = 0)
for j in sp.arange(0,m):
VE = VEprime.copy()
VU = VUprime.copy()
EVU = EVUprime.copy()
VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
arg = arg1+arg2
VUprime = alpha_util_grid + beta*arg
EVUprime = sp.dot(VUprime,f)
delta = sp.linalg.norm(psiprime -psi)
#print(delta)
wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
wr = w[wr_ind]
plt.plot(w,wr)
plt.show()
return wr
import solutions as sol
prob1=Problem5Real()
prob2=Problem6Real()
x=sol.Problem5()
if(np.allclose(prob1,x)):
print("Problem5 Passed")
else:
print("Problem5 Falied")
x=sol.Problem6()
if(np.allclose(prob2,x)):
print("Problem6 Passed")
else:
print("Problem6 Falied")
sol.Problem7()
```
#### File: Labs/QR/ct.py
```python
import numpy as np
from scipy import linalg as la
from math import copysign
def hqr(A):
"""Finds the QR decomposition of A using Householder reflectors.
input: A, mxn array with m>=n
output: Q, orthogonal mxm array
R, upper triangular mxn array
s.t QR = A
"""
# This is just a pure Python implementation.
# It's not fully optimized, but it should
# have the right asymptotic speed.
# initialize Q and R
# start Q as an identity
# start R as a C-contiguous copy of A
# take a transpose of Q to start out
# so it is C-contiguous when we return the answer
Q = np.eye(A.shape[0]).T
R = np.array(A, order="C")
# initialize m and n for convenience
m, n = R.shape
# avoid reallocating v in the for loop
    v = np.empty(A.shape[0])  # length m so the slice matches R[k:, k] even when m > n
for k in xrange(n-1):
# get a slice of the temporary array
vk = v[k:]
# fill it with corresponding values from R
vk[:] = R[k:,k]
# add in the term that makes the reflection work
vk[0] += copysign(la.norm(vk), vk[0])
# normalize it so it's an orthogonal transform
vk /= la.norm(vk)
# apply projection to R
R[k:,k:] -= 2 * np.outer(vk, vk.dot(R[k:,k:]))
# Apply it to Q
Q[k:] -= 2 * np.outer(vk, vk.dot(Q[k:]))
# note that its returning Q.T, not Q itself
return Q.T, R
def hess(A):
"""Computes the upper Hessenberg form of A using Householder reflectors.
input: A, mxn array
output: Q, orthogonal mxm array
H, upper Hessenberg
s.t. Q.dot(H).dot(Q.T) = A
"""
# similar approach as the householder function.
# again, not perfectly optimized, but good enough.
Q = np.eye(A.shape[0]).T
H = np.array(A, order="C")
# initialize m and n for convenience
m, n = H.shape
# avoid reallocating v in the for loop
v = np.empty(A.shape[1]-1)
for k in xrange(n-2):
# get a slice of the temporary array
vk = v[k:]
# fill it with corresponding values from R
vk[:] = H[k+1:,k]
# add in the term that makes the reflection work
vk[0] += copysign(la.norm(vk), vk[0])
# normalize it so it's an orthogonal transform
vk /= la.norm(vk)
# apply projection to H on the left
H[k+1:,k:] -= 2 * np.outer(vk, vk.dot(H[k+1:,k:]))
# apply projection to H on the right
H[:,k+1:] -= 2 * np.outer(H[:,k+1:].dot(vk), vk)
# Apply it to Q
Q[k+1:] -= 2 * np.outer(vk, vk.dot(Q[k+1:]))
    return Q.T, H  # as in hqr, return the transpose so that Q.dot(H).dot(Q.T) = A holds
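# Verification sketch (added): for a random square matrix the QR factors should
# reproduce A with R (numerically) upper triangular, and the Hessenberg factors
# should satisfy the relation stated in the hess() docstring.
def _check_factorizations(n=6):
    A = np.random.rand(n, n)
    Q, R = hqr(A)
    Qh, H = hess(A)
    return (np.allclose(Q.dot(R), A),
            np.allclose(np.tril(R, -1), 0),
            np.allclose(Qh.dot(H).dot(Qh.T), A))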
```
#### File: Labs/RSA/rsa_tools.py
```python
from itertools import izip_longest
def partition(iterable, n, fillvalue=None):
"""Partition data into blocks of length 'n', padding with 'fillvalue'
if needed. Return a list of the partitions.
Example:
>>> partition('ABCDEFG, 3, 'x')
"""
args = [iter(iterable)] * n
pieces = izip_longest(fillvalue=fillvalue, *args)
return [''.join(block) for block in pieces]
def string_size(n):
"""Return the maximum number of characters that can be encoded with the
public key (e, n). In other words, find the largest integer L such that
if 'string' has at most L characters, then string_to_int('string') will
be less than 'n'.
"""
L=0
max_int = 0
while max_int < n:
max_int += sum([2**i for i in range(8*L, 8*L+8)])
L += 1
return L-1
def string_to_int(msg):
"""Convert the string 'msg' to an integer.
This function is the inverse of int_to_string().
"""
# bytearray will give us the ASCII values for each character
if not isinstance(msg, bytearray):
msg = bytearray(msg)
binmsg = []
# convert each character to binary
for c in msg:
binmsg.append(bin(c)[2:].zfill(8))
return int(''.join(binmsg), 2)
def int_to_string(msg):
"""Convert the integer 'msg' to a string.
This function is the inverse of string_to_int().
"""
# convert to binary first
binmsg = bin(msg)[2:]
# pad the message so length is divisible by 8
binmsg = "0"*(8-(len(binmsg)%8)) + binmsg
msg = bytearray()
# convert block of 8 bits back to ASCII
for block in partition(binmsg, 8):
msg.append(int(block, 2))
return str(msg)
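# Roundtrip sketch (added for illustration): string_to_int and int_to_string are
# inverses, and partition pads the final block with the fill value.
def _example_roundtrip():
    message = "hello world"
    assert int_to_string(string_to_int(message)) == message
    return partition(message, 4, fillvalue='x')    # ['hell', 'o wo', 'rldx']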
```
#### File: Labs/Spectral2/solution.py
```python
from __future__ import division
import numpy as np
def cheb(N):
def p(j1):
if (j1==0 or j1 == N): return 2.
else: return 1.
x = np.cos(np.pi*np.arange(N+1)/N)
D = np.zeros((N+1,N+1))
# j represents column index
for j in range(0,N+1):
for i in range(0,j)+range(j+1,N+1):
D[i,j] = ((-1.)**(i+j))*p(i)/( p(j)*(x[i]- x[j]) )
# Values on the main diagonal
for j in xrange(1,N):
D[j,j] = -x[j]/(2.*(1-x[j]**2.))
D[0,0] = (1.+2.*N**2.)/6.
D[N,N] = -(1.+2.*N**2.)/6.
return D,x
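# Usage sketch (added): D differentiates polynomial interpolants at the Chebyshev
# points x, so D.dot(f(x)) approximates f'(x). For f(x) = x**3 the result should
# match 3*x**2 essentially to machine precision when N >= 3.
def _example_cheb(N=10):
    D, x = cheb(N)
    return np.max(np.abs(D.dot(x**3) - 3 * x**2))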
```
#### File: Labs/ValueFunctionIteration/tauchenhussey.py
```python
import scipy.stats as st
import scipy as sp
def tauchenhussey(N,mu,rho,sigma, baseSigma):
"""
Function tauchenhussey
Purpose: Finds a Markov chain whose sample paths
approximate those of the AR(1) process
z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1)
where eps are normal with stddev sigma
Format: {Z, Zprob} = TauchenHussey(N,mu,rho,sigma,m)
Input: N scalar, number of nodes for Z
mu scalar, unconditional mean of process
rho scalar
sigma scalar, std. dev. of epsilons
baseSigma scalar, std. dev. used to calculate Gaussian
quadrature weights and nodes, i.e. to build the
grid. I recommend that you use
baseSigma = w*sigma +(1-w)*sigmaZ where sigmaZ = sigma/sqrt(1-rho^2),
and w = 0.5 + rho/4. Tauchen & Hussey recommend
baseSigma = sigma, and also mention baseSigma = sigmaZ.
Output: Z N*1 vector, nodes for Z
Zprob N*N matrix, transition probabilities
Author: <NAME>, Brigham Young University (python)
<NAME>, Stockholm School of Economics (original)
January 2007 (updated August 2007)
This procedure is an implementation of Tauchen and Hussey's
algorithm, Econometrica (1991, Vol. 59(2), pp. 371-396)
"""
Z = sp.zeros((N,1))
Zprob = sp.zeros((N,N))
[Z,w] = gaussnorm(N,mu,baseSigma**2)
for i in range(N):
for j in range(N):
EZprime = (1-rho)*mu + rho*Z[i]
Zprob[i,j] = w[j] * st.norm.pdf(Z[j],EZprime,sigma) / st.norm.pdf(Z[j],mu,baseSigma)
for i in range(N):
Zprob[i,:] = Zprob[i,:] / sum(Zprob[i,:])
return Z.T,Zprob
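# Usage sketch (added; the parameter values are illustrative only): build a
# 5-state chain approximating an AR(1) process, using the baseSigma weighting
# recommended in the docstring. Each row of the transition matrix sums to one.
def _example_tauchenhussey():
    N, mu, rho, sigma = 5, 4.0, 0.5, 1.0
    sigmaZ = sigma / sp.sqrt(1 - rho**2)
    w = 0.5 + rho / 4.
    baseSigma = w * sigma + (1 - w) * sigmaZ
    Z, Zprob = tauchenhussey(N, mu, rho, sigma, baseSigma)
    return Z, Zprob.sum(axis=1)    # row sums should all be 1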
def gaussnorm(n,mu,s2):
"""
Find Gaussian nodes and weights for the normal distribution
n = # nodes
mu = mean
s2 = variance
"""
[x0,w0] = gausshermite(n)
x = x0*sp.sqrt(2.*s2) + mu
w = w0/sp.sqrt(sp.pi)
return [x,w]
def gausshermite(n):
"""
Gauss Hermite nodes and weights following 'Numerical Recipes for C'
"""
MAXIT = 10
EPS = 3e-14
PIM4 = 0.7511255444649425
x = sp.zeros((n,1))
w = sp.zeros((n,1))
m = int((n+1)/2)
for i in range(m):
if i == 0:
z = sp.sqrt((2.*n+1)-1.85575*(2.*n+1)**(-0.16667))
elif i == 1:
z = z - 1.14*(n**0.426)/z
elif i == 2:
z = 1.86*z - 0.86*x[0]
elif i == 3:
z = 1.91*z - 0.91*x[1]
else:
z = 2*z - x[i-1]
for iter in range(MAXIT):
p1 = PIM4
p2 = 0.
for j in range(n):
p3 = p2
p2 = p1
p1 = z*sp.sqrt(2./(j+1))*p2 - sp.sqrt(float(j)/(j+1))*p3
pp = sp.sqrt(2.*n)*p2
z1 = z
z = z1 - p1/pp
if sp.absolute(z-z1) <= EPS:
break
if iter>MAXIT:
            raise ValueError('too many iterations')
x[i,0] = z
x[n-i-1,0] = -z
w[i,0] = 2./pp/pp
w[n-i-1,0] = w[i]
x = x[::-1]
return [x,w]
```
#### File: Labs/WeightLoss/solution.py
```python
import numpy as np
from scipy.special import lambertw
from scipy.integrate import ode
# Global variables. May be updated in weightloss4
class c(object):
pass
#
# Fixed Constants
#
c.rho_F = 9400. #
c.rho_L = 1800. #
c.gamma_F = 3.2 #
c.gamma_L = 22. #
c.eta_F = 180. #
c.eta_L = 230. #
c.C = 10.4 # Forbes constant
c.beta_AT = 0.14 # Adaptive Thermogenesis
c.beta_TEF = 0.1 # Thermic Effect of Feeding
K = 0
# def getBW(F,L,T,EI,PAL):
# t, y = compute_weight_curve(F,L,T,EI,PAL)
# out = np.sum(y[-1,:])
# return out
# def dBW(Fi,EIi,PALi,EIf,PALf):
# #
# Given an intervention (EI,PAL), find the dBW achieved in equilibrium
# #
# deltaEI = EIf - EIi
# psi = (1/PALf - 1/PALi)*EIi + (1/PALf-c.beta_AT)*deltaEI + c.gamma_F*Fi
# phi = c.gamma_F * Fi / (c.gamma_L * c.C)
# out = (psi - c.gamma_L*Fi + c.gamma_L*c.C*(c.gamma_L-c.gamma_F)/c.gamma_F * lambertw(phi*np.exp(psi/(c.C*c.gamma_L))))/c.gamma_L
# return out
#
#
# def dEI(Fi,deltaBW,EIi,PALi,PALf):
# #
# Given a desired BW, find the dEI needed to achieve that in equilibrium
# #
# Ff = c.C*lambertw(np.exp(Fi/c.C)*np.exp(deltaBW/c.C)*Fi/c.C)
# chi = EIi/PALi + c.gamma_L*deltaBW+(c.gamma_F-c.gamma_L)*(Ff-Fi)
# out = (chi*PALf-EIi)/(1-c.beta_AT*PALf)
# return out
#
# def generic_RMR(BW,age,H,sex):
# #
# Mufflin equation
# #
# if sex=='male':
# out = 9.99*BW + 625*H - 4.92*age+5
# else:
# out = 9.99*BW + 625*H - 4.92*age-161
# return out
# def getK(F,L,EI,PAL,EB):
# if EB==0:
# p = 0
# else:
# p = Forbes(F)
# K = (1./PAL-c.beta_AT)*EI-c.gamma_L*L-c.gamma_F*F-((c.eta_F/c.rho_F)*(1-p)+(c.eta_L/c.rho_L)*p+1./PAL)*EB
# return K
def fat_mass(BW, age, H, sex):
BMI = BW / H ** 2.
if sex == 'male':
return BW * (-103.91 + 37.31 * np.log(BMI) + 0.14 * age) / 100
else:
return BW * (-102.01 + 39.96 * np.log(BMI) + 0.14 * age) / 100
def compute_weight_curve(F, L, T, EI, PAL):
y0 = np.array([F, L])
ode_f = lambda t, y: weight_odes(t, y, EI, PAL)
ode_object = ode(ode_f).set_integrator('dopri5', rtol=1e-6, atol=1e-8)
ode_object.set_initial_value(y0, 0.)
t = np.linspace(0., T, 151)
y = np.zeros((len(t), len(y0)))
y[0,:] = y0
for j in range(1, len(t)):
y[j,:] = ode_object.integrate(t[j])
return t, y
def weight_odes(t, y, EI, PAL):
F, L = y[0], y[1]
p, EB = Forbes(F), EnergyBalance(F, L, EI(t), PAL(t))
return np.array([(1 - p) * EB / c.rho_F, p * EB / c.rho_L])
def EnergyBalance(F, L, EI, PAL):
p = Forbes(F)
a1 = (1. / PAL - c.beta_AT) * EI - K - c.gamma_F * F - c.gamma_L * L
a2 = (1 - p) * c.eta_F / c.rho_F + p * c.eta_L / c.rho_L + 1. / PAL
return a1 / a2
def Forbes(F):
C1 = c.C * c.rho_L / c.rho_F
return 1. * C1 / (C1 + F)
# Compare with Forbes and Mufflin
# Estimated initial body fat mass
# Jackson AS et al., Int J Obes Relat Metab Disord. 2002 Jun;26(6):789-96
#
# Implementation of the Runge Kutta fourth order method
def RK4(func, a, b, n, y0, dim):
x, y = np.linspace(a, b, n + 1), np.zeros((n + 1, dim))
y[0,:], h = y0, 1.*(b-a)/n
for j in range(0, len(x) - 1):
k1 = h*func(x[j], y[j,:])
k2 = h*func(x[j]+h/2., y[j,:]+(1/2.)*k1)
k3 = h*func(x[j]+h/2., y[j,:]+(1/2.)*k2)
k4 = h*func(x[j+1], y[j,:]+k3)
y[j+1,:] = y[j,:] + (1/6.)*(k1 + 2*k2 + 2*k3 + k4)
return y
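# Usage sketch (added): integrate the scalar test problem y' = -y, y(0) = 1 on
# [0, 2]; the RK4 solution should stay very close to exp(-x).
def _example_RK4(n=200):
    f = lambda x, y: -y
    y = RK4(f, 0., 2., n, np.array([1.]), 1)
    x = np.linspace(0., 2., n + 1)
    return np.max(np.abs(y[:, 0] - np.exp(-x)))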
```
#### File: Python/Arrays/solutions_Arrays.py
```python
import math
import timeit
import itertools
import string
import numpy as np
from numpy.random import rand, randn
from fractions import gcd
from scipy import linalg
from scipy.misc import factorial
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# string, itertools, gcd, and linalg are used by the cipher helpers below;
# rand is used in problem2().
#problem 1
def arrmul(A,B):
new = []
for i in range(len(A)):
newrow = []
for k in range(len(B[0])):
tot = 0
for j in range(len(B)):
tot += A[i][j] * B[j][k]
newrow.append(tot)
new.append(newrow)
return new
def timefunction(f, *args, **kwargs):
pfunc = lambda: f(*args, **kwargs)
print min(timeit.repeat(pfunc, number = 1, repeat = 1))
k = 100
A = [range(i, i+k) for i in range(0, k**2, k)]
B = [range(i, i+k) for i in range (0, k**2, k)]
#timefunction(np.dot, np.array(A), np.array(B))
#timefunction(arrmul, A, B)
'''
Lists
k=100: 0.195740438121
k=200: 1.96796994247
k=300: 7.87688692047
Arrays
k=100: 0.000890023231932
k=200: 0.00714212847242
k=300: 0.0233234591569
It takes significantly less time to square a two dimensional NumPy array
than it does to square a two dimensional list. This is because Python is a
high level interpreted language and thus slower than lower level compiled
languages. NumPy has heavily optimized algorithms that use Python to run code that
has been written and optimized in other languages (usually C or Fortran)
'''
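# Demonstration sketch (added): the same comparison as above, wrapped in a function
# so it can be run without uncommenting the lines above. 'k' is kept small so the
# pure-Python multiplication finishes quickly.
def compare_multiplication(k=100):
    A = [range(i, i + k) for i in range(0, k**2, k)]
    NumA = np.array(A)
    print "pure Python:",
    timefunction(arrmul, A, A)
    print "NumPy dot:  ",
    timefunction(np.dot, NumA, NumA)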
#problem 2
def problem2():
A = rand(1000,1000)
#timeit A.reshape(A.size)
#timeit A.flatten()
#timeit A.reshape((1, A.size))
print "A.reshape(A.size) had a best time of 5.65 microseconds"
print "A.flatten() had a best time of 26.2 milliseconds"
print "A.reshape((1, A.size)) had a best time of 3.15 microseconds"
'''
A.flatten() takes longer because it is allocating a new array
in memory and copying all of the values from the input array into
the new array. Will return a copy (which takes more time).
A.reshape() only changes the way the array is read from memory by changing
the shape of the array. It doesn't touch any of the data of the array.
Will return a view (which takes less time) if possible.
'''
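# Demonstration sketch (added): reshape returns a view that shares memory with the
# original array whenever possible, while flatten always returns a copy.
def view_vs_copy():
    A = np.arange(12).reshape((3, 4))
    view = A.reshape(A.size)
    copy = A.flatten()
    view[0] = 100    # also changes A, because 'view' shares A's memory
    return A[0, 0] == 100, copy[0] == 0    # (True, True)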
# problem 3
def laplace(U, tol):
new = U.copy()
dif = tol
while tol <= dif:
new[1:-1,1:-1] = (U[:-2,1:-1] + U[2:,1:-1] + U[1:-1,:-2] + U[1:-1,2:])/4.0
dif = np.max(np.absolute(U-new))
U[:] = new
n = 100
tol = .0001
U = np.ones((n, n))
U[:, 0] = 100   # set north boundary condition
U[:, -1] = 100  # set south boundary condition
U[0] = 0        # set west boundary condition
U[-1] = 0       # set east boundary condition
laplace(U, tol) # U has been changed in place
x = np.linspace (0, 1, n)
y = np.linspace (0, 1, n)
X, Y = np.meshgrid (x, y)
fig = plt.figure()
ax = fig.gca( projection = '3d')
ax.plot_surface (X, Y, U, rstride=5)
plt.show()
# problem 4
#as n increases the variance approaches 0.
def large_numbers(n):
# demonstrates law of large numbers
# as n increases, variance goes to 0.
A = randn(n, n)
return A.mean(axis=1).var()
# problem 5
def rgb():
A = np.random.randint(0, 256, (100, 100, 3))
A * [.5, .5, 1]
# problem 6
def arcsin_approx():
n = 70
s = 1. * np.arange(70,-1,-1)
r = factorial(2*s)/((2*s+1)*(factorial(s)**2)*(4**s)) # computes coefficients
q = np.zeros(142)
q[0::2] = r
P = np.poly1d(q)
return P(1/math.sqrt(2))*4
def W_approx():
n = 20
s = 1. * np.arange(20,0,-1)
r = ((-s)**(s-1))/(factorial(s)) # computes coefficients
q = np.zeros(21)
q[0:-1] = r
P = np.poly1d(q)
return P(.25)
print W_approx()*math.e**W_approx() #verification!
# The problems from here on are no longer in the first lab.
# problem 6
def egcd(a, b):
'''
Extended Euclidean algorithm
    Returns (g, x, y) such that a*x + b*y = g = gcd(a, b)
Source: http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
'''
x,y, u,v = 0,1, 1,0
while a != 0:
q,r = b//a,b%a; m,n = x-u*q,y-v*q
b,a, x,y, u,v = a,r, u,v, m,n
return b, x, y
def modinv(a, m):
'''
Find the modular inverse.
Source: http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
'''
g, x, y = egcd(a, m)
if g != 1:
return None # modular inverse does not exist
else:
return x % m
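# Worked example (added): modinv(7, 26) is 15, since 7 * 15 = 105 = 4*26 + 1, so
# 7*15 is congruent to 1 modulo 26. This is the inverse used by the Hill cipher
# helpers below.
def _example_modinv():
    inv = modinv(7, 26)
    assert (7 * inv) % 26 == 1
    return inv    # 15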
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def blockize(msg, n):
lut = {a:i for i ,a in enumerate(string.lowercase)}
msg = "".join(msg.lower().split())
return list(map(np.array, grouper(map(lut.__getitem__, msg), n, fillvalue=lut['x'])))
def inv_mat(n):
tries = 0
while True:
a = np.random.randint(1000, size=(n, n)) % 26
d = round(linalg.det(a))
if gcd(int(d), 26) == 1:
break
tries += 1
return a, d
def encode(msg, k):
ciphertext = []
n = k.shape[0]
ilut = {i:a for i, a in enumerate(string.lowercase)}
for i in blockize(msg, n):
s = i.dot(k) % 26
ciphertext.append("".join(map(ilut.__getitem__, s)))
return "".join(ciphertext)
def inv_key(key):
d = round(linalg.det(key))
inv_d = modinv(int(d), 26)
ik = np.round(d*linalg.inv(key))
return (ik*inv_d) % 26
def decode(msg, k):
ik = inv_key(k)
n = ik.shape[0]
plaintext = []
ilut = {i:a for i, a in enumerate(string.lowercase)}
for i in blockize(msg, n):
s = i.dot(ik) % 26
plaintext.append("".join(map(ilut.__getitem__, s)))
return "".join(plaintext)
def prob5():
im = np.random.randint(1,256,(100,100,3))
b = np.array([0.5,0.5,1])
im_bluer = (im * b).astype(int)
def broadcast_1():
"""All input arrays have exactly the same shape"""
a = np.random.rand(4, 5)
b = np.random.rand(4, 5)
r = a * b
print "Case 1: {} * {} = {}".format(a.shape, b.shape, r.shape)
def broadcast_2():
"""All input arrays are of the same dimension and
the length of corresponding dimensions match or is 1"""
a = np.random.rand(5, 4, 1, 6)
b = np.random.rand(5, 4, 1, 1)
r = a * b
print "Case 2: {} * {} = {}".format(a.shape, b.shape, r.shape)
def broadcast_3():
"""All input arrays of fewer dimension can have 1
prepended to their shapes to satisfy the second criteria."""
a = np.random.rand(1, 6)
b = np.random.rand(5, 4, 1, 6)
r = a * b
print "Case 3: {} * {} = {}".format(a.shape, b.shape, r.shape)
def series_problem_a():
    c = 1. * np.arange(70, -1, -1) # original values for n (floats, so 4**c does not overflow)
    c = factorial(2*c) / ((2*c+1) * factorial(c)**2 * 4**c) #series coeff's
p = np.zeros(2*c.size) # make space for skipped zero-terms
p[::2] = c # set nonzero polynomial terms to the series coeff's
P = np.poly1d(p) # make a polynomial out of it
return 6 * P(.5) #return pi (since pi/6 = arcsin(1/2))
def series_problem_b():
    p = 1. * np.arange(20, -1, -1) # original values for n (floats, to avoid integer overflow)
p = (-p)**(p-1) / factorial(p) #compute coefficients
p[-1] = 0. # get rid of NAN in the zero-term
P = np.poly1d(p) # Make a polynomial
print P(.25) * np.exp(P(.25)) # test it
return P(.25) # return the computed value
```
#### File: Python/cvxopt/cvxoptsolutions.py
```python
from cvxopt import matrix ,solvers
import numpy as np
# <codecell>
def Problem1():
c = matrix([2., 1., 3.])
G= matrix([[-1., -2., -1., 0.,0.],[-2., -1., 0., -1.,0.],[0., -3., 0., 0.,-1.]])
h = matrix([ -3., -10., 0., 0.,0.])
sol = solvers.lp(c,G,h)
return sol['x'],sol['primal objective']
# <codecell>
#x,y=Problem3()
#print x
#print y
# <codecell>
def Problem2():
c = matrix([4., 7., 6., 8., 8., 9])
G= matrix([[-1., 0., 0., -1., 0., -1., 0., 0., 0., 0., 0.],[-1., 0., 0., 0., -1., 0., -1., 0., 0., 0., 0.], [0., -1., 0., -1., 0., 0., 0., -1., 0., 0., 0.], [0., -1., 0., 0., -1., 0., 0., 0., -1., 0., 0.], [0., 0., -1., -1., 0., 0., 0., 0., 0., -1., 0.],[0., 0., -1., 0., -1., 0., 0., 0., 0., 0., -1.]])
h = matrix([-7., -2., -4., -5., -8., 0., 0., 0., 0., 0., 0.])
sol = solvers.lp(c,G,h)
return sol['x'],sol['primal objective']
# <codecell>
def Problem3():
Q= matrix([[3., 2., 1.],[2., 4., 2.],[1., 2., 3. ]])
p=matrix([3., 0., 1.])
sol=solvers .qp(Q, p)
return sol['x'],sol['primal objective']
# <codecell>
def Problem4():
datam=np.load('ForestData.npy')
c=matrix(datam[:,3]*-1)
G=np.zeros((21,7+3+21))
h=np.zeros(7+3+21)
G[:,-21:]=-1*np.eye(21)
h[:7]=datam[::3,1]
for i in xrange(7):
G[i*3:(i+1)*3,i]=np.ones(3)
G[:,7]=-1*datam[:,4]
G[:,8]=-1*datam[:,5]
G[:,9]=-1*datam[:,6]
h[7]=-40000
h[8]=-5
h[9]=-70*788
G=G.T
c = matrix(c)
G = matrix(G)
h = matrix(h)
sol = solvers.lp(c,G,h)
return sol['x'],sol['primal objective']*-1000
# <codecell>
#x,y=Problem4()
#print x
#print y
# <codecell>
'''
forest=np.array([[1,75.,1,503.,310.,0.01,40],
[0,0,2,140,50,0.04,80],
[0,0,3,203,0,0,95],
[2,90.,1,675,198,0.03,55],
[0,0,2,100,46,0.06,60],
[0,0,3,45,0,0,65],
[3,140.,1,630,210,0.04,45],
[0,0,2,105,57,0.07,55],
[0,0,3,40,0,0,60],
[4,60.,1,330,112,0.01,30],
[0,0,2,40,30,0.02,35],
[0,0,3,295,0,0,90],
[5,212.,1,105,40,0.05,60],
[0,0,2,460,32,0.08,60],
[0,0,3,120,0,0,70],
[6,98.,1,490,105,0.02,35],
[0,0,2,55,25,0.03,50],
[0,0,3,180,0,0,75],
[7,113.,1,705,213,0.02,40],
[0,0,2,60,40,0.04,45],
[0,0,3,400,0,0,95]])
'''
#np.save('ForestData',forest)
# <codecell>
```
#### File: Python/cython_wrapping/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
png_size = (800, 600)
def laplace(U, tol):
new = U.copy()
dif = tol
while tol <= dif:
new[1:-1,1:-1] = (U[:-2,1:-1] + U[2:,1:-1] + U[1:-1,:-2] + U[1:-1,2:])/4.0
dif = np.max(np.absolute(U-new))
U[:] = new
def cywrap_sol():
resolution = 301
U = np.zeros((resolution, resolution))
X = np.linspace(0, 1, resolution)
U[0] = np.sin(2 * np.pi * X)
U[-1] = -U[0]
laplace(U, .000001)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
u = np.linspace(0, 1, resolution)
v = np.linspace(0, 1, resolution)
u,v = np.meshgrid(u,v)
ax.plot_surface(u, v, U, color='b')
plt.savefig("solution.png", dpi=99)
plt.clf()
if __name__ == "__main__":
cywrap_sol()
```
#### File: Python/DataStructures1/spec.py
```python
from Node import LinkedListNode
from WordList import create_word_list
# Problem 1: in Node.py, add magic methods to the Node class.
# Problems 2, 3, 4: Complete the implementation of this class for creating linked lists.
class LinkedList(object):
"""Singly-linked list data structure class.
The first node in the list is referenced to by 'head'.
"""
def __init__(self):
"""Create a new empty linked list. Create the head
attribute and set it to None since the list is empty.
"""
self.head = None
def add(self, data):
"""Create a new Node containing 'data' and add it to
the end of the list.
Example:
>>> my_list = LinkedList()
>>> my_list.add(1)
>>> my_list.head.data
1
>>> my_list.add(2)
>>> my_list.head.next.data
2
"""
new_node = LinkedListNode(data)
if self.head is None:
# If the list is empty, point the head attribute to the new node.
self.head = new_node
else:
# If the list is not empty, traverse the list
# and place the new_node at the end.
current_node = self.head
while current_node.next is not None:
# This moves the current node to the next node if it is not
# empty. Then when we break out of the loop, current_node
# points to the last node in the list.
current_node = current_node.next
current_node.next = new_node
# Problem 2: Implement the __str__ method so that a LinkedList instance can
# be printed out the same way that Python lists are printed.
def __str__(self):
"""String representation: the same as a standard Python list.
Example:
>>> my_list = LinkedList()
>>> my_list.add(1)
>>> my_list.add(2)
>>> my_list.add(3)
>>> print(my_list)
[1, 2, 3]
>>> str(my_list) == str([1,2,3])
True
"""
raise NotImplementedError("Problem 2 incomplete.")
# Problem 3: Finish implementing LinkedList.remove() so that if the node
# is not found, an exception is raised.
def remove(self, data):
"""Remove the node containing 'data'. If the list is empty, or if the
target node is not in the list, raise a ValueError with error message
"<data> is not in the list."
Example:
>>> print(my_list)
[1, 2, 3]
>>> my_list.remove(2)
>>> print(my_list)
[1, 3]
>>> my_list.remove(2)
2 is not in the list.
>>> print(my_list)
[1, 3]
"""
# First, check if the head is the node to be removed. If so, set the
# new head to be the first node after the old head. This removes
# the only reference to the old head, so it is then deleted.
if self.head.data == data:
self.head = self.head.next
else:
current_node = self.head
# Move current_node through the list until it points to the node
# that precedes the target node.
while current_node.next.data != data:
current_node = current_node.next
# Point current_node to the node after the target node.
new_next_node = current_node.next.next
current_node.next = new_next_node
# Problem 4: Implement LinkedList.insert().
def insert(self, data, place):
"""Create a new Node containing 'data'. Insert it into the list before
the first Node in the list containing 'place'. If the list is empty, or
if there is no node containing 'place' in the list, raise a ValueError
with error message "<place> is not in the list."
Example:
>>> print(my_list)
[1, 3]
>>> my_list.insert(2,3)
>>> print(my_list)
[1, 2, 3]
>>> my_list.insert(2,4)
4 is not in the list.
"""
raise NotImplementedError("Problem 4 incomplete.")
# Problem 5: Implement this class for creating doubly-linked lists.
class DoublyLinkedList(LinkedList):
"""Doubly-linked list data structure class. Inherits from the 'LinkedList'
class. Has a 'head' for the front of the list and a 'tail' for the end.
"""
def __init__(self):
raise NotImplementedError("Problem 5 incomplete")
# Problem 6: Implement this class for creating sorted linked lists.
# Use an instance of your object to sort a large data set in sort_words().
class SortedLinkedList(DoublyLinkedList):
"""Sorted doubly-linked list data structure class."""
# Overload add() and insert().
def add(self, data):
"""Create a new Node containing 'data' and insert it at the
appropriate location to preserve list sorting.
Example:
>>> print(my_list)
[3, 5]
>>> my_list.add(2)
>>> my_list.add(4)
>>> my_list.add(6)
>>> print(my_list)
[2, 3, 4, 5, 6]
"""
raise NotImplementedError("Problem 6 incomplete")
# Conclude problem 6 by implementing this function.
def sort_words(filename = "English.txt"):
"""Use the 'create_word_list' method from the 'WordList' module to generate
a scrambled list of words from the specified file. Use an instance of
the SortedLinkedList class to sort the list. Then return the list.
Inputs:
filename (str, opt): the file to be parsed and sorted. Defaults to
'English.txt.
Returns:
The SortedLinkedList object containing the sorted list.
"""
raise NotImplementedError("Problem 6 incomplete.")
# =========================== END OF File =========================== #
```
#### File: Python/DataStructures2/bintree.py
```python
from collections import deque
class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.height = 1
def free(self):
self.data = None
self.left = None
self.right = None
def __str__(self):
return str(self.data)
class BinTree(object):
def __init__(self):
self.root = None
self.size = 0
def clear(self):
"""Recursively clear the AVL Tree"""
def _clear(self, n):
if n is not None:
_clear(n.left)
_clear(n.right)
n.free()
_clear(self.root)
self.root = None
self.size = 0
def _set_height(self, n):
if n is None:
return 0
else:
return 1 + max(getattr(n.left, "height", 0), getattr(n.right, "height", 0))
def insert(self, item):
def _recur_insert(n, cand):
if n is None:
return Node(cand)
else:
if cand < n.data:
n.left = _recur_insert(n.left, cand)
elif cand > n.data:
n.right = _recur_insert(n.right, cand)
else:
return n
n.height = self._set_height(n)
return n
if self.root is None:
self.root = Node(item)
else:
self.root = _recur_insert(self.root, item)
self.root.height = self._set_height(self.root)
self.size += 1
def remove(self, item):
def _recur_remove(n, cand):
if n is None:
return
else:
if cand < n.data:
n.left = _recur_remove(n.left, cand)
elif cand > n.data:
n.right = _recur_remove(n.right, cand)
elif cand == n.data:
if n.left is None and n.right is None:
return
elif n.left is not None and n.right is None:
nleft = n.left
del n
return nleft
elif n.left is None and n.right is not None:
nright = n.right
del n
return nright
else:
nmin = n.right
while nmin.left is not None:
nmin = nmin.left
n.data, nmin.data = nmin.data, n.data
n.right = _recur_remove(n.right, nmin.data)
return n
if n is not None:
n.height = self._set_height(n)
return n
if self.root is None:
return
else:
self.root = _recur_remove(self.root, item)
if self.root is not None:
self.root.height = self._set_height(self.root)
self.size -= 1
def find(self, item):
n = self.root
while n is not None:
if item < n.data:
n = n.left
elif item > n.data:
n = n.right
else:
return n
def print_tree(tree_root):
parents = deque()
children = deque()
parents.append(tree_root)
level = 1
while len(parents) > 0 or len(children) > 0:
print "Level {}: {}".format(level,
', '.join(str(n) for n in parents))
while len(parents) > 0:
node = parents.popleft()
if node.left is not None:
children.append(node.left)
if node.right is not None:
children.append(node.right)
parents, children = children, parents
level += 1
```
#### File: Python/DataStructures2/spec.py
```python
from Trees import BST
from Trees import AVL
def iterative_search(linkedlist, data):
"""Find the node containing 'data' using an iterative approach.
If there is no such node in the list, or if the list is empty,
raise a ValueError with error message "<data> is not in the list."
Inputs:
linkedlist (LinkedList): a linked list object
data: the data to find in the list.
Returns:
The node in 'linkedlist' containing 'data'.
"""
# Start the search at the head.
current = linkedlist.head
# Iterate through the list, checking the data of each node.
while current is not None:
if current.data == data:
return current
current = current.next
# If 'current' no longer points to a Node, raise a value error.
raise ValueError(str(data) + " is not in the list.")
# Problem 1: rewrite iterative_search() using recursion.
def recursive_search(linkedlist, data):
"""Find the node containing 'data' using a recursive approach.
If there is no such node in the list, raise a ValueError with error
message "<data> is not in the list."
Inputs:
linkedlist (LinkedList): a linked list object
data: the data to find in the list.
Returns:
The node in 'linkedlist' containing 'data'.
"""
raise NotImplementedError("Problem 1 incomplete")
# Problem 2: Implement BST.insert() in Trees.py.
# Problem 3: Implement BST.remove() in Trees.py
# Problem 4: Test build and search speeds for LinkedList, BST, and AVL objects.
def plot_times(filename="English.txt"):
"""Vary n from 500 to 5000, inclusive, incrementing by 500. At each
iteration, use the create_word_list() from the 'WordList' module to
generate a list of n randomized words from the specified file.
Time (separately) how long it takes to load a LinkedList, a BST, and
an AVL with the data set.
Choose 5 random words from the data set. Time how long it takes to
find each word in each object. Calculate the average search time for
each object.
Create one plot with two subplots. In the first subplot, plot the
number of words in each dataset against the build time for each object.
In the second subplot, plot the number of words against the search time
for each object.
Inputs:
filename (str): the file to use in creating the data sets.
Returns:
Show the plot, but do not return any values.
"""
raise NotImplementedError("Problem 4 incomplete")
# =============================== END OF FILE =============================== #
```
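`recursive_search()` is left unimplemented above. One possible recursive version is sketched below for reference; it is an illustration, not the lab's official solution, and it assumes nodes expose `.data` and `.next` exactly as `iterative_search()` does.

```python
# Illustrative sketch only -- not the official solution to Problem 1.
def recursive_search_sketch(linkedlist, data):
    def step(node):
        if node is None:
            raise ValueError(str(data) + " is not in the list.")
        if node.data == data:
            return node
        return step(node.next)
    return step(linkedlist.head)
```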
#### File: Python/RegexBasic/match_function_definition.py
```python
import re
pattern_strings = {
'id': r"([a-zA-Z_]\w*)",
'str': r"('[^']*')",
'num': r"(\d+\.\d*|\.\d+)",
'_': r"(\s*)"
}
pattern_strings['param_rhs'] = r"(={_}({str}|{num}|{id}))".format(**pattern_strings)
pattern_strings['param'] = r"({id}{_}{param_rhs}?)".format(**pattern_strings)
pattern_strings['param_list'] = r"({param}{_}(,{_}{param})*)".format(**pattern_strings)
pattern_strings['func'] = r"^def {_}{id}{_}\({_}({param_list}{_})?\){_}:$".format(**pattern_strings)
function_pattern = re.compile(pattern_strings['func'])
def test(string):
return bool(function_pattern.match(string))
def run_tests():
assert test(r"def compile(pattern,string):")
assert test(r"def space ( ) :")
assert test(r"def a113(_dir, file_path='\Desktop\files', val=_PI):")
assert test(r"def func(num=3., num=.5, num=0.0):")
assert not test(r"def func(num=.):")
assert not test(r"def do_problem(error, ):")
assert not test(r"def variable:")
assert not test(r"def f.f():")
assert not test(r"def f(*args):")
assert not test(r"def f(, val):")
assert not test(r"def f(,):")
assert not test(r"def err*r(gamma):")
assert not test(r"def sleep('no parameter name'):")
assert not test(r"def func(value=_MY_CONSTANT, msg='%s' % _DEFAULT_MSG):")
assert not test(r"def func(s1='', this one is a little tricky, s2=''):")
assert not test(r"def func(): Remember your line anchors!")
assert not test(r"def func()")
assert not test(r"deffunc():")
assert not test(r"func():")
assert not test(r"exit")
assert test(r"def f( a):")
assert test(r"def f( a, b):")
assert test(r"def f(a ):")
assert test(r"def f(a, b ):")
assert not test(r"def f(a=3.6f):")
assert not test(r"def f(a='hug'.4):")
# assert test(r"")
print "Passed all tests."
run_tests()
while True:
input_string = raw_input("Enter a string>>> ")
if input_string == 'exit':
break
print test(input_string)
```
#### File: Python/RegexBasic/regex_bulk.py
```python
from sys import argv
import re
regex_pattern_string = argv[1]
strings_to_match = argv[2:]
pattern = re.compile(regex_pattern_string)
def print_case(s):
if pattern.match(s): # This is where the work happens
prefix = "Match: \t"
else:
prefix = "No match:\t"
print prefix, s
map(print_case, strings_to_match)
### This is basically equivalent to:
# for s in strings_to_match:
# print_case(s)
```
#### File: Python/StateMachines/solutions.py
```python
import re

reprDFA_a = (set('ab'),
{(0, 'a'): 0,
(0, 'b'): 1,
(1, 'b'): 0,
(1, 'a'): 1},
0,
{1})
reprDFA_b = (set('01'),
{(0, '1'): 0,
(0, '0'): 1,
(1, '1'): 1,
(1, '0'): 2,
(2, '1'): 2,
(2, '0'): 3,
(3, '0'): 3,
              (3, '1'): 3},
0,
{2})
reprDFA_c = (set('01'),
{(0, '0'): 2,
(0, '1'): 1,
(1, '0'): 2,
(1, '1'): 3,
(3, '0'): 2,
(3, '1'): 4,
(4, '0'): 2,
(4, '1'): 2,
(2, '0'): 2,
(2, '1'): 2},
0,
{1, 2})
equiv_regex_a = "a*(ba*b)*"
equiv_regex_b = "1*01*01*(0+1)*"
def dfa_sim(input_str, machine):
#unpack the machine
input_len = len(input_str) - 1
alphabet, transitions, curr, accept = machine
for i, c in enumerate(input_str):
#match the character
if c in alphabet:
if (curr, c) in transitions:
curr = transitions[(curr, c)]
else:
break
else:
raise ValueError("Invalid symbol")
if i == input_len and curr in accept:
return True
return False
def email_validator(email):
pattern = r'([\w\d\.-]+)@([\w\d\.-]+)\.([a-z]{2,4})'
mo = re.match(pattern, email)
if mo:
s = slice(*mo.span(2))
xs = 'x'*len(email[s])
else:
raise ValueError("invalid email")
# replace domain with x's
return ''.join([mo.group(1),
'@', xs, '.',
mo.group(3)])
#Reading Regex
#Patterns from RegexLib.com
#1. Match 32 alphanumeric characters
#This regex can be used to match hashes
#2. (19|20)\d\d match 4 digit numbers beginning with 19 or 20
#[- /.] Match one of four chars
#(0[1-9]|1[012]) Match two digits 01-12
#[- /.] Match one of four chars
#(0[1-9]|[12][0-9]|3[01]) Match two digits 01-31
#This regex matches dates of the form yyyy[- /.]mm[- /.]dd
#3. ([0-1]?[0-9]) Match zero or one zeros or ones and another digit 0-9 Matches times 00-19
#OR ([2][0-3]) Match 20-23
#: Match a colon literal
#([0-5]?[0-9]) match minutes 00-59
#(:([0-5]?[0-9]))? match a colon literal then 00-59 for seconds (matches 0 or 1 repetitions)
#This regex matches 24hr times with optional seconds
```
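As a quick sanity check of `dfa_sim()`: by my reading of `reprDFA_a`, 'a' leaves the state unchanged and 'b' toggles between states 0 and 1, with 1 accepting, so the machine accepts exactly the strings over {a, b} containing an odd number of b's. The checks below are illustrative and assume the definitions above are in scope.

```python
# Quick illustrative checks for dfa_sim() with reprDFA_a
# (accepts strings with an odd number of 'b' characters).
assert dfa_sim("ab", reprDFA_a) is True
assert dfa_sim("abba", reprDFA_a) is False
assert dfa_sim("", reprDFA_a) is False
```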
#### File: Python/WebTechnology/chatserver.py
```python
from flask import Flask, request, Response, redirect, url_for
app = Flask(__name__)
from chatsession import Session
import json
import sys
from datetime import datetime
session = Session()
def decode_date(date):
parts = date.split(".")
tmp = datetime.strptime(parts[0], "%Y-%m-%dT%H:%M:%S")
try:
        tmp = tmp.replace(microsecond=int(parts[1]))
    except (IndexError, ValueError):
        pass
return tmp
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def dateobj(dct):
if 'timestamp' in dct:
dct['timestamp'] = decode_date(dct['timestamp'])
return dct
@app.route('/')
def index():
return '''This is a simple messaging program'''
@app.route('/changenick', methods=["POST"])
def change_nick():
msg = json.loads(request.data)
try:
session.change_nick(msg['old_nick'], msg['new_nick'])
return Response(status=200)
except KeyError:
return Response(status=500)
@app.route('/channels/<channel>')
def view_channel(channel):
if channel == '0':
print "returning all"
return '<br>'.join(str(x) for x in session.sessionlog)
channels = set([0, int(channel)])
    return '<br>'.join('{}: {}'.format(x['nick'], x['content']) for x in session.sessionlog if x['channel'] in channels)
@app.route('/channel/create', methods=["POST"])
def create_channel():
msg = json.loads(request.data)
try:
session.register_channel(msg['channel'])
        return Response(status=200)
    except Exception:
        return Response(status=500)
@app.route('/channel')
def get_channels():
    return Response(json.dumps(session.channels), status=200, mimetype='application/json')
@app.route('/message/push', methods=["POST"])
def send_msg():
msg = json.loads(request.data, object_hook=dateobj)
print request.data
ch = msg['channel']
print "Trying to post to channel", ch
try:
session.log_message(msg)
print "Message logged on channel", ch, msg
return request.data
except ValueError:
print "No such channel exists!"
        return Response(status=404)
@app.route('/message/pull')
def get_msg():
a = request.args['timestamp']
b = request.args['nick']
c = request.args['channel']
print "Pull Request: {}\t{}\t{}".format(a, b, c)
msgs = session.retrieve_new(b, decode_date(a), int(c))
print "Retrieved Messages ",msgs
return json.dumps(msgs, cls=DateEncoder)
if __name__ == "__main__":
host, port = sys.argv[1:3]
app.run(host=host, port=int(port), threaded=True, debug=True)
```
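For context, a hedged sketch of how a client might exercise the `/message/push` endpoint with the `requests` library. The payload fields (`nick`, `content`, `channel`, `timestamp`) are inferred from the handlers above and from `chatsession.Session`, which is not shown here, so the exact shape is an assumption; the host and port are placeholders.

```python
# Hypothetical client sketch; payload shape and server address are assumptions.
import json
from datetime import datetime

import requests

payload = {
    "nick": "alice",
    "channel": 1,
    "content": "hello",
    "timestamp": datetime.now().isoformat(),
}
resp = requests.post("http://localhost:5000/message/push", data=json.dumps(payload))
print(resp.status_code)
```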
#### File: jessicaleete/numerical_computing/travis_post.py
```python
import os
import travis_common as tc
def all_present(fatal=True):
try:
assert os.path.isfile('Vol1.pdf')
assert os.path.isfile('Vol2.pdf')
assert os.path.isfile('Vol3.pdf')
assert os.path.isfile('Vol4.pdf')
assert os.path.isfile('ExtraLabs.pdf')
except AssertionError as e:
        raise tc.BuildError(e)
if __name__ == "__main":
all_present()
```
|
{
"source": "jessica-lei/coronavirus-2020",
"score": 4
}
|
#### File: coronavirus-2020/exp/metrics.py
```python
import numpy as np
def mean_squared_error(estimates, targets):
"""
Mean squared error measures the average of the square of the errors (the
average squared difference between the estimated values and what is
    estimated). The formula is:
MSE = (1 / n) * \sum_{i=1}^{n} (Y_i - Yhat_i)^2
Implement this formula here, using numpy and return the computed MSE
https://en.wikipedia.org/wiki/Mean_squared_error
Args:
estimates(np.ndarray): the estimated values (should be the same shape as targets)
targets(np.ndarray): the ground truth values
Returns:
        MSE (float): mean squared error calculated by the above equation
"""
diffs = []
if estimates.ndim == 1:
for n in range(estimates.size):
diff = estimates[n] - targets[n]
diffs.append(diff)
else:
for i in range(estimates.shape[0]):
for j in range(estimates.shape[1]):
diff = estimates[i][j] - targets[i][j]
diffs.append(diff)
diffs = np.array(diffs)
diffs = np.square(diffs)
error = np.mean(diffs)
return error
```
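The element-wise loops above can be collapsed into a single vectorized NumPy expression; a minimal equivalent (same inputs, same result for both the 1-D and 2-D cases) is:

```python
import numpy as np

def mean_squared_error_vectorized(estimates, targets):
    # np.mean averages over all elements, so 1-D and 2-D inputs both work.
    return np.mean(np.square(estimates - targets))
```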
|
{
"source": "jessicalemos/Sakila-NoSQL",
"score": 3
}
|
#### File: ficheiros_JSON/scripts/convert_customers.py
```python
import json
#----------------------- ADDRESS INFORMATION ------------------#
def find_address(address_id):
f = open("../newJson/addresses.json","rb")
addresses = json.loads(f.read())
    i = 0  # counter for iterating over the addresses
address = addresses["address"]
size = len(address)
while(i < size):
if(address[i]["address_id"] == address_id):
return address[i]
i+=1
return
#----------------------- ADDRESS INFORMATION ------------------#
#----------------------- MAIN FUNCTION ------------------------#
def customers_dataset():
f = open("../oldJson/customer.json","rb") #abrir ficheiro rentals
customers = json.loads(f.read()) #transformar ficheiro para objeto em python
i = 0 #inicializar contador
size = len(customers["customer"])
while(i < size): #Enquanto houver entradas
if(customers["customer"][i]):
customer = customers["customer"][i]
name = customer["first_name"] + " " + customer["last_name"]
mail = customer["email"]
address = find_address(customer["address_id"])
address.pop("address_id")
customer.pop("active")
customer.pop("email")
customer.pop("create_date")
customer.pop("last_update")
customer.pop("store_id")
customer.pop("address_id")
customer.pop("first_name")
customer.pop("last_name")
customer["name"] = name
customer["email"]= mail
customer["address"] = address
i += 1
new = open("../newJson/customers.json","w")
json.dump(customers,new,indent=3)
#----------------------- MAIN FUNCTION ------------------------#
def main():
customers_dataset()
main()
```
#### File: ficheiros_JSON/scripts/convert_films.py
```python
import json
def find_actor(id):
f = open("../oldJson/atores.json","r")
actores = json.loads(f.read())
actor = actores["actor"]
i = 0
if(id == "110"):
end = " (2)"
else:
end = ""
while(i < 201):
if(actor[i]["actor"] == id):
actor[i].pop("last_update")
actor[i].pop("actor")
fn = actor[i]["first_name"]
ln = actor[i]["last_name"]
actor[i]["name"] = fn + " " + ln + end
actor[i].pop("first_name")
actor[i].pop("last_name")
#return (actor[i])
return actor[i]["name"]
i+=1
return
def find_actores_filme(idFilme):
f = open("../oldJson/film_actor.json","r")
data = json.loads(f.read())
data = data["film_actor"]
i = 0
j = 0
actors = []
while(i < 5462):
#print("welelele")
if(data[i]["film_id"] == idFilme):
actor = find_actor(data[i]["actor_id"])
actors.insert(j,actor)
j+=1
i+=1
return actors
# Function that finds the language with the given id
def find_language(id):
f = open("../oldJson/language.json","rb")
languages = json.loads(f.read())
i = 0
while(i < 6):
if(languages["language"][i]["language_id"] == id):
return languages["language"][i]["name"]
i+=1
return
def find_category(id):
f = open("../oldJson/category.json","rb")
cats = json.loads(f.read())
i = 0
while(i < 16):
if(cats["category"][i]["category_id"] == id):
return cats["category"][i]["name"]
i+=1
return
# Function that finds the category name of the film with the given id
def find_film_category(id):
f= open("../oldJson/film_category.json","rb")
films = json.loads(f.read())
i = 0
film = films["film_category"]
while(i < 1000):
if(film[i]["film_id"] == id):
cat = find_category(film[i]["category_id"])
return cat
i+=1
return
# Function that processes the films
def processFilms():
f = open("../oldJson/film.json","rb")
films = json.loads(f.read())
i = 0
while(i < 1000):
if(films["film"][i]):
film = films["film"][i]
# FIND LANGUAGE
language = find_language(film["language_id"])
#print(language)
# FIND CATEGORY
category = find_film_category(film["film_id"])
#print(category)
actors = find_actores_filme(film["film_id"])
#print(actors)
# CHANGE FIELDS AND VALUES
film.pop("language_id")
film["language"] = language
film["category"] = category
film["actors"] = actors
film.pop("original_language_id")
film["film_id"] = int(film["film_id"])
i+=1
new = open("../newJson/film.json","w")
json.dump(films,new,indent=3)
processFilms()
```
#### File: ficheiros_JSON/scripts/convert_rental.py
```python
import json
from datetime import datetime
#----------------------- PAYMENT INFORMATION ------------------#
def find_payment(rental_id):
f = open("../oldJson/payment.json","rb")
payments = json.loads(f.read())
i = 0
payment = payments["payment"]
size = len(payment)
while(i < size):
if(payment[i]["rental_id"] == rental_id):
return float(payment[i]["amount"])
i+=1
return
#----------------------- PAYMENT INFORMATION ------------------#
#----------------------- STAFF INFORMATION --------------------#
def find_location(address_id):
f = open("../newJson/addresses.json","rb")
addresses = json.loads(f.read())
    i = 0  # counter for iterating over the addresses
address = addresses["address"]
size = len(address)
while(i < size):
if(address[i]["address_id"] == address_id):
add = address[i]["address"]
city = address[i]["city"]
district = address[i]["district"]
country = address[i]["country"]
return add,city,district,country
i+=1
return
def find_store(store_id):
f = open("../oldJson/store.json","rb")
stores = json.loads(f.read())
    i = 0  # counter for iterating over the stores
store = stores["store"]
size = len(store)
while(i < size):
if(store[i]["store_id"] == store_id):
return find_location(store[i]["address_id"])
i+=1
return ("","","","")
def find_staff(staff_id):
f = open("../oldJson/staff.json","rb")
staffs = json.loads(f.read())
    i = 0  # counter for iterating over the staff
staff = staffs["staff"]
size = len(staff)
while(i < size):
if(staff[i]["staff_id"] == staff_id):
name = staff[i]["first_name"] + " " + staff[i]["last_name"]
mail = staff[i]["email"]
store_id = staff[i]["store_id"]
staff[i].pop("email")
staff[i].pop("store_id")
staff[i].pop("password")
staff[i].pop("username")
staff[i].pop("active")
staff[i].pop("address_id")
staff[i].pop("first_name")
staff[i].pop("last_name")
address,city,district,country = find_store(store_id)
staff[i]["name"] = name
staff[i]["email"] = mail
staff[i]["store_id"] = store_id
staff[i]["store_address"] = address
staff[i]["store_city"] = city
staff[i]["store_district"] = district
staff[i]["store_country"] = country
return staff[i]
i+=1
return
#----------------------- STAFF INFORMATION --------------------#
#----------------------- FILM INFORMATION ---------------------#
def find_film(film_id):
f = open("../newJson/film.json","rb")
films = json.loads(f.read())
    i = 0  # counter for iterating over the films
film = films["film"]
size = len(film)
while(i < size):
if(film[i]["film_id"] == film_id):
#print(film[i])
film[i].pop("release_year")
film[i].pop("rental_duration")
film[i].pop("rental_rate")
film[i].pop("length")
film[i].pop("replacement_cost")
film[i].pop("rating")
film[i].pop("last_update")
return film[i]
i+=1
return
def find_inventory(inventory_id):
f = open("../oldJson/inventory.json","rb")
invs = json.loads(f.read())
    i = 0  # counter for iterating over the inventory
inventory = invs["inventory"]
size = len(inventory)
while(i < size):
if(inventory[i]["inventory_id"] == inventory_id):
film = find_film(int(inventory[i]["film_id"]))
return film
i+=1
return
#----------------------- FILM INFORMATION ---------------------#
#----------------------- MAIN FUNCTION ------------------------#
def rental_dataset():
f = open("../oldJson/rental.json","rb") #abrir ficheiro rentals
rentals = json.loads(f.read()) #transformar ficheiro para objeto em python
i = 0 #inicializar contador
size = len(rentals["rental"])
while(i < size): #Enquanto houver entradas
if(rentals["rental"][i]):
rental = rentals["rental"][i] #Pegar na entrada Rental
customer_id = rental["customer_id"] #id do cliente
payment_value = find_payment(str(rental["id"])) #Valor do pagamento
staff= find_staff(str(rental["staff_id"])) #Informação sobre o staff
film = find_inventory(str(rental["inventory_id"]))
try:
time_rental_date = datetime.strptime(rental["rental_date"], '%Y-%m-%d %H:%M:%S.%f')
except(ValueError):
time_rental_date = ""
try:
time_return_date = datetime.strptime(rental["return_date"], '%Y-%m-%d %H:%M:%S')
except(ValueError):
time_return_date = ""
rental_date = rental["rental_date"]
return_date = rental["return_date"]
if(time_rental_date != "" and time_return_date != ""):
rental_duration = time_return_date - time_rental_date
else:
rental_duration = ""
rental.pop("staff_id")
rental.pop("last_update")
rental.pop("customer_id")
rental.pop("rental_date")
rental.pop("return_date")
rental.pop("inventory_id")
rental["customer_id"] = customer_id
rental["rental_date"] = rental_date
rental["return_date"] = return_date
rental["rental_duration"] = str(rental_duration)
rental["payment_value"] = payment_value
rental["staff"] = staff
rental["film"] = film
print("Na iteração nº: " + str(i))
i+=1
new = open("../newJson/rental.json","w")
json.dump(rentals,new,indent=3)
#----------------------- MAIN FUNCTION ------------------------#
def main():
rental_dataset()
main()
```
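Each lookup helper above re-opens and re-parses its JSON file once per rental, which dominates the runtime for large datasets. A minimal sketch of the alternative, building an in-memory index once and reusing it, is shown below using the payment lookup as an example; the field names follow `find_payment()` above, and the sketch is illustrative rather than a drop-in replacement.

```python
# Sketch: build the payment lookup once instead of re-reading payment.json
# for every rental. Field names follow find_payment() above.
import json

with open("../oldJson/payment.json", "rb") as f:
    _payments = json.loads(f.read())["payment"]
PAYMENT_BY_RENTAL = {p["rental_id"]: float(p["amount"]) for p in _payments}

def find_payment_cached(rental_id):
    return PAYMENT_BY_RENTAL.get(rental_id)
```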
|
{
"source": "jessicalettes/nmd-exons",
"score": 3
}
|
#### File: nmd-exons/01_notebooks/nmd.py
```python
from itertools import product
import pandas as pd
import gffutils
from ordered_set import OrderedSet
COLON = ':'
CDS = 'CDS'
EXCLUSION = 'Exclusion causes NMD'
EXON = 'exon'
GENE_ID = 'gene_id'
INCLUSION = 'Inclusion causes NMD'
MINUS = '-'
NONSENSE_MEDIATED_DECAY = 'nonsense_mediated_decay'
NOTFOUND = 'Exon not found'
PLUS = '+'
START = 'start'
START_CODON = 'start_codon'
STOP_CODON = 'stop_codon'
TRANSCRIPT = 'transcript'
TRANSCRIPT_ID = 'transcript_id'
TRANSCRIPT_TYPE = 'transcript_type'
UNKNOWN = 'Splicing not known to cause NMD'
class NMDExons(object):
def __init__(self, database, exon_ids):
"""Given exons and a feature database, say which cause NMD and how
Parameters
----------
database : gffutils.FeatureDB file string
A database of gene features create from a GTF/GFF file by gffutils
exon_ids : list-like of strings
Exons that you want to test whether they could be causing NMD.
Acceptable format is "exon:chrom:start-stop:strand", e.g.
'exon:chr1:100-200:+'
"""
self.db = gffutils.FeatureDB(database)
self.exon_ids = exon_ids
def find_nmd_exons(self):
"""Given list of exons, returns if exons are NMD and how
Returns
-------
nmd : Series
Series contains exon_id string and whether inclusion or exclusion
of that exon causes NMD
"""
nmd = pd.Series("Not yet evaluated", index=self.exon_ids)
counter = 0
for exon_id in self.exon_ids:
nmd[exon_id] = self._is_this_exon_nmd(exon_id)
if counter % 100 == 0:
print(counter)
counter += 1
return nmd
def _is_this_exon_nmd(self, exon_id):
"""Given a single exon id, returns if it causes NMD and how
Parameters
----------
exon_id : str
Single exon ID
Returns
-------
nmd : None | 'inclusion' | 'exclusion'
If the exon does not cause NMD, return None, else return whether
inclusion or exclusion of that exon causes NMD
"""
try:
exon = self.db[exon_id]
transcripts_with_exon = self._get_transcripts_with_exon(exon)
all_transcripts = self._get_all_transcripts_overlapping_exon(exon)
transcripts_without_exon = all_transcripts - transcripts_with_exon
exon_strand = exon_id.split(COLON)[3]
transcript_exons = self._get_exons_from_transcripts(all_transcripts, exon_strand)
iterator = product(transcripts_without_exon, transcripts_with_exon)
for t_without, t_with in iterator:
try:
t1_exons_without = transcript_exons[t_without]
if len(t1_exons_without) < 5:
continue
except KeyError:
return "t_without KeyError ({t_without})".format(t_without=t_without)
try:
t2_exons_with = transcript_exons[t_with]
except KeyError:
return "t_with KeyError ({t_with})".format(t_with=t_with)
try:
if exon_strand == PLUS:
first_exon_with = t2_exons_with[2]
last_exon_with = t2_exons_with[-1]
first_exon_without = t1_exons_without[2]
last_exon_without = t1_exons_without[-1]
else:
first_exon_with = t2_exons_with[0]
last_exon_with = t2_exons_with[-3]
first_exon_without = t1_exons_without[0]
last_exon_without = t1_exons_without[-3]
if first_exon_with != exon and last_exon_with != exon:
t2_exons_with = list(t2_exons_with)
t2_exons_with.remove(first_exon_with)
t2_exons_with.remove(last_exon_with)
t2_exons_with = tuple(t2_exons_with)
t1_exons_without = list(t1_exons_without)
t1_exons_without.remove(first_exon_without)
t1_exons_without.remove(last_exon_without)
t1_exons_without = tuple(t1_exons_without)
if (exon_strand == PLUS and t1_exons_without[1] ==
t2_exons_with[1]) or \
(exon_strand == MINUS and t1_exons_without[-1]
== t2_exons_with[-1]):
if len(set(t2_exons_with).symmetric_difference(
set(t1_exons_without))) == 1:
t1_type = self.db[t_without][TRANSCRIPT_TYPE][0]
t2_type = self.db[t_with][TRANSCRIPT_TYPE][0]
if (t1_type != NONSENSE_MEDIATED_DECAY) & \
(t2_type == NONSENSE_MEDIATED_DECAY):
return INCLUSION + " (annotated)"
if self._inclusion_nmd(t1_exons_without,
t2_exons_with, exon_strand):
return INCLUSION + " (found stop codon)"
if (t1_type == NONSENSE_MEDIATED_DECAY) & \
(t2_type != NONSENSE_MEDIATED_DECAY):
return EXCLUSION + " (annotated)"
if self._exclusion_nmd(t1_exons_without,
t2_exons_with, exon_strand):
return EXCLUSION + " (found stop codon)"
except KeyError:
return "KeyError"
except gffutils.FeatureNotFoundError:
return NOTFOUND
return UNKNOWN
def _get_transcripts_with_exon(self, exon):
"""Create set of transcript ids that contain possible NMD exon
Parameters
----------
exon : gffutils feature exon
The exon of interest that causes inclusion, exclusion or no NMD
Returns
-------
transcripts_with_exon : set
The set of transcript ids containing the exon of interest
"""
transcripts_with_exon = OrderedSet()
for exon_trans in self.db.parents(exon, featuretype=TRANSCRIPT):
if self._is_valid_transcript(exon_trans):
transcripts_with_exon.add(exon_trans[TRANSCRIPT_ID][0])
return transcripts_with_exon # parent_transcripts_of_exon
def _is_valid_transcript(self, transcript):
contains_cds = self._contains_child(transcript, "CDS")
contains_utr = self._contains_child(transcript, "UTR")
contains_start_codon = self._contains_child(transcript, "start_codon")
return contains_cds and contains_utr and contains_start_codon
def _contains_child(self, feature, featuretype):
children = self.db.children(feature, featuretype=featuretype)
return sum(1 for x in children) > 0
def _get_all_transcripts_overlapping_exon(self, exon):
"""Makes set of all transcript ids in gene containing possible NMD exon
Parameters
----------
exon : gffutils feature exon
The exon of interest that causes inclusion, exclusion or no NMD
Returns
-------
all_transcripts : set
The set of transcript ids from gene containing the exon of interest
"""
all_transcripts = OrderedSet()
for trans in self.db.region(region=exon, featuretype=TRANSCRIPT):
if self._is_valid_transcript(trans):
all_transcripts.add(trans[TRANSCRIPT_ID][0])
return all_transcripts # call transcripts_from_gene_containing_exon
def _get_exons_from_transcripts(self, all_transcripts, strand):
"""Create dictionary with tuples containing exons for all transcripts
Parameters
----------
all_transcripts : set
The set of transcript ids from gene containing the exon of interest
strand : str
            Strand containing the possible NMD-causing exon
Returns
-------
transcript_exons : dict
Mapping of transcript ids to their children exons, cds and start
codons. The values are in tuple form with transcript start
codon, first cds and all internal exons.
"""
transcript_exons = dict()
for transcript_id in all_transcripts:
this_transcript_exons = tuple(self.db.children(transcript_id,
featuretype=EXON,
order_by=START))
transcript_cds = tuple(self.db.children(transcript_id,
featuretype=CDS,
order_by=START))
for start_codon in self.db.children(transcript_id,
featuretype=START_CODON):
if strand == PLUS:
first_cds = transcript_cds[0:1]
real_transcript_cds = (start_codon,) + first_cds + this_transcript_exons
else:
first_cds = transcript_cds[-1]
real_transcript_cds = this_transcript_exons + (first_cds,) + (start_codon,)
transcript_exons[transcript_id] = real_transcript_cds
return transcript_exons
def _inclusion_nmd(self, trans_without_exon, trans_with_exon, strand):
"""Given transcripts differing by one exon, determine if inclusion NMD
Parameters
----------
trans_without_exon : tuple
Identical strand to trans_with_exon but not containing exon
trans_with_exon : tuple
Transcript containing possible NMD causing exon
strand : str
            Strand containing the possible NMD-causing exon
Returns
-------
bool
True if inclusion causes NMD, False otherwise
"""
trans_with_exon_set = OrderedSet(trans_with_exon)
trans_without_exon_set = OrderedSet(trans_without_exon)
as_exon = (trans_with_exon_set - trans_without_exon_set)[0]
index = trans_with_exon.index(as_exon)
if strand == PLUS:
for iterator in range(index, len(trans_with_exon)):
for stop in self.db.children(trans_with_exon[iterator][GENE_ID]
[0], featuretype=STOP_CODON):
if self.stop_codon_nmd(trans_with_exon[index], stop):
return True
if strand == MINUS:
for iterator in range(0, index + 1):
for stop in self.db.children(trans_with_exon[iterator][GENE_ID]
[0], featuretype=STOP_CODON):
if self.stop_codon_nmd(trans_with_exon[index], stop):
return True
return False
def _exclusion_nmd(self, trans_without_exon, trans_with_exon, strand):
"""Given transcripts differing by one exon, determine if exclusion NMD
Parameters
----------
trans_without_exon : tuple
Identical strand to trans_with_exon but not containing exon
trans_with_exon : tuple
Transcript containing possible NMD causing exon
strand : str
            Strand containing the possible NMD-causing exon
Returns
-------
bool
True if exclusion causes NMD, False otherwise
"""
trans_with_exon_set = OrderedSet(trans_with_exon)
trans_without_exon_set = OrderedSet(trans_without_exon)
as_exon = (trans_with_exon_set - trans_without_exon_set)[0]
index = trans_with_exon.index(as_exon)
if strand == PLUS:
for iterator in range(index, len(trans_without_exon)):
for stop in self.db.children(trans_without_exon[iterator]
[GENE_ID][0],
featuretype=STOP_CODON):
if self.stop_codon_nmd(trans_without_exon[index], stop):
return True
else:
for iterator in range(0, index):
for stop in self.db.children(trans_with_exon[iterator][GENE_ID]
[0], featuretype=STOP_CODON):
if self.stop_codon_nmd(trans_with_exon[index], stop):
return True
return False
@staticmethod
def stop_codon_nmd(exon, stop_codon):
"""Given an exon and stop_codon, check if exon is NMD
Parameters
----------
exon : exon gffutils feature object
The exon to check NMD in
stop_codon : stop_codon gffutils feature object
The stop codon in the transcript containing NMD
Returns
-------
bool
True if exon is NMD and False if not
"""
if stop_codon.strand == PLUS:
if stop_codon.start >= exon.start and stop_codon.end <= exon.end \
and stop_codon.end + 50 < exon.end:
return True
else:
if stop_codon.start >= exon.start and stop_codon.end <= exon.end \
and exon.start + 50 < stop_codon.start:
return True
return False
```
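A short usage sketch for `NMDExons`, following the constructor and `find_nmd_exons()` docstrings above; the database path and exon ID below are placeholders.

```python
# Usage sketch; the database path and exon ID are placeholders.
from nmd import NMDExons

exon_ids = ['exon:chr1:100-200:+']
nmd = NMDExons('gencode.annotation.gtf.db', exon_ids)
results = nmd.find_nmd_exons()   # pandas Series indexed by exon id
print(results.head())
```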
#### File: jessicalettes/nmd-exons/nmd_original.py
```python
__author__ = 'rhythmicstar'
import gffutils
import pandas as pd
def possible_nmd(nmd_file):
splicing_data = pd.read_csv(nmd_file, header=None, sep='\s+')
index = pd.Index(splicing_data[3])
event_ids = pd.Series(index, name='event_id')
event_ids_df = event_ids.to_frame()
event_ids_df['isoform1'] = splicing_data[3].map(lambda x: x.split('|')[0]
.split('junction:')
[1][:-2])
event_ids_df['isoform2'] = splicing_data[3].map(lambda x: x.
split('isoform2=junction:')
[1].split('@')[0][:-2])
event_ids_df['exon'] = splicing_data[3].map(lambda x: x.split('exon:')[1].
split('@')[0][:-2])
event_ids_df['junction'] = splicing_data[3].map(lambda x: x.
split('@junction:')[1].
split(' ')[0][:-2])
event_ids_df['strand'] = splicing_data[5].map(lambda x: x)
event_ids_df['type'] = ''
event_ids_df['inclusion_nmd'] = ''
event_ids_df['exclusion_nmd'] = ''
event_ids_df = event_ids_df[['isoform1', 'isoform2', 'exon', 'junction',
'strand', 'type', 'inclusion_nmd',
'exclusion_nmd']]
# NMD from exon inclusion
# pos_str = pd.DataFrame([['', '', 'chr3:186506099-186506205', '', '+',
# 'inclusion', '', '']], columns=list(
# ['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
# 'inclusion_nmd', 'exclusion_nmd']))
# neg_str = pd.DataFrame([['', '', 'chr8:109254341-109254575', '', '-',
# 'inclusion', '', '']], columns=list(
# ['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
# 'inclusion_nmd', 'exclusion_nmd']))
# d = pd.DataFrame([['', '', 'chr2:191777919-191778090', '', '+',
# 'inclusion', '', '']], columns=list(
# ['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
# 'inclusion_nmd', 'exclusion_nmd']))
magobhb_inc = pd.DataFrame([['', '', 'chr12:10761697-10761982', '', '+',
'inclusion', '', '']], columns=list(
['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
'inclusion_nmd', 'exclusion_nmd']))
# asout_df = pd.DataFrame([['', '', 'chr3:186502751-186502890', '', '+',
# 'exclusion', '', '']], columns=list(
# ['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
# 'inclusion_nmd', 'exclusion_nmd']))
# e = pd.DataFrame([['', '', 'chr11:107420479-107420549', '', '-',
# 'exclusion', '', '']], columns=list(
# ['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
# 'inclusion_nmd', 'exclusion_nmd']))
# event_ids_df = event_ids_df.append([pos_str, neg_str, d, new_exon,
# asout_df, e], ignore_index=True)
eif4a2_exc = pd.DataFrame([['', '', 'chr3:186502751-186502890', '', '+',
'inclusion', '', '']], columns=list(
['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
'inclusion_nmd', 'exclusion_nmd']))
eif4a2_inc = pd.DataFrame([['', '', 'chr3:186506099-186506205', '', '+',
'inclusion', '', '']], columns=list(
['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
'inclusion_nmd', 'exclusion_nmd']))
MAGOHB_inc = pd.DataFrame([['', '', 'chr12:10761697-10761982', '', '-',
'inclusion', '', '']], columns=list(
['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
'inclusion_nmd', 'exclusion_nmd']))
neg_cont = pd.DataFrame([['', '', 'chr3:186502353-186502486', '', '+',
'inclusion', '', '']], columns=list(
['isoform1', 'isoform2', 'exon', 'junction', 'strand', 'type',
'inclusion_nmd', 'exclusion_nmd']))
event_ids_df = event_ids_df.append([magobhb_inc, eif4a2_exc, eif4a2_inc,
MAGOHB_inc, neg_cont],
ignore_index=True)
return event_ids_df
def include_exon_nmd(event_ids_df, dbhuman):
for i, row in event_ids_df.iterrows():
nmd = False
location = row['exon']
for index, human_exon in \
enumerate(dbhuman.features_of_type('exon', limit=location)):
if human_exon['transcript_type'] == ["nonsense_mediated_decay"]:
for stop_codon in dbhuman.children(human_exon['gene_id'][0],
featuretype='stop_codon'):
if stop_codon_nmd(human_exon, stop_codon):
nmd = True
if nmd:
event_ids_df.ix[i, 6] = 'True'
else:
event_ids_df.ix[i, 6] = 'False'
return event_ids_df
# def stop_codon_nmd(human_exon, stop_codon):
# if stop_codon.strand == '+':
# if stop_codon.start >= human_exon.start and stop_codon.end <= \
# human_exon.end and stop_codon.end + 50 < human_exon.end:
# return True
# if stop_codon.strand == '-':
# if stop_codon.start >= human_exon.start and stop_codon.end <= \
# human_exon.end and human_exon.start + 50 < stop_codon.start:
# return True
# return False
def main(database, nmd_file):
dbhuman = gffutils.FeatureDB(database, keep_order=True)
event_ids_df = possible_nmd(nmd_file)
# if negative correlation
event_ids_df = include_exon_nmd(event_ids_df, dbhuman)
# if positive correlation
# event_ids_df = exclude_exon_nmd(event_ids_df)
return event_ids_df
```
#### File: jessicalettes/nmd-exons/test_nmd.py
```python
import gffutils
import pandas.util.testing as pdt
import pandas as pd
import pytest
@pytest.fixture
def database():
return '/Users/rhythmicstar/projects/exon_evolution//gencode.v19.' \
'annotation.outrigger.nmdtest.gtf.db'
@pytest.fixture
def exon_ids():
return ('exon:chr10:101510126-101510153:+',
'exon:chr7:33075546-33075600:-',
'exon:chr12:42778742-42778798:+',
'exon:chr8:29931393-29931571:-',
'exon:chr3:186502353-186502486:+',
'exon:chr3:42661508-42661535:+',
'exon:chr14:20785953-20786133:-')
@pytest.fixture
def single_exon_id():
return 'exon:chr3:186502751-186502890:+'
@pytest.fixture
def is_exon_nmd():
return 'Exclusion causes NMD (annotated)', \
'Inclusion causes NMD (annotated)', \
'Exclusion causes NMD (found stop codon)', \
'Inclusion causes NMD (found stop codon)', \
'Splicing not known to cause NMD', \
'Splicing not known to cause NMD', \
'Splicing not known to cause NMD'
@pytest.fixture()
def stop_codon_exon_ids():
return [('stop_codon:chr3:186506106-186506108:+',
'exon:chr3:186506099-186506205:+', True),
('stop_codon:chr17:44101535-44101537:+',
'exon:chr17:44101322-44101549:+', False)]
@pytest.fixture()
def parent_transcripts_of_exon():
return {'ENST00000323963.5', 'ENST00000498746.1', 'ENST00000440191.2',
'ENST00000425053.1'}
@pytest.fixture()
def all_transcripts_of_exon():
return {'ENST00000497177.1', 'ENST00000429589.1', 'ENST00000486805.1',
'ENST00000466362.1', 'ENST00000441007.1', 'ENST00000475653.1',
'ENST00000465792.1', 'ENST00000440191.2', 'ENST00000445596.1',
'ENST00000465222.1', 'ENST00000425053.1', 'ENST00000492144.1',
'ENST00000494445.1', 'ENST00000467585.1', 'ENST00000498746.1',
'ENST00000461021.1', 'ENST00000465032.1', 'ENST00000443963.1',
'ENST00000426808.1', 'ENST00000495049.1', 'ENST00000491473.1',
'ENST00000323963.5', 'ENST00000496382.1', 'ENST00000468362.1',
'ENST00000475409.1', 'ENST00000465267.1', 'ENST00000485101.1',
'ENST00000356531.5'}
@pytest.fixture()
def inc_trans_without_exon_ids():
return ['start_codon:chr11:57480091-57480093:+',
'CDS:chr11:57480091-57480279:+:0',
'exon:chr11:57505080-57505140:+',
'exon:chr11:57505826-57505902:+',
'exon:chr11:57506136-57506242:+',
'exon:chr11:57506446-57506511:+',
'exon:chr11:57506603-57506732:+']
@pytest.fixture()
def inc_trans_with_exon_ids():
return ['start_codon:chr11:57480091-57480093:+',
'CDS:chr11:57480091-57480279:+:0',
'exon:chr11:57505080-57505140:+',
'exon:chr11:57505385-57505498:+',
'exon:chr11:57505826-57505902:+',
'exon:chr11:57506136-57506242:+',
'exon:chr11:57506446-57506511:+',
'exon:chr11:57506603-57506732:+']
@pytest.fixture()
def exc_trans_without_exon_ids():
return ['start_codon:chr12:42729705-42729707:+',
'CDS:chr12:42729705-42729776:+:0',
'exon:chr12:42729685-42729776:+',
'exon:chr12:42745687-42745851:+',
'exon:chr12:42748963-42749024:+',
'exon:chr12:42768665-42768876:+',
'exon:chr12:42781258-42781337:+',
'exon:chr12:42787372-42787491:+',
'exon:chr12:42792656-42792796:+']
@pytest.fixture()
def exc_trans_with_exon_ids():
return ['start_codon:chr12:42729705-42729707:+',
'CDS:chr12:42729705-42729776:+:0',
'exon:chr12:42729685-42729776:+',
'exon:chr12:42745687-42745851:+',
'exon:chr12:42748963-42749024:+',
'exon:chr12:42768665-42768876:+',
'exon:chr12:42778742-42778798:+',
'exon:chr12:42781258-42781337:+',
'exon:chr12:42787372-42787491:+',
'exon:chr12:42792656-42792796:+']
@pytest.fixture()
def true_exc_nmd():
return True
@pytest.fixture()
def true_inc_nmd():
return True
@pytest.fixture()
def strand_true_exc_nmd():
return '+'
@pytest.fixture()
def strand_true_inc_nmd():
return '+'
@pytest.fixture()
def true_dict():
return {'ENST00000443963.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501386-186501428:+',
'exon:chr3:186502218-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186507686:+'],
'ENST00000441007.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186500994-186501139:+',
'exon:chr3:186501237-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502448:+'],
'ENST00000498746.1': ['start_codon:chr3:186502243-186502245:+',
'CDS:chr3:186502243-186502266:+:0',
'exon:chr3:186501992-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186502751-186502890:+',
'exon:chr3:186503672-186503702:+'],
'ENST00000429589.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501366-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505268-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186506929:+'],
'ENST00000323963.5': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501336-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186502751-186502890:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186507689:+'],
'ENST00000445596.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501094-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186502751-186502836:+'],
'ENST00000425053.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501366-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186502751-186502890:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506099-186506205:+',
'exon:chr3:186506914-186507670:+'],
'ENST00000440191.2': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501366-186501428:+',
'exon:chr3:186502218-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186502751-186502890:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186507686:+'],
'ENST00000426808.1': ['start_codon:chr3:186501400-186501402:+',
'CDS:chr3:186501400-186501428:+:0',
'exon:chr3:186501361-186501428:+',
'exon:chr3:186502221-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186507686:+'],
'ENST00000356531.5': ['start_codon:chr3:186502423-186502425:+',
'CDS:chr3:186502423-186502485:+:0',
'exon:chr3:186501386-186501428:+',
'exon:chr3:186502218-186502266:+',
'exon:chr3:186502353-186502485:+',
'exon:chr3:186503672-186503840:+',
'exon:chr3:186503953-186504062:+',
'exon:chr3:186504291-186504434:+',
'exon:chr3:186504916-186505053:+',
'exon:chr3:186505284-186505373:+',
'exon:chr3:186505592-186505671:+',
'exon:chr3:186506914-186507683:+']}
class TestNMDExons(object):
@pytest.fixture
def nmd_exons(self, database, exon_ids):
from nmd import NMDExons
return NMDExons(database, exon_ids)
@pytest.fixture
def inc_trans_without_exon(self, nmd_exons, inc_trans_without_exon_ids):
return [nmd_exons.db[x] for x in inc_trans_without_exon_ids]
@pytest.fixture
def inc_trans_with_exon(self, nmd_exons, inc_trans_with_exon_ids):
return [nmd_exons.db[x] for x in inc_trans_with_exon_ids]
@pytest.fixture
def exc_trans_without_exon(self, nmd_exons, exc_trans_without_exon_ids):
return [nmd_exons.db[x] for x in exc_trans_without_exon_ids]
@pytest.fixture
def exc_trans_with_exon(self, nmd_exons, exc_trans_with_exon_ids):
return [nmd_exons.db[x] for x in exc_trans_with_exon_ids]
def test___init__(self, nmd_exons, exon_ids):
assert isinstance(nmd_exons.db, gffutils.FeatureDB)
pdt.assert_equal(nmd_exons.exon_ids, exon_ids)
def test_find_nmd_exons(self, nmd_exons, is_exon_nmd, exon_ids):
test = nmd_exons.find_nmd_exons()
true = pd.Series(is_exon_nmd, index=exon_ids)
pdt.assert_series_equal(test, true)
def test__is_this_exon_nmd(self, is_exon_nmd, exon_ids, nmd_exons):
for exon_id, true in zip(exon_ids, is_exon_nmd):
test = nmd_exons._is_this_exon_nmd(exon_id)
assert test == true
def test__get_transcripts_with_exon(self, database, single_exon_id,
nmd_exons, parent_transcripts_of_exon):
db = gffutils.FeatureDB(database)
exon = db[single_exon_id]
test = nmd_exons._get_transcripts_with_exon(exon)
true = parent_transcripts_of_exon
assert test == true
def test__get_all_transcripts_overlapping_exon(self, database,
single_exon_id, nmd_exons,
all_transcripts_of_exon):
db = gffutils.FeatureDB(database)
exon = db[single_exon_id]
test = nmd_exons._get_all_transcripts_overlapping_exon(exon)
true = all_transcripts_of_exon
assert test == true
def test__create_dict(self, all_transcripts_of_exon, strand_true_exc_nmd,
nmd_exons, true_dict):
test = nmd_exons._get_exons_from_transcripts(all_transcripts_of_exon,
strand_true_exc_nmd)
test = dict((key, [v.id for v in values]) for key, values in
test.items())
true = true_dict
pdt.assert_dict_equal(test, true)
def test__inclusion_nmd(self, inc_trans_without_exon, inc_trans_with_exon,
strand_true_inc_nmd, nmd_exons, true_inc_nmd):
test = nmd_exons._inclusion_nmd(inc_trans_without_exon,
inc_trans_with_exon,
strand_true_inc_nmd)
true = true_inc_nmd
assert test == true
def test__exclusion_nmd(self, exc_trans_without_exon, exc_trans_with_exon,
strand_true_exc_nmd, nmd_exons, true_exc_nmd):
test = nmd_exons._exclusion_nmd(exc_trans_without_exon,
exc_trans_with_exon,
strand_true_exc_nmd)
true = true_exc_nmd
assert test == true
def test_nmd_stop_codon_nmd(self, database, stop_codon_exon_ids,
nmd_exons):
for stop_codon_exon_pair in stop_codon_exon_ids:
db = gffutils.FeatureDB(database)
stop = stop_codon_exon_pair[0]
stop_codon = db[stop]
exon = db[stop_codon_exon_pair[1]]
true = stop_codon_exon_pair[2]
test = nmd_exons.stop_codon_nmd(exon, stop_codon)
assert test == true
```
|
{
"source": "jessicalettes/orthoexon",
"score": 2
}
|
#### File: orthoexon/tests/test_util.py
```python
import os
import pytest
@pytest.fixture
def exon_id_with_quotes():
return "'ENSE00001229068.1'"
@pytest.fixture
def exon_id():
return "ENSE00001229068.1"
def test_separate_with_quotes(exon_id_with_quotes):
from orthoexon.util import separate
test = separate(exon_id_with_quotes)
true = "ENSE00001229068"
assert test == true
def test_separate(exon_id):
from orthoexon.util import separate
test = separate(exon_id)
true = "ENSE00001229068"
assert test == true
@pytest.fixture
def location():
return "chr20:10256140-10256211:+:0"
def test_splitstart(location):
from orthoexon.util import splitstart
test = splitstart(location)
true = '10256140'
assert test == true
def test_splitend(location):
from orthoexon.util import splitend
test = splitend(location)
true = '10256211'
assert test == true
@pytest.fixture
def human_gtf_filename(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf')
@pytest.fixture
def human_gtf_database(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf.db')
@pytest.fixture
def human_fasta(table_folder):
return os.path.join(table_folder, 'GRCm38.p3.genome.fa')
def test_translate(exon_id, human_fasta, human_gtf_database):
    from orthoexon.util import translate
    from orthoexon.util import separate
    import gffutils

    # The fixture returns a database path, so open it with gffutils here.
    human_db = gffutils.FeatureDB(human_gtf_database)
    for index, species1gene in enumerate(human_db.features_of_type('gene')):
        species1gffutilsgeneid = str(species1gene['gene_id'])
        species1geneid = separate(species1gffutilsgeneid)
        for exon in human_db.children(species1geneid,
featuretype='CDS',
order_by='start'):
if exon_id == exon:
test = translate(exon, human_fasta)
break
break
true = 'MAEDADMRNELEEMQRRADQLADE'
assert test == true
# def test_getsequence(exon, human_gtf_database):
# from orthoexon.util import getsequence
#
# test = getsequence(exon, human_gtf_database)
# true = 'ATGGCCGAAGACGCAGACATGCGCAATGAGCTGGAGGAGATGCAGCGAAGGGCTGACCAGTT' \
# 'GGCTGATGAG'
#
# assert test == true
# def test_make_sequence_array(finalsequencedf):
# from orthoexon.util import make_sequence_array
#
# test = make_sequence_array(finalsequencedf)
# true = ......
#
# assert test == true
```
#### File: orthoexon/orthoexon/util.py
```python
__author__ = 'rhythmicstar'
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.SeqRecord import SeqRecord
def separate(geneId):
sep = '.'
splitGeneId = geneId.partition(sep)
splittingGeneId = splitGeneId[0]
splittingGeneId = splittingGeneId.strip("'")
splittingGeneId = splittingGeneId.strip('"')
return splittingGeneId
def splitstart(location):
sep = ':'
splitstart = location.partition(sep)
splittingstart = splitstart[2]
sep2 = "-"
splitstart2 = splittingstart.partition(sep2)
startLoc = splitstart2[0]
return startLoc
def splitend(location):
sep = '-'
splitend = location.partition(sep)
splittingend = splitend[2]
sep2 = ":"
splitend2 = splittingend.partition(sep2)
endLoc = splitend2[0]
return endLoc
# to translate sequence with correct strand and frame
def translate(exon, fasta):
exonFrame = int(exon.frame)
exonSeq = exon.sequence(fasta, use_strand=False)
exonSeq = Seq(exonSeq, alphabet = generic_dna)
if exon.strand == '-':
exonSeq = exonSeq.reverse_complement()
exonProtein = exonSeq[exonFrame:].translate(to_stop=True)
print("{}:{}-{}:{} {}\t{}\t{}".format(exon.chrom, exon.start, exon.stop,
exon.frame, exon.strand, exonSeq,
exonProtein))
return exonProtein
# to extract an exon's nucleotide sequence with correct strand and frame
def getsequence(exon, fasta):
exonFrame = int(exon.frame)
exonSeq = exon.sequence(fasta, use_strand=False)
exonSeq = Seq(exonSeq, alphabet = generic_dna)
if exon.strand == '-':
exonSeq = exonSeq.reverse_complement()
exonSeq = exonSeq[exonFrame:]
print("{}:{}-{}:{} {}\t{}".format(exon.chrom, exon.start, exon.stop,
exon.frame, exon.strand, exonSeq))
return exonSeq
def make_sequence_array(finalsequencedf):
sequence_array = []
for index, row in finalsequencedf.iterrows():
sequence_array.append(SeqRecord(Seq(row['Sequences']),
id=row['Exon ID'], description=''))
return sequence_array
```
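Small worked examples of the string helpers above; the expected values mirror the package's tests.

```python
# Examples mirroring orthoexon/tests/test_util.py
from orthoexon.util import separate, splitstart, splitend

assert separate("'ENSE00001229068.1'") == "ENSE00001229068"
assert splitstart("chr20:10256140-10256211:+:0") == "10256140"
assert splitend("chr20:10256140-10256211:+:0") == "10256211"
```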
|
{
"source": "jessicaleu24/NSF-demo",
"score": 2
}
|
#### File: grasp/data/cgrcnn_dataset.py
```python
import numpy as np
import os, sys, random, copy
import torch
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
from detectron2.structures import BoxMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import Visualizer
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from detectron2.data.build import build_detection_train_loader, build_batch_data_loader
from data.cgrcnn_dataset_as_torch_loader import cgrcnn_dataset_torch
def get_grasp_dicts(root, mode="train"):
img_path = root + "images/{}/".format(mode)
bbx_path = root + "pruned_rbbxs/"
image_filenames = os.listdir(img_path)
dataset_dicts = []
for idx, filename in enumerate(image_filenames):
record = {}
record["file_name"] = img_path + filename
height, width = np.load(record["file_name"]).astype(np.float32).shape
record["image_id"] = idx
record["height"] = height
record["width"] = width
rbbxs = np.load(bbx_path + filename, allow_pickle=True)
grasps = []
for rbbx in rbbxs:
rbox = rbbx[[0, 1, 4, 3, 5]]
grasp = {
"bbox": rbox.tolist(),
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 1,
"metric": rbbx[8],
"tilt": rbbx[6],
"z": rbbx[2],
}
grasps.append(grasp)
record["annotations"] = grasps
dataset_dicts.append(record)
return dataset_dicts
def cgrcnn_mapper(dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict)
depth = np.load(dataset_dict["file_name"]).astype(np.float32)
inst = Instances(depth.shape)
depth = torch.from_numpy(np.tile(depth, (3, 1, 1)))
grasps = dataset_dict["annotations"]
gt_boxes, gt_tilts, gt_z, gt_metric = None, None, None, None
for grasp in grasps:
box, z, tilt, metric = np.array(grasp["bbox"]), np.array(grasp["z"]), np.array(grasp["tilt"]), np.array(grasp["metric"])
if gt_boxes is None:
gt_boxes, gt_tilts, gt_z, gt_metric = box, tilt, z, metric
else:
gt_boxes = np.vstack((gt_boxes, box))
gt_tilts = np.hstack((gt_tilts, tilt))
gt_z = np.hstack((gt_z, z))
gt_metric = np.hstack((gt_metric, metric))
inst.gt_boxes = RotatedBoxes(torch.from_numpy(gt_boxes.astype(np.float32).reshape(-1, 5)))
# inst.gt_tilts = torch.from_numpy(gt_tilts.astype(np.float32))
# inst.gt_z = torch.from_numpy(gt_z.astype(np.float32))
# inst.gt_metric = torch.from_numpy(gt_metric.astype(np.float32))
inst.gt_classes = torch.ones(gt_boxes.shape[0], dtype=torch.int64)
return {"image": depth, "instances": inst}
def build_as_detection_loader(cfg, root):
# d = "train"
# dataset_dicts = get_grasp_dicts(root, mode=d)
# inputs = cgrcnn_mapper(dataset_dicts[0])
for d in ["train", "test"]:
DatasetCatalog.register("grasp_" + d, lambda d=d: get_grasp_dicts(root, mode=d))
MetadataCatalog.get("grasp_" + d).set(thing_classes=["grasps"])
grasp_metadata = MetadataCatalog.get("grasp_train")
trainloader = build_detection_train_loader(cfg, mapper=cgrcnn_mapper)
return trainloader
def build_as_torch_loader(root, mode="train", batch_size=16, num_workers=0):
if mode == "train":
train_dataset = cgrcnn_dataset_torch(root, mode=mode)
train_sampler = torch.utils.data.RandomSampler(train_dataset, replacement=False, num_samples=None, generator=None)
trainloader = build_batch_data_loader(dataset=train_dataset, sampler=train_sampler, total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=num_workers)
return trainloader
elif mode == "test":
test_dataset = cgrcnn_dataset_torch(root, mode=mode)
test_sampler = torch.utils.data.RandomSampler(test_dataset, replacement=False, num_samples=None, generator=None)
testloader = build_batch_data_loader(dataset=test_dataset, sampler=test_sampler, total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=num_workers)
return testloader
```
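A hedged sketch of consuming `build_as_torch_loader()`: the dataset root is a placeholder laid out as `get_grasp_dicts()` expects (`images/train/`, `images/test/`, `pruned_rbbxs/`), and the per-item dict keys assume `cgrcnn_dataset_torch` yields the same `{"image", "instances"}` format that `cgrcnn_mapper` produces.

```python
# Sketch only; the root path is a placeholder and the batch item keys are
# assumed to match the {"image", "instances"} format used by cgrcnn_mapper.
root = "/path/to/grasp_dataset/"
trainloader = build_as_torch_loader(root, mode="train", batch_size=16, num_workers=0)
for batch in trainloader:
    images = [item["image"] for item in batch]
    break
```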
#### File: modeling/roi_heads/grasp_rotated_fast_rcnn.py
```python
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from typing import Dict, List, Optional, Tuple, Union
from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple
from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from ..box_regression import Box2BoxTransformRotated
from ..poolers import ROIPooler
from ..matcher import SampleMatcher
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from .rotated_fast_rcnn import RROIHeads
logger = logging.getLogger(__name__)
##########################################################################################################################################
############################################### Grasp Rotated RCNN Output ################################################################
##########################################################################################################################################
def grasp_fast_rcnn_inference_rotated(
boxes, scores, tilts, zs, image_shapes, score_thresh, nms_thresh, topk_per_image
):
"""
Call `fast_rcnn_inference_single_image_rotated` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 5) if doing
class-specific regression, or (Ri, 5) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
grasp_fast_rcnn_inference_single_image_rotated(
scores_per_image, boxes_per_image, tilts_per_image, zs_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, tilts_per_image, zs_per_image, image_shape in zip(scores, boxes, tilts, zs, image_shapes)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
def grasp_fast_rcnn_inference_single_image_rotated(
scores, boxes, tilts, zs, image_shape, score_thresh, nms_thresh, topk_per_image
):
"""
Single-image inference. Return rotated bounding-box detection results by thresholding
on scores and applying rotated non-maximum suppression (Rotated NMS).
Args:
Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_inference_rotated`, but for only one image.
"""
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) & torch.isfinite(tilts).all(dim=1) & torch.isfinite(zs).all(dim=1)
if not valid_mask.all():
boxes = boxes[valid_mask]
scores = scores[valid_mask]
tilts = tilts[valid_mask]
zs = zs[valid_mask]
B = 5 # box dimension
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // B
# Convert to Boxes to use the `clip` function ...
boxes = RotatedBoxes(boxes.reshape(-1, B))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B
# Filter results based on detection scores
filter_mask = scores > score_thresh # R x K
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
if num_bbox_reg_classes == 1:
boxes = boxes[filter_inds[:, 0], 0]
else:
boxes = boxes[filter_mask]
scores = scores[filter_mask]
tilts = tilts[filter_inds[:, 0]]
zs = zs[filter_inds[:, 0]]
# Apply per-class Rotated NMS
keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
if topk_per_image >= 0:
keep = keep[:topk_per_image]
boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
tilts, zs = tilts[keep], zs[keep]
result = Instances(image_shape)
result.pred_boxes = RotatedBoxes(boxes)
result.scores = scores
result.pred_classes = filter_inds[:, 1]
result.pred_zs = torch.flatten(zs)
result.pred_tilts = torch.flatten(tilts)
return result, filter_inds[:, 0]
class GraspRotatedFastRCNNOutputs(FastRCNNOutputs):
"""
An internal implementation that stores information about outputs of a Fast R-CNN head,
and provides methods that are used to decode the outputs of a Fast R-CNN head.
"""
def __init__(
self,
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
pred_zs,
pred_tilts,
proposals,
smooth_l1_beta=0.0,
box_reg_loss_type="smooth_l1",
):
self.box2box_transform = box2box_transform
self.num_preds_per_image = [len(p) for p in proposals]
self.pred_class_logits = pred_class_logits
self.pred_proposal_deltas = pred_proposal_deltas
self.pred_zs = torch.flatten(pred_zs)
self.pred_tilts = torch.flatten(pred_tilts)
self.smooth_l1_beta = smooth_l1_beta
self.box_reg_loss_type = box_reg_loss_type
self.image_shapes = [x.image_size for x in proposals]
self.mse_loss = nn.MSELoss()
if len(proposals):
box_type = type(proposals[0].proposal_boxes)
# cat(..., dim=0) concatenates over all images in the batch
self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
assert (
not self.proposals.tensor.requires_grad
), "Proposals should not require gradients!"
# The following fields should exist only when training.
if proposals[0].has("gt_boxes"):
self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
assert proposals[0].has("gt_classes")
self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
self.gt_zs = cat([p.gt_z for p in proposals], dim=0)
self.gt_tilts = cat([p.gt_tilts for p in proposals], dim=0)
else:
self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
self._no_instances = len(proposals) == 0 # no instances found
def z_mse_loss(self):
return self.mse_loss(self.pred_zs, self.gt_zs)
def tilt_mse_loss(self):
return self.mse_loss(self.pred_tilts, self.gt_tilts)
def losses(self):
"""
Compute the losses for the grasp box head: softmax cross entropy for classification,
the configured box regression loss (smooth L1 by default) for the rotated boxes, and
MSE for the predicted z and tilt values.
Returns:
    A dict of scalar loss tensors with keys "loss_cls", "loss_box_reg",
    "loss_mse_z" and "loss_mse_tilt".
"""
return {
    "loss_cls": self.softmax_cross_entropy_loss(),
    "loss_box_reg": self.box_reg_loss(),
    "loss_mse_z": self.z_mse_loss(),
    "loss_mse_tilt": self.tilt_mse_loss(),
}
class GraspRotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
"""
Two linear layers for predicting Rotated Fast R-CNN outputs.
Edited for grasp planning.
"""
@configurable
def __init__(self, **kwargs):
"""
NOTE: this interface is experimental.
"""
super().__init__(**kwargs)
self.z_pred = Linear(self.input_size, 1)
self.tilt_pred = Linear(self.input_size, 1)
@classmethod
def from_config(cls, cfg, input_shape):
args = super().from_config(cfg, input_shape)
args["box2box_transform"] = Box2BoxTransformRotated(
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
)
args["loss_weight"] = {k: v for k, v in zip(["loss_cls", "loss_box_reg", "loss_mse_tilt", "loss_mse_z"], cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT)}
return args
def forward(self, x):
"""
Args:
x: per-region features of shape (N, ...) for N bounding boxes to predict.
Returns:
    A tuple (scores, proposal_deltas), (tilts, zs):
    scores has shape (N, K + 1), with scores for K object categories and 1
    background class; proposal_deltas has shape (N, K x 5), or (N, 5) for
    class-agnostic rotated-box regression; tilts and zs each have shape (N, 1).
"""
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
tilts = self.tilt_pred(x)
zs = self.z_pred(x)
return (scores, proposal_deltas), (tilts, zs)
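# Illustrative note (added, not part of the original code): with N pooled region
# features and K foreground classes, the assumed output shapes are scores (N, K + 1),
# proposal_deltas (N, 5 * K) or (N, 5) for class-agnostic rotated regression, and
# tilts / zs each (N, 1) from the extra Linear(input_size, 1) heads.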
def losses(self, predictions, proposals, tilts_and_zs=None):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were used
to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
``gt_classes`` are expected.
Returns:
Dict[str, Tensor]: dict of losses
"""
scores, proposal_deltas = predictions
tilts, zs = tilts_and_zs
losses = GraspRotatedFastRCNNOutputs(
self.box2box_transform,
scores,
proposal_deltas,
tilts,
zs,
proposals,
self.smooth_l1_beta,
self.box_reg_loss_type,
).losses()
return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
def inference(self, predictions, proposals, tilts_and_zs):
"""
Returns:
list[Instances]: same as `fast_rcnn_inference_rotated`.
list[Tensor]: same as `fast_rcnn_inference_rotated`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
tilts, zs = self.predict_tilts_zs(tilts_and_zs, proposals)
image_shapes = [x.image_size for x in proposals]
return grasp_fast_rcnn_inference_rotated(
boxes,
scores,
tilts,
zs,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_tilts_zs(self, tilts_and_zs, proposals):
"""
Args:
tilts_and_zs: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions.
Returns:
    Two tuples of Tensors, (tilts, zs), split per image. Element i of each has
    shape (Ri, 1), where Ri is the number of proposals for image i.
"""
tilts, zs = tilts_and_zs
num_inst_per_image = [len(p) for p in proposals]
return tilts.split(num_inst_per_image), zs.split(num_inst_per_image)
@ROI_HEADS_REGISTRY.register()
class GraspRROIHeads(RROIHeads):
"""
This class is used by Rotated Fast R-CNN to detect rotated boxes.
For now, it only supports box predictions but not mask or keypoints.
"""
@configurable
def __init__(self, **kwargs):
"""
NOTE: this interface is experimental.
"""
super().__init__(**kwargs)
assert (
not self.mask_on and not self.keypoint_on
), "Mask/Keypoints not supported in Rotated ROIHeads."
assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
ret["proposal_matcher"] = SampleMatcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=True,
)
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
assert pooler_type in ["ROIAlignRotated"], pooler_type
# assume all channel counts are equal
in_channels = [input_shape[f].channels for f in in_features][0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
# This line is the only difference v.s. StandardROIHeads
box_predictor = GraspRotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
@torch.no_grad()
def label_and_sample_proposals(self, proposals, targets):
"""
Prepare some proposals to be used to train the RROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
with a fraction of positives that is no larger than `self.positive_sample_fraction`.
Args:
See :meth:`StandardROIHeads.forward`
Returns:
list[Instances]: length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the rotated proposal boxes
- gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
- gt_classes: the ground-truth classification label for each proposal
"""
gt_boxes = [x.gt_boxes for x in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou_rotated(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
match_quality_matrix = F.relu(match_quality_matrix)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
proposals_per_image.gt_z = targets_per_image.gt_z[sampled_targets]
proposals_per_image.gt_tilts = targets_per_image.gt_tilts[sampled_targets]
else:
gt_boxes = RotatedBoxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5))
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
assert targets
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self.training:
losses = self._forward_grasp(features, proposals)
# losses = self._forward_box(features, proposals)
# Usually the original proposals used by the box head are used by the mask, keypoint
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
# predicted by the box head.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_grasp(features, proposals)
# pred_instances = self._forward_box(features, proposals)
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def _forward_grasp(
self, features: Dict[str, torch.Tensor], proposals: List[Instances],
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the grasp prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions, tilts_and_zs = self.box_predictor(box_features)
del box_features
if self.training:
losses = self.box_predictor.losses(predictions, proposals, tilts_and_zs)
# proposals is modified in-place below, so losses must be computed first.
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals, tilts_and_zs)
return pred_instances
```
|
{
"source": "JessicaLozada/docassemble.jcc.abilitytopay",
"score": 2
}
|
#### File: jcc/abilitytopay/translations.py
```python
_translations = {
"req_fine_reduction": {
"en": "###Request a Fine Reduction",
"es": "###Cómo solicitar una reducción de multa",
"zh-s": "###请求减少罚款",
"zh-t": "###請求減少罰款"
},
"landing_page_description": {
"en":
"""*This online system is an optional way to request a fine reduction for your traffic ticket in cases of financial need.*
**Use this tool to:**
* Request a reduction in your traffic fine
* Request a payment plan
* Request Community Service
* Request more time to pay your ticket
**Do not use this tool:**
* If you wish to contest the ticket
* If your offense is a misdemeanor violation
* If you would like to attend traffic school
* If you have proof of correction and would like a dismissal or reduction of those charges
Please refer to your courtesy notice or contact your court instead.""",
"es":
"""*Este sistema en línea es una manera opcional de solicitar una reducción en su multa de tránsito debido a una necesidad económica.*
**Use este servicio para solicitar:**
* Una reducción de su multa de tránsito
* Un plan de pagos
* Servicio comunitario
* Más tiempo para pagar su multa
**No use este servicio:**
* Si quiere disputar la multa
* Si su ofensa es una violación por delito menor
* Si desea asistir a la escuela de tránsito
* Si tiene un comprobante de corrección y desea reducir la multa
Por favor refiérase a su aviso de cortesía o comuníquese con su corte.
""",
"zh-s":
"""*如果您有财务需要,本在线系统是请求减少交通罚单罚款的可选途径。*
**使用该工具请求:**
* 减少您的交通罚款
* 支付计划
* 社区服务
* 延长支付罚单时间
*如果您想对罚单提出异议,或者亲自解决问题,请联系您的法院。请参阅您的提醒通知。*""",
"zh-t":
"""*如果您有財務需求,本線上系統是請求減少交通罰單罰款的途徑之一。*
**使用本工具請求:**
* 減少您的交通罰款
* 付款計劃
* 社區服務
* 延長支付罰單的時間
*如果您想對罰單提出異議,或是親自解決問題,請聯繫您的法院。請參閱您的提醒通知。*"""
},
"lookup_citation": {
"en": "### Look Up Your Citation",
"es": "### Busque su citación",
"zh-s": "### 查看您的罚单",
"zh-t": "### 查看您的罰單"
},
"what_county": {
"en": "What California county did you get the citation in?",
"es": "¿En qué condado de California recibió la citación?",
"zh-s": "您在加州哪个县得到罚单?",
"zh-t": "您在加州哪個縣得到罰單?"
},
"what_citation_number": {
"en": "What's the citation number?",
"es": "¿Cuál es el número de citación?",
"zh-s": "罚单号码是多少?",
"zh-t": "罰單號碼是多少?"
},
"dont_have_citation_number": {
"en": "I don't have my citation number",
"es": "No tengo mi número de citación",
"zh-s": "我没有罚单号码",
"zh-t": "我沒有罰單號碼"
},
"what_first_name": {
"en": "What is your first name?",
"es": "¿Cuál es su nombre?",
"zh-s": "您的名字是什么?",
"zh-t": "您的名字是什麼?"
},
"what_last_name": {
"en": "What is your last name?",
"es": "¿Cuál es su apellido?",
"zh-s": "您的姓是什么?",
"zh-t": "您的姓是什麼?"
},
"what_dob": {
"en": "What's your date of birth?",
"es": "¿Cuál es su fecha de nacimiento?",
"zh-s": "您的出生日期是哪天?",
"zh-t": "您的出生日期是哪天?"
},
"what_license_number": {
"en": "What is your Driver's License Number?",
"es": "¿Cuál es el número de su licencia de manejar?",
"zh-s": "您的驾照号码是多少?",
"zh-t": "您的駕照號碼是多少?"
},
"name": {
"en": "Name",
"es": "Nombre",
"zh-s": "姓名",
"zh-t": "姓名"
},
"citation_number": {
"en": "Citation Number",
"es": "Número de citación",
"zh-s": "罚单号码",
"zh-t": "罰單號碼"
},
"county": {
"en": "County",
"es": "Condado",
"zh-s": "县",
"zh-t": "縣"
},
"violation_date": {
"en": "Violation Date",
"es": "Fecha de la infracción",
"zh-s": "违规日期",
"zh-t": "違規日期"
},
"total_due": {
"en": "Total Due",
"es": "Monto que se debe",
"zh-s": "应付总额",
"zh-t": "應付總額"
},
"yes": {
"en": "Yes",
"es": "Sí",
"zh-s": "是",
"zh-t": "是"
},
"no": {
"en": "No",
"es": "No",
"zh-s": "没有",
"zh-t": "否"
},
"your_citations": {
"en": "### Your Citations",
"es": "### Sus citaciones",
"zh-s": "",
"zh-t": ""
},
"need_more_info": {
"en": "We need some more information to find your citation: ",
"es": "Necesitamos más información para encontrar su citación:",
"zh-s": "",
"zh-t": ""
},
"found_multiple_citations": {
"en": "We found your citation. We also looked for other citations in your name in {county} County. You can request fine reductions for all the citations listed below.",
"es": "Encontramos su citación. También buscamos otras citaciones bajo su nombre en el condado de {county}. Puede solicitar una reducción en las multas para todas las citaciones que aparecen abajo.",
"zh-s": "",
"zh-t": ""
},
"select_citation": {
"en": "Select each of the tickets for which you want to request a reduction.",
"es": "Seleccione las citaciones para las que desea solicitar una reducción.",
"zh-s": "",
"zh-t": ""
},
"none_of_these_are_right": {
"en": "None of these are right",
"es": "Ninguna de estas es mía",
"zh-s": "",
"zh-t": ""
},
"how_this_works": {
"en": "### How this works",
"es": "### Cómo funciona este servicio",
"zh-s": "### 如何运作",
"zh-t": "### 如何運作"
},
"dont_have_citation_number": {
"en": "I don't have my citation number",
"es": "No tengo mi número de citación",
"zh-s": "我没有罚单号码",
"zh-t": ""
},
"how_works_instructions": {
"en": """
*We will walk you through a few questions to help the Court better understand your need for a reduction.*
Be prepared to share information about your income, monthly expenses and any public benefits you currently receive. A court official will still review your answers as they would if you came to court in person.
""",
"es": """
*Le haremos algunas preguntas para que la corte pueda comprender mejor su necesidad de reducir la multa.*
Esté preparado para compartir información sobre sus ingresos, gastos mensuales y cualquier beneficio público que recibe actualmente. Sus respuestas serán revisadas por un funcionario judicial como si fuera a la corte en persona.
""",
"zh-s": """
*我们将通过几个问题帮助法院更好地了解您要减少罚款的需求。*
请准备好分享您的收入、每月开支及目前领取的公共福利。法院官员仍然会像您亲自来法院一样审查您的答案。
""",
"zh-t": """
*我們將透過幾個問題幫助法院更瞭解您要減少罰款的需求。*
請準備好分享您的收入、每月開支及目前領取的公共福利。法院官員仍然會像您親自來法院一樣審查您的答案。
"""
},
"public_benefits": {
"en": "### Public Benefits",
"es": "### Beneficios públicos",
"zh-s": "### 公共福利",
"zh-t": "### 公共福利"
},
"receiving_benefits": {
"en": "Are you currently receiving any benefits? If you don''t see the public benefit you are currently enrolled in, click \"Other\".",
"es": "¿Está recibiendo beneficios actualmente? Si no ve el beneficio público que está recibiendo actualmente, haga clic en \"Otro\".",
"zh-s": "您目前是否在领取任何福利?如果您没有看到您目前登记的公共福利,点击“其他”",
"zh-t": "您目前是否領取任何福利?如果您沒有看到您目前登記的公共福利,請點選「其他」"
},
"cal_fresh": {
"en": "CalFresh (Food Stamps)",
"es": "CalFresh (cupones de alimentos)",
"zh-s": "CalFresh(食品券)",
"zh-t": "CalFresh(糧食券)"
},
"medi_cal": {
"en": "Medi-Cal",
"es": "Medi-Cal",
"zh-s": "Medi-Cal",
"zh-t": "Medi-Cal"
},
"cr_ga": {
"en": "General Assistance / County Relief",
"es": "Ayuda general/Ayuda del condado",
"zh-s": "普通救助/ 县救济",
"zh-t": "普通救助/ 縣救濟"
},
"ssi": {
"en": "SSI: Supplemental Security Income",
"es": "SSI: Seguridad de ingreso suplementario",
"zh-s": "SSI:社会安全补助金",
"zh-t": "SSI:社會安全補助金"
},
"wic": {
"en": "WIC: Special Supplemental Nutrition for Women, Infants and Children",
"es": "WIC: Programa de Nutrición y Alimentos para Mujeres, Bebés y Niños",
"zh-s": "",
"zh-t": ""
},
"ssp": {
"en": "SSP: State Supplemental Payment",
"es": "SSP: Pagos suplementarios del estado",
"zh-s": "SSP:州补助金",
"zh-t": "SSP:州補助金"
},
"ihss": {
"en": "IHSS: In Home Supportive Services",
"es": "IHSS: Servicios de apoyo en el hogar",
"zh-s": "IHSS:居家支持服务",
"zh-t": "IHSS:居家支援服務"
},
"cal_works": {
"en": "CalWORKS: California Work Opportunity and Responsibility to Kids Act",
"es": "CalWORKs: Oportunidades de trabajo y responsabilidades hacia los niños de California",
"zh-s": "CalWORKS:《加州工作机会及对孩子责任法案》",
"zh-t": "CalWORKS:《加州工作機會及對孩子責任法案》"
},
"tanf": {
"en": "TANF: Temporary Assistance for Needy Families",
"es": "TANF: Ayuda temporal para familias necesitadas",
"zh-s": "TANF:穷困家庭临时救助",
"zh-t": "TANF:窮困家庭臨時救助"
},
"capi": {
"en": "CAPI: Cash Assistance Program for Aged, Blind or Disabled Legal Immigrants",
"es": "CAPI: Programa de ayuda en efectivo para inmigrantes legales ancianos, no videntes o discapacitados",
"zh-s": "CAPI:老人、盲人或残障合法移民现金救助计划",
"zh-t": "CAPI:老人、盲人或殘障合法移民現金救助計畫"
},
"other": {
"en": "Other",
"es": "Otro",
"zh-s": "其他",
"zh-t": "其他"
},
"other_benefit_name": {
"en": "What's the other public benefit you receive?",
"es": "¿Cómo se llama el beneficio público que recibe? Nota: debe responder esta pregunta en inglés.",
"zh-s": "",
"zh-t": ""
},
"proof_calfresh": {
"en": "Proof of CalFresh",
"es": "### Comprobante de CalFresh",
"zh-s": "### CalFresh 的证明",
"zh-t": ""
},
"calfresh_upload": {
"en": "### You said you receive CalFresh. Can you please upload any document that proves you are currently receiving this benefit - like a CalFresh card?",
"es": "### Usted dijo que recibe CalFresh. ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de CalFresh?",
"zh-s": "",
"zh-t": ""
},
"calfresh_card": {
"en": "CalFresh Card",
"es": "Tarjeta de CalFresh",
"zh-s": "",
"zh-t": ""
},
"have_no_proof": {
"en": "I don't have proof available",
"es": "No tengo un comprobante en este momento",
"zh-s": "我现在没有证明",
"zh-t": "我現在沒有證明"
},
"why_no_proof": {
"en": "Tell us why you can't provide documentation at this time",
"es": "Explique por qué no puede darnos documentación en este momento. Nota: debe responder esta pregunta en inglés.",
"zh-s": "告诉我们您为何现在不能提供文件",
"zh-t": "告訴我們您為何現在不能提供文件"
},
"proof_of_medical": {
"en": "### Proof of Medi-Cal",
"es": "### Comprobante de Medi-Cal",
"zh-s": "###Medi-Cal 的证明",
"zh-t": ""
},
"upload_medical": {
"en": "### You said you receive Medi-Cal. Can you please upload any document that proves you are currently receiving this benefit - like a MediCal card?",
"es": "### Usted dijo que recibe Medi-Cal. ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de Medi-Cal?",
"zh-s": "",
"zh-t": ""
},
"medi_cal_card": {
"en": "Medi-Cal Card",
"es": "Tarjeta de Medi-Cal",
"zh-s": "",
"zh-t": ""
},
"proof_of_ssi_title": {
"en": "### Proof of SSI",
"es": "### Comprobante de SSI",
"zh-s": "###SSI 的证明",
"zh-t": ""
},
"upload_ssi_document": {
"en": "*You said you receive Supplemental Security Income. Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe Seguridad de ingreso suplementario. ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de Seguridad de ingreso suplementario?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_ssi": {
"en": "Proof of SSI",
"es": "Comprobante de SSI",
"zh-s": "SSI 的证明",
"zh-t": ""
},
"proof_of_ssp": {
"en": "### Proof of State Supplemental Payment",
"es": "### Comprobante de Pagos suplementarios del estado",
"zh-s": "###州补助金 的证明",
"zh-t": ""
},
"upload_ssp": {
"en": "*You said you receive State Supplemental Payment (SSP). Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de SSP (pagos suplementarios del estado). ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de pagos suplementarios del estado?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_ssp_abbrev": {
"en": "Proof of SSP",
"es": "Comprobante de SSP",
"zh-s": "州补助金 的证明",
"zh-t": ""
},
"proof_of_gacr": {
"en": "### Proof of General Assistance/County Relief",
"es": "### Comprobante de ayuda general/ayuda del condado",
"zh-s": "###普通救助/ 县救济 的证明",
"zh-t": ""
},
"upload_gacr": {
"en": "*You said you receive General Assistance/County Relief. Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe ayuda general/ayuda del condado. ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de ayuda general/ayuda del condado?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_gacr_abbrev": {
"en": "Proof of GA/CR",
"es": "Comprobante de ayuda general/ayuda del condado",
"zh-s": "普通救助/ 县救济 的证明",
"zh-t": ""
},
"proof_of_ihss": {
"en": "### Proof of In Home Supportive Services",
"es": "### Comprobante de servicios de apoyo en el hogar",
"zh-s": "###居家支持服务 的证明",
"zh-t": ""
},
"upload_ihss": {
"en": "*You said you receive In Home Supportive Services (IHSS). Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de IHSS (servicios de apoyo en el hogar). ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de servicios de apoyo en el hogar?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_ihss_abbrev": {
"en": "Proof of IHSS",
"es": "Comprobante de IHSS",
"zh-s": "居家支持服务 的证明",
"zh-t": ""
},
"proof_of_tanf": {
"en": "### Proof of TANF",
"es": "### Comprobante de TANF",
"zh-s": "###穷困家庭临时救助 的证明",
"zh-t": ""
},
"upload_tanf": {
"en": "*You said you receive Temporary Assistance for Needy Families. Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de TANF (ayuda temporal para familias necesitadas). ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de ayuda temporal para familias necesitadas?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_tanf_abbrev": {
"en": "Proof of TANF",
"es": "Comprobante de TANF",
"zh-s": "穷困家庭临时救助 的证明",
"zh-t": ""
},
"proof_of_wic": {
"en": "### Proof of WIC",
"es": "### Comprobante de WIC",
"zh-s": "###妇女、婴儿及儿童特殊营养补助 的证明",
"zh-t": ""
},
"upload_wic": {
"en": "*You said you are enrolled in WIC. Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de WIC (programa de Nutrición suplementaria especial para mujeres, bebés y niños). ¿Puede cargar un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de nutrición suplementaria especial para mujeres, bebés y niños?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_wic_abbrev": {
"en": "Proof of WIC",
"es": "Comprobante de WIC",
"zh-s": "妇女、婴儿及儿童特殊营养补助 的证明",
"zh-t": ""
},
"proof_of_calworks": {
"en": "### Proof of CalWORKS",
"es": "### Comprobante de CalWORKS",
"zh-s": "###《加州工作机会及对孩子责任法案》 的证明",
"zh-t": ""
},
"upload_calworks": {
"en": "*You said you are enrolled in CalWORKS. Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de CalWORKs (asistencia monetaria del Programa de Oportunidades de Trabajo y Responsabilidad hacia los Niños). ¿Puede cargar un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de oportunidades de trabajo y responsabilidades hacia los niños de California?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_calworks_abbrev": {
"en": "Proof of CalWORKS",
"es": "Comprobante de CalWORKS",
"zh-s": "《加州工作机会及对孩子责任法案》 的证明",
"zh-t": ""
},
"proof_of_capi": {
"en": "### Proof of CAPI",
"es": "### Comprobante de CAPI",
"zh-s": "###老人、盲人或残障合法移民现金救助计划 的证明",
"zh-t": ""
},
"upload_capi": {
"en": "*You said you are enrolled in the Cash Assistance Program for Aged, Blind, or Disabled Legal Immigrants (CAPI). Can you please upload any document that proves you are currently receiving this benefit?*",
"es": "*Usted dijo que recibe beneficios de CAPI (ayuda en efectivo para inmigrantes legales ancianos, no videntes o discapacitados). ¿Puede cargar un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de programa de ayuda en efectivo para inmigrantes legales ancianos, no videntes o discapacitados?*",
"zh-s": "",
"zh-t": ""
},
"proof_of_capi_abbrev": {
"en": "Proof of CAPI",
"es": "Comprobante de CAPI",
"zh-s": "老人、盲人或残障合法移民现金救助计划 的证明",
"zh-t": ""
},
"proof_of": {
"en": "### Proof of",
"es": "### Comprobante de",
"zh-s": "### 的证明",
"zh-t": ""
},
"said_receive": {
"en": "### You said you receive",
"es": "Usted dijo que recibe",
"zh-s": "",
"zh-t": ""
},
"please_upload": {
"en": ". Please upload any document that proves you are currently receiving this benefit.",
"es": ". Suba un documento que demuestre que está recibiendo actualmente este beneficio.",
"zh-s": "",
"zh-t": ""
},
"upload_picture": {
"en": "Upload picture",
"es": "Subir foto",
"zh-s": "",
"zh-t": ""
},
# TODO: Shim these into prepareFileUpload() so we don't store translations
# directly in a2p.js
"Use a different photo": {
"en": "Use a different photo",
"es": "Subir foto diferente",
"zh-s": "",
"zh-t": ""
},
"Add a photo": {
"en": "Add a photo",
"es": "Subir foto",
"zh-s": "",
"zh-t": ""
},
"current_income": {
"en": "### Current Income",
"es": "### Ingresos actuales",
"zh-s": "### 当前收入",
"zh-t": "### 目前收入"
},
"money_each_month": {
"en": "### How much money do you take home each month?",
"es": "### ¿Cuánto dinero gana por mes después de descontar los impuestos?",
"zh-s": "### 您每个月带回家多少税后收入?",
"zh-t": "### 您每個月帶回家多少稅後收入?"
},
"answer_best_you_can": {
"en": "Answer the best you can. Be sure to include income from your job and any other sources, including payments from people in your household (such as spouse or live-in partner).",
"es": "Responda a su mejor saber y entender. No se olvide de incluir los ingresos de su trabajo y de toda otra fuente, incluyendo los ingresos de otras personas que viven en su hogar (como un cónyuge o pareja de hecho).",
"zh-s": "尽可能回答。务必包括您的工作及其他来源的收入,比如从家人(如配偶或同居伴侣)得到的付款?",
"zh-t": "盡可能回答。務必包括您的工作及其他來源的收入,例如從家人(如配偶或同居伴侶)得到的付款?"
},
"amount_of_income": {
"en": "Amount of Income",
"es": "Monto de ingresos",
"zh-s": "收入金额",
"zh-t": "收入金額"
},
"household_details": {
"en": "### Household Details",
"es": "### Detalles sobre el hogar",
"zh-s": "### 家庭详情",
"zh-t": "### 家庭詳情"
},
"how_many_household": {
"en": "How many people live in your household?",
"es": "¿Cuántas personas viven en su hogar?",
"zh-s": "您家里有几口人?",
"zh-t": "您家裡有幾口人?"
},
"monthly_expenses": {
"en": "### Monthly Expenses",
"es": "### Gastos mensuales",
"zh-s": "### 每月花费",
"zh-t": "### 每月開支"
},
"please_estimate": {
"en": "### Now, please estimate what you pay _each month_ for the following expenses.",
"es": "### Ahora estime cuánto paga por mes por los siguientes gastos.",
"zh-s": "### 现在,请估算您每个月的以下花费是多少。",
"zh-t": "### 現在,請估算您每個月的以下開支。"
},
"you_take_home_pre": {
"en": "You said you take home",
"es": "Usted dijo que gana ",
"zh-s": "您说您每个月带回家${ format_money(income) }。如果您没有此项花费",
"zh-t": "您說每個月帶回家"
},
"you_take_home_post": {
"en": " each month. If you don't spend money on an expense, enter $0.",
"es": " por mes, después de deducir impuestos. Si no tiene este gasto, ponga $0.",
"zh-s": " 请输入$0。",
"zh-t": "。如果您沒有此項開支,請輸入$0。 "
},
"rent": {
"en": "Rent",
"es": "Alquiler",
"zh-s": "租金",
"zh-t": "租金"
},
"mortgage": {
"en": "Mortgage",
"es": "Hipoteca",
"zh-s": "按揭",
"zh-t": "房屋貸款"
},
"utilities": {
"en": "Utilities",
"es": "Servicios públicos",
"zh-s": "公用事业",
"zh-t": "公用事業"
},
"phone_bill": {
"en": "Phone Bill",
"es": "Teléfono",
"zh-s": "电话费",
"zh-t": "電話費"
},
"food_groceries_restaurants": {
"en": "Food (Groceries & Restaurants)",
"es": "Alimentos (supermercado y restaurantes)",
"zh-s": "食品(食品店和饭店)",
"zh-t": "食品(雜貨店和餐廳)"
},
"insurance": {
"en": "Insurance",
"es": "Seguro",
"zh-s": "保险",
"zh-t": "保險"
},
"clothing": {
"en": "Clothing",
"es": "Ropa",
"zh-s": "衣服",
"zh-t": "衣服"
},
"child_spousal_support": {
"en": "Child or Spousal Support",
"es": "Manutención de los hijos o del cónyuge",
"zh-s": "子女或配偶扶养费",
"zh-t": "子女或配偶扶養費"
},
"transportation_gas_etc": {
"en": "Transportation (Gas, Car Payments, Transit)",
"es": "Transporte (gasolina, pagos del carro, transporte público)",
"zh-s": "交通(汽油、车款、公交)",
"zh-t": "交通(汽油、汽車還款、公交)"
},
"would_other_hardship": {
"en": "Would you like to report other expenses or reasons for financial hardship?",
"es": "¿Quiere reportar otros gastos o razones para explicar sus dificultades económicas?",
"zh-s": "您是否想要报告其他花费或财务困难的原因?",
"zh-t": "您是否願意報告其他開支或財務困難的原因?"
},
"what_hardship": {
"en": "What other financial hardship would you like the Court to consider?",
"es": "¿Qué otro tipo de problema económico quiere que considere la corte? Nota: debe responder esta pregunta en inglés.",
"zh-s": "您希望法院考虑哪些其他种类的财务困难?",
"zh-t": "您希望法院考慮其他哪一種財務困難?"
},
"total_additional_expenses": {
"en": "What is the total monthly cost of these additional expenses?",
"es": "¿Cuánto es el costo total mensual de estos gastos adicionales?",
"zh-s": "",
"zh-t": ""
},
"additional_requests": {
"en": "Additional Requests",
"es": "Solicitudes adicionales",
"zh-s": "附加请求",
"zh-t": "額外請求"
},
"would_like_additional": {
"en": "In addition to a fine reduction, would you like to add any of the following requests to the current or reduced amount?",
"es": "Además de solicitar una multa reducida, ¿desea pedir una de las siguientes opciones a la corte?",
"zh-s": "",
"zh-t": "您是否想在目前或減低的金額之外增加以下請求:"
},
"payment_plan": {
"en": "Payment Plan",
"es": "Plan de pagos",
"zh-s": "支付计划",
"zh-t": "付款計劃"
},
"community_service": {
"en": "Community Service",
"es": "Servicio comunitario",
"zh-s": "社区服务",
"zh-t": "社區服務"
},
"extension": {
"en": "Extension",
"es": "Aplazamiento de pago de la multa",
"zh-s": "延期",
"zh-t": "延期"
},
"administrative_fees": {
"en": "Note: Your court may charge administrative fees for setting up a payment plan or community service work plan.",
"es": "Nota: Su corte puede cobrar una cuota para establecer un plan de pagos o un plan de servicio comunitario.",
"zh-s": "备注:您的法院可就设定支付计划或者社区服务工作计划收取管理费。",
"zh-t": "備註:您的法院可能會收取設定付款計劃或社區服務工作計劃的管理費。"
},
"make_plea": {
"en": "### Make a Plea for {citation_number}",
"es": "### Haga su declaración {citation_number}",
"zh-s": "### 进行答辩 {citation_number}",
"zh-t": "### 進行答辯 {citation_number}"
},
"plea_instructions": {
"en": """
In order to submit your fine reduction request, you need to admit responsibility for the ticket by pleading **Guilty** or **No Contest**.
If you do not want to admit responsibility or if you do not understand these rights, please exit the system and contact your court to set up an in-person court appearance.
By pleading you will be giving up the following rights:
* To be represented by an attorney employed by you;
* To have a speedy and public trial in front of a judge;
* To testify, to present evidence, and to use court orders without cost to compel the attendance of witnesses and the production of evidence on your behalf;
* To have the witnesses against you testify under oath in court, and to question such witnesses;
* To remain silent and not testify and not incriminate yourself.
""",
"es": """
Para presentar su solicitud de reducción de multa, tiene que admitir su responsabilidad por la citación y declararse culpable o sin disputa. Si no quiere admitir responsabilidad o no comprende estos derechos, deje de usar este programa y comuníquese con la corte para programar una comparecencia en persona.
Al declararse culpable o sin disputa, estará renunciando a los siguientes derechos:
* representación por un abogado contratado por usted;
* un juicio público y sin demora delante de un juez;
* dar testimonio, presentar pruebas, y usar órdenes de la corte sin costo para obligar la asistencia de testigos y la presentación de pruebas en su nombre;
* el testimonio bajo juramento en la corte de testigos en su contra, y la interrogación de dichos testigos;
* guardar silencio y no testificar ni incriminarse.
""",
"zh-s": """
为提交您的减少罚款请求,您需要通过有罪或无异议答辩承认对罚单的责任。如果您不想承认责任,或者您不理解这些权利,请退出系统,联系法院安排亲自出庭。
通过答辩,您将放弃以下权利:
* 由您聘请的律师代理;
* 由法官进行快速、公开审理;
* 作证,出示证据,免费使用法庭命令强制证人为您出庭和举证;
* 让对您不利的证人在法庭宣誓作证,并质问该证人;
* 保持沉默,不作证,不自证有罪。
""",
"zh-t": """
為提交您的減少罰款請求,您需要透過有罪或無異議答辯承認您對罰單的責任。如果您不想承認責任,或是不理解這些權利,請退出系統,聯繫法院安排親自出庭。
透過答辯,您將放棄以下權利:
* 由您聘請的律師代理;
* 由法官進行快速、公開的審理;
* 作證,出示證據,免費使用法庭命令強制證人為您出庭和舉證;
* 讓對您不利的證人在法庭宣誓作證,並質問該證人;
* 保持沉默,不作證,不自證有罪。
"""
},
"Make_plea_choice": {
"en": "Make a choice between pleading Guilty or No Contest. A no contest plea is a way of saying, 'I don’t believe I did all that the officer charges, but I admit violating the law.'",
"es": "Decida si se va a declarar culpable o sin disputa. Declararse ‘sin disputa’ es una manera de decir “no creo haber hecho todo lo que me acusa el agente, pero admito que violé la ley”.",
"zh-s": "在有罪或无异议答辩之间做出选择。无异议答辩是表示:“我不认为我做了官员指控的一切,但我承认违反法律。”",
"zh-t": "在有罪或無異議答辯之間做出選擇。無異議答辯是表示:「我不認為我做了官員指控的一切,但我承認違反法律。」"
},
"no_content_plea": {
"en": "**No Contest Plea.** I have read, understand, and waive the rights above, there are facts to support my plea, I am entering my plea freely and voluntarily, and I agree to plead “no contest”. I understand that, for purposes of this case, a plea of no contest will be considered the same as a plea of guilty and that if I plead no contest the court will find me guilty.",
"es": "**Sin disputa.** He leído, comprendo y renuncio a los derechos descritos arriba; hay hechos que justifican mi declaración. Hago esta declaración en forma libre y voluntaria, y acepto hacer una declaración de ‘sin disputa’. Comprendo que una declaración de sin disputa en este caso se interpretará de la misma manera que una declaración de culpable, y que si me declaro sin disputa la corte me declarará culpable.",
"zh-s": "**无异议答辩。** 我已阅读、理解并放弃以上权利,有事实支撑我的答辩,我的答辩是自由、自愿做出的,并且我同意“无异议”答辩。我理解,就本案而言,无异议答辩将被视同有罪答辩,并且如果我进行无异议答辩,法院将认定我有罪。",
"zh-t": "**無異議答辯。** 我已閱讀、理解並放棄以上權利,有事實支持我的答辯,我的答辯是自由、自願做出的,而且我同意「無異議」答辯。我理解,就本案而言,無異議答辯將被視同有罪答辯,如果我進行無異議答辯,法院將認定我有罪。"
},
"guilty_plea": {
"en": "**Guilty Plea.** I have read, understand, and waive the rights above, there are facts to support my plea. I am entering my plea freely and voluntarily, and agree to plead guilty.",
"es": "**Declaración de culpable.** He leído, comprendo y renuncio a los derechos descritos arriba; hay hechos que justifican mi declaración. Hago esta declaración en forma libre y voluntaria, y acepto hacer una declaración de culpable.",
"zh-s": "**有罪答辩。** 我已阅读、理解并放弃以上权利,有事实支撑我的答辩。我的答辩是自由、自愿做出的,并且我同意有罪答辩。",
"zh-t": "**有罪答辯。** 我已閱讀、理解並放棄以上權利,有事實支持我的答辯。我的答辯是自由、自願做出的,而且我同意有罪答辯。"
},
"admit_responsibility": {
"en": "Note: Once you admit responsibility, you will have a conviction for this traffic offense that will be reported the Department of Motor Vehicles (DMV).",
"es": "Nota: Una vez que admita responsabilidad, lo condenarán por esta infracción de tránsito y la condena será reportada al Departamento de Vehículos Motorizados (DMV).",
"zh-s": "备注:一旦您承认责任,您将被认定实施了该交通犯罪,这将被报告给机动车管理局(DMV)。",
"zh-t": "備註:一旦您承認責任,您將被認定實施了該交通犯罪,這會報告給機動車輛管理局(DMV)。"
},
"optional_questions": {
"en": "### Optional Questions",
"es": "### Preguntas opcionales",
"zh-s": "### 选答问题",
"zh-t": "### 選答題"
},
"info_confidential": {
"en": "## Your information will be kept confidential and may be used for research conducted to improve court services.",
"es": "## Su información será confidencial y se puede usar para investigaciones con el fin de mejorar los servicios de la corte.",
"zh-s": "## 您的信息将被保密,可能被用于为改善法院服务而进行的研究。",
"zh-t": "## 您的資訊將會保密,可能被用於為改善法院服務而進行的研究。"
},
"how_helpful": {
"en": "How helpful was this tool in addressing your traffic ticket?",
"es": "¿Qué tan útil fue este servicio para resolver su multa de tránsito?",
"zh-s": "该工具对解决您的交通罚单有多大帮助?",
"zh-t": "本工具對解決您的交通罰單有多大幫助?"
},
"very_helpful": {
"en": "Very helpful",
"es": "Muy útil",
"zh-s": "很有帮助",
"zh-t": "很有幫助"
},
"somewhat_helpful": {
"en": "Somewhat helpful",
"es": "Algo útil",
"zh-s": "有些帮助",
"zh-t": "有點幫助"
},
"as_helpful_as_court": {
"en": "As helpful as coming into court",
"es": "Tan útil como ir a la corte",
"zh-s": "跟去法院的帮助相同",
"zh-t": "和去法院的幫助相同"
},
"somewhat_unhelpful": {
"en": "Somewhat unhelpful",
"es": "No muy útil",
"zh-s": "不大有帮助",
"zh-t": "不太有幫助"
},
"not_helpful": {
"en": "Not helpful at all",
"es": "Completamente inútil",
"zh-s": "根本没有帮助",
"zh-t": "根本沒有幫助"
},
"say_more_about_difficulty": {
"en": "Can you say more about what difficulty you encountered with the tool?",
"es": "¿Nos puede contar más sobre la dificultad que tuvo con este servicio?",
"zh-s": "您能否详细说明您使用该工具面临的困难?",
"zh-t": "您能否詳細說說使用本工具面臨的困難?"
},
"how_to_resolve_again": {
"en": "If you get another traffic ticket, how would you prefer to resolve it?",
"es": "Si recibe otra multa de tránsito, ¿cómo preferiría resolverla?",
"zh-s": "如果您得到其他交通罚单,您是否想要解决?",
"zh-t": "如果您得到其他交通罰單,您是否想要解決?"
},
"use_this_tool": {
"en": "Use this online tool",
"es": "Con este servicio en línea",
"zh-s": "使用该在线工具",
"zh-t": "使用本線上工具"
},
"in_person": {
"en": "In-person at court",
"es": "En persona en la corte",
"zh-s": "亲自去法院",
"zh-t": "親自去法院"
},
"unsure": {
"en": "Unsure",
"es": "No estoy seguro",
"zh-s": "不确定",
"zh-t": "不確定"
},
"would_have_been_difficult": {
"en": "Would it have been difficult to come to court in person?",
"es": "¿Es díficil para usted ir a la corte en persona?",
"zh-s": "是否难以亲自去法院?",
"zh-t": "是否難以親自去法院?"
},
"why_difficult": {
"en": "Tell us why it's difficult for you to come to court in person (check all that apply)",
"es": "¿Por qué le cuesta ir a la corte en persona? (marque todo lo que corresponde)",
"zh-s": "告诉我们您为何难以亲自去法院(勾选所有适用项目)",
"zh-t": "告訴我們您為何難以親自去法院(勾選所有適用項目)"
},
"physical_disability": {
"en": "Physical Disability",
"es": "Discapacidad física",
"zh-s": "身体残障",
"zh-t": "身體殘障"
},
"transportation": {
"en": "Transportation",
"es": "Transporte",
"zh-s": "交通接送",
"zh-t": "交通接送"
},
"cant_take_off_work": {
"en": "Can't take time off work",
"es": "No puedo faltar al trabajo",
"zh-s": "不能从工作中抽出时间",
"zh-t": "不能從工作中抽出時間"
},
"childcare": {
"en": "Childcare",
"es": "Cuidado de los hijos",
"zh-s": "托儿",
"zh-t": "托兒"
},
"share_why_difficult": {
"en": "Share why it's difficult for you to come to court in person",
"es": "Explique por qué le cuesta ir a la corte en persona",
"zh-s": "告诉我们您为何难以亲自去法院",
"zh-t": "語言服務"
},
"final_step": {
"en": "### Final Step",
"es": "### Paso final",
"zh-s": "",
"zh-t": ""
},
"what_email_can_court_use": {
"en": "What email address can the Court use to communicate about your request?",
"es": "Su dirección de email donde la corte puede comunicarse con usted acerca de su solicitud",
"zh-s": "",
"zh-t": ""
},
"email_again": {
"en": "Please enter your email address again.",
"es": "Ingrese su dirección de email otra vez",
"zh-s": "",
"zh-t": ""
},
"make_sure_emails_match": {
"en": "Make sure the e-mail addresses match.",
"es": "Asegure que las dos direcciones sean idénticas",
"zh-s": "",
"zh-t": ""
},
"emails_match": {
"en": '<span class="text-success">E-mail addresses match!</span>',
"es": '<span class="text-success">¡Las dos direcciones sean idénticas!</span>',
"zh-s": '<span class="text-success"></span>',
"zh-t": '<span class="text-success"></span>'
},
"penalty_of_perjury": {
"en": "By checking this box, I declare under penalty of perjury that what I have reported is true.",
"es": "Al marcar esta casilla, declaro bajo pena de perjurio que lo que he reportado es verdadero.",
"zh-s": "",
"zh-t": ""
},
"cant_continue_email": {
"en": "You cannot continue until you confirm your e-mail address.",
"es": "No puede continuar hasta confirmar su dirección de email.",
"zh-s": "",
"zh-t": ""
},
"cant_continue_sign": {
"en": "You cannot continue until you electronically sign under penalty of perjury.",
"es": "No puede continuar hasta firmar electrónicamente bajo pena de perjurio.",
"zh-s": "",
"zh-t": ""
},
"continue_button": {
"en": "Submit",
"es": "Ingresar",
"zh-s": "",
"zh-t": ""
},
"thank_you": {
"en": "### Thank you!",
"es": "### ¡Gracias!",
"zh-s": "",
"zh-t": ""
},
"expect_response": {
"en": "*Expect a response from the court by email in 30 business days. If you don't get a notification please contact the Court.*",
"es": "*Puede esperar una respuesta de la corte por email en 30 días hábiles. Si no recibe una notificación, comuníquese con la corte.",
"zh-s": "",
"zh-t": ""
},
"restart": {
"en": "Start Another Request",
"es": "Comenzar otra solicitud",
"zh-s": "",
"zh-t": ""
},
#Error messages
###########################################
"citation_number_required": {
"en": "Please enter a citation number.",
"es": "Favor de poner el número de citación.",
"zh-s": "",
"zh-t": ""
},
"cant_process_online": {
"en": "We can't process this citation online. Please contact your Court for more information.",
"es": "No podemos procesar esta citación en línea. Favor de contactarse con su Corte para más información.",
"zh-s": "",
"zh-t": ""
},
"no_match_found": {
"en": "No match was found. Please check the county and citation number. Try again. Or click \"I don't have my citation\" to search another way.",
"es": "No se encontró una citación. Favor de verificar el condado y el número de citación. Intente de nuevo. O haga clic en \"No tengo mi citación\" para buscar de otra manera.",
"zh-s": "",
"zh-t": ""
},
"check_information": {
"en": "Check the information you entered. Try again.",
"es": "Verifique la información que ingresó. Intente de nuevo.",
"zh-s": "",
"zh-t": ""
},
"something_went_wrong": {
"en": "Sorry! Something went wrong with your submission. Our support team has been notified. Please try again later, or contact your court.",
"es": "¡Lo sentimos! Hubo un error con su submisión. Nuestro equipo de apoyo ha sido notificado. Intente nuevamente más tarde, o contáctese con su corte.",
"zh-s": "",
"zh-t": ""
},
}
def get_translation(key, lang, **kwargs):
return _translations[key][lang].format(**kwargs)
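# Minimal usage sketch (added, not part of the original module): keys and language
# codes come from the dictionary above; "Tulare" is only an illustrative value for
# the {county} placeholder filled through **kwargs.
if __name__ == "__main__":
    print(get_translation("what_county", "es"))
    print(get_translation("found_multiple_citations", "en", county="Tulare"))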
```
|
{
"source": "jessicamecht/learning_from_mistakes",
"score": 3
}
|
#### File: learning_from_mistakes/ptdarts/loss.py
```python
import torch
import numpy as np
import torch.nn as nn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def calculate_weighted_loss(logits, target, weights, criterion):
'''Calculates the weighted loss for each input example and then averages over the batch
:param logits torch tensor of size (number of examples, number of classes)
:param target torch tensor of size (number of examples,)
:param weights torch tensor of size (number of examples,)
:param criterion torch nn loss function with reduction='none'
:returns averaged weighted loss as a scalar torch tensor'''
loss = criterion(logits, target)
weighted_loss_individual = loss.float() * weights.float()
loss = torch.mean(weighted_loss_individual)
return loss
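# Minimal usage sketch (added, not part of the original module): with all weights set
# to 1 the weighted loss reduces to the plain mean cross-entropy; the tensor shapes
# below are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    criterion = nn.CrossEntropyLoss(reduction='none')
    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    weights = torch.ones(4)
    weighted = calculate_weighted_loss(logits, target, weights, criterion)
    plain = nn.CrossEntropyLoss()(logits, target)
    assert torch.allclose(weighted, plain)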
```
#### File: ptdarts/weight_samples/label_similarity.py
```python
import os, sys
import torch
sys.path.append('../')
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def measure_label_similarity(validation_targets, training_targets):
'''checks for each pair of training and validation labels whether they are the same
:param validation_targets torch of size (number val targets)
:param training_targets torch of size (number train targets)
:returns torch of size (number training targets, number val targets)'''
val_targets = validation_targets.reshape(validation_targets.shape[0], 1)
train_targets = training_targets.reshape(training_targets.shape[0], 1)
train_targets = torch.repeat_interleave(train_targets, val_targets.shape[0], dim=1)
val_targets = torch.repeat_interleave(val_targets, train_targets.shape[0], dim=1)
val_train_diff = val_targets.T - train_targets
assert(val_train_diff[0][0] == validation_targets[0] - training_targets.T[0] )
assert(val_train_diff.shape[0] == training_targets.shape[0] and val_train_diff.shape[1] == validation_targets.shape[0])
val_train_equality = (val_train_diff == 0).float()
return val_train_equality
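# Minimal usage sketch (added, not part of the original module): three training labels
# against two validation labels give a (3, 2) matrix with a 1 wherever the class
# labels match; the label values below are illustrative assumptions.
if __name__ == "__main__":
    train_y = torch.tensor([0, 1, 2])
    val_y = torch.tensor([1, 2])
    sim = measure_label_similarity(val_y, train_y)
    print(sim)  # expected: [[0., 0.], [1., 0.], [0., 1.]]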
```
|
{
"source": "jessicamecht/lfm",
"score": 3
}
|
#### File: jessicamecht/lfm/architect.py
```python
import copy
import torch
import gc
from weight_samples.sample_weights import calc_instance_weights
import higher
import torch.nn.functional as F
import torch.nn as nn
class Architect():
""" Compute gradients of alphas """
def __init__(self, net, w_momentum, w_weight_decay):
"""
Args:
net: network whose weights and alphas are optimized
w_momentum: weights momentum
w_weight_decay: weights weight decay
"""
self.net = net
self.v_net = copy.deepcopy(net)
self.w_momentum = w_momentum
self.w_weight_decay = w_weight_decay
def virtual_step(self, trn_X, trn_y, xi, w_optim):
"""
Compute unrolled weight w' (virtual step)
Step process:
1) forward
2) calc loss
3) compute gradient (by backprop)
4) update gradient
Args:
xi: learning rate for virtual gradient step (same as weights lr)
w_optim: weights optimizer
"""
# forward & calc loss
loss = self.net.loss(trn_X, trn_y) # L_trn(w)
# compute gradient
gradients = torch.autograd.grad(loss, self.net.weights())
# do virtual step (update gradient)
# below operations do not need gradient tracking
with torch.no_grad():
# dict key is not the value, but the pointer. So original network weight have to
# be iterated also.
for w, vw, g in zip(self.net.weights(), self.v_net.weights(), gradients):
m = w_optim.state[w].get('momentum_buffer', 0.) * self.w_momentum
vw.copy_(w - xi * (m + g + self.w_weight_decay*w))
# synchronize alphas
for a, va in zip(self.net.alphas(), self.v_net.alphas()):
va.copy_(a)
def unrolled_backward(self, trn_X, trn_y, val_X, val_y, xi, w_optim, visual_encoder, coefficient_vector):
""" Compute unrolled loss and backward its gradients
Args:
xi: learning rate for virtual gradient step (same as net lr)
w_optim: weights optimizer - for virtual step
"""
# do virtual step (calc w`)
self.virtual_step(trn_X, trn_y, xi, w_optim)
# calc unrolled loss
loss = self.v_net.loss(val_X, val_y) # L_val(w`)
# compute gradient
v_alphas = tuple(self.v_net.alphas())
v_weights = tuple(self.v_net.weights())
v_grads = torch.autograd.grad(loss, v_alphas + v_weights)
dalpha = v_grads[:len(v_alphas)]
dw = v_grads[len(v_alphas):]
hessian = self.compute_hessian(dw, trn_X, trn_y)
# update final gradient = dalpha - xi*hessian
with torch.no_grad():
for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):
alpha.grad = da - xi*h
model_backup = self.net.state_dict()
w_optim_backup = w_optim.state_dict()
#self.coefficient_vector, visual_encoder_parameters = meta_learn_new(self.net, trn_X, trn_y, val_X, val_y, coefficient_vector, visual_encoder, self.config)
#self.visual_encoder.load_state_dict(visual_encoder_parameters)
print('memory_allocated', torch.cuda.memory_allocated() / 1e9, 'memory_reserved',
torch.cuda.memory_reserved() / 1e9)
meta_learn(self.net, w_optim, trn_X, trn_y, val_X, val_y, coefficient_vector, visual_encoder)
print('memory_allocated1', torch.cuda.memory_allocated() / 1e9, 'memory_reserved',
torch.cuda.memory_reserved() / 1e9)
self.net.load_state_dict(model_backup)
w_optim.load_state_dict(w_optim_backup)
#update_gradients(visual_encoder_gradients, coeff_vector_gradients, visual_encoder, coefficient_vector)
def compute_hessian(self, dw, trn_X, trn_y):
"""
dw = dw` { L_val(w`, alpha) }
w+ = w + eps * dw
w- = w - eps * dw
hessian = (dalpha { L_trn(w+, alpha) } - dalpha { L_trn(w-, alpha) }) / (2*eps)
eps = 0.01 / ||dw||
"""
norm = torch.cat([w.view(-1) for w in dw]).norm()
eps = 0.01 / norm
# w+ = w + eps*dw`
with torch.no_grad():
for p, d in zip(self.net.weights(), dw):
p += eps * d
loss = self.net.loss(trn_X, trn_y)
dalpha_pos = torch.autograd.grad(loss, self.net.alphas()) # dalpha { L_trn(w+) }
# w- = w - eps*dw`
with torch.no_grad():
for p, d in zip(self.net.weights(), dw):
p -= 2. * eps * d
loss = self.net.loss(trn_X, trn_y)
dalpha_neg = torch.autograd.grad(loss, self.net.alphas()) # dalpha { L_trn(w-) }
# recover w
with torch.no_grad():
for p, d in zip(self.net.weights(), dw):
p += eps * d
hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]  # (dalpha_pos - dalpha_neg) / (2*eps), as in the docstring
return hessian
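# Illustrative note (added, not part of the original code): compute_hessian approximates
# the Hessian-vector product d/dalpha [ dL_val/dw' . dw ] with a central finite
# difference. In scalar form, with g(w) = dL_trn(w, alpha)/dalpha,
#   hessian ~ (g(w + eps * dw) - g(w - eps * dw)) / (2 * eps),   eps = 0.01 / ||dw||
# which is what the (dalpha_pos - dalpha_neg) / (2 * eps) expression above computes.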
def meta_learn(model, optimizer, input, target, input_val, target_val, coefficient_vector, visual_encoder):
'''Method to meta learn the visual encoder weights and coefficient vector r, we use the higher library to be
able to optimize through the validation loss because pytorch does not allow parameters to have grad_fn's
Calculates the weighted training loss and performs a weight update, then calculates the validation loss and makes
an update of the weights of the visual encoder and coefficient vector
V' <- V - eps * d L_{Val}/dV
r' <- r - gamma * d L_{Val}/dr
Args:
model: current network architecture model
optimizer: weight optimizer for model
input: training input of size (number of training images, channels, height, width)
target: training target of size (number train examples, 1)
input_val: validation input of size (number of validation images, channels, height, width)
target_val: validation target of size (number val examples, 1)
coefficient_vector: Tensor of size (number train examples, 1)
visual_encoder: Visual encoder neural network to calculate instance weights
Note: eps and gamma, the learning rates for the visual encoder and the coefficient
    vector, are not arguments here; they come from the optimizers the caller sets up,
    since this function only backpropagates the validation loss to populate gradients.
'''
with torch.no_grad():
logits_val = model(input_val)
with torch.backends.cudnn.flags(enabled=False):
with higher.innerloop_ctx(model, optimizer, copy_initial_weights=False, track_higher_grads=True,
device='cuda') as (fmodel, foptimizer):
# functional version of model allows gradient propagation through parameters of a model
logits = fmodel(input)
weights = calc_instance_weights(input, target, input_val, target_val, logits_val, coefficient_vector, visual_encoder)
loss = F.cross_entropy(logits, target, reduction='none')
weighted_training_loss = torch.mean(weights * loss)
foptimizer.step(weighted_training_loss) # replaces gradients with respect to model weights -> w2
logits_val = fmodel(input_val)
meta_val_loss = F.cross_entropy(logits_val, target_val)
meta_val_loss.backward()
#coeff_vector_gradients = torch.autograd.grad(meta_val_loss, coefficient_vector, retain_graph=True)
#coeff_vector_gradients = coeff_vector_gradients[0].detach()
#visual_encoder_gradients = torch.autograd.grad(meta_val_loss, visual_encoder.parameters())
#visual_encoder_gradients = (visual_encoder_gradients[0].detach(), visual_encoder_gradients[1].detach())# equivalent to backward for given parameters
logits.detach()
weighted_training_loss.detach()
del logits, meta_val_loss, foptimizer, fmodel, weighted_training_loss, logits_val, weights,
gc.collect()
torch.cuda.empty_cache()
#return visual_encoder_gradients, coeff_vector_gradients
def meta_learn_new(model, input, target, input_val, target_val, coefficient_vector, visual_encoder, config):
device = 'cpu'
with torch.no_grad():
logits_val = model(input_val).to(device)
model = model.to(device)
input = input.to(device)
target = target.to(device)
input_val = input_val.to(device)
target_val = target_val.to(device)
coefficient_vector = torch.nn.Parameter(torch.tensor(coefficient_vector, requires_grad=True).to(device))
visual_encoder = visual_encoder.to(device)
optimizer = torch.optim.SGD(model.parameters(), config.w_lr, momentum=config.w_momentum,
weight_decay=config.w_weight_decay)
visual_encoder_optimizer = torch.optim.Adam(visual_encoder.parameters(), betas=(0.5, 0.999),
weight_decay=config.alpha_weight_decay)
coeff_vector_optimizer = torch.optim.Adam([coefficient_vector], betas=(0.5, 0.999),
weight_decay=config.alpha_weight_decay)
visual_encoder_optimizer.zero_grad()
coeff_vector_optimizer.zero_grad()
with torch.backends.cudnn.flags(enabled=False):
with higher.innerloop_ctx(model, optimizer, copy_initial_weights=True, track_higher_grads=True,
device=device) as (fmodel, foptimizer):
# functional version of model allows gradient propagation through parameters of a model
logits = fmodel(input)
weights = calc_instance_weights(input, target, input_val, target_val, logits_val, coefficient_vector,
visual_encoder)
loss = F.cross_entropy(logits, target, reduction='none')
weighted_training_loss = torch.mean(weights * loss)
foptimizer.step(weighted_training_loss) # replaces gradients with respect to model weights -> w2
logits_val = fmodel(input_val)
meta_val_loss = F.cross_entropy(logits_val, target_val)
meta_val_loss.backward()
visual_encoder_optimizer.step()
coeff_vector_optimizer.step()
logits.detach()
meta_val_loss.detach()
loss.detach()
weighted_training_loss.detach()
del logits, meta_val_loss, foptimizer, fmodel, weighted_training_loss, logits_val, weights,
gc.collect()
torch.cuda.empty_cache()
return coefficient_vector, visual_encoder.state_dict()
def update_gradients(visual_encoder_gradients, coeff_vector_gradients, visual_encoder, coefficient_vector):
# Update the visual encoder weights
with torch.no_grad():
for p, grad in zip(visual_encoder.parameters(), visual_encoder_gradients):
if p.grad is not None:
p.grad.data += grad.detach()
else:
p.grad = grad.detach()
# Update the coefficient vector
for p, grad in zip(coefficient_vector, coeff_vector_gradients):
if p.grad is not None:
p.grad += grad.detach()
else:
p.grad = grad.detach()
```
|
{
"source": "jessicamegane/pge",
"score": 3
}
|
#### File: pge/core/logger.py
```python
from core.parameters import params
import json
import os
def evolution_progress(gen, population, best, gram):
""" Function that saves the grammar and population of the current generation and run. """
data = '%d,%s,%f,%f' %(gen,"".join(best['phenotype']), best['fitness'], best['tst_error'])
if params['VERBOSE']:
print(data)
save_progress_to_file(data)
to_save = []
to_save.append({"grammar": gram.get_dict()})
if params['SAVE_POP']:
for index, ind in enumerate(population):
to_save.append({"genotype": ind['genotype'],
"fenotype": ind['phenotype'],
"fitness": ind['fitness'],
"id": index
})
folder = params['PATH'] + 'last_' + str(params['RUN'])
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
with open('%s/generation_%d.json' % (folder,(gen)), 'w') as f:
f.write(json.dumps(to_save))
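# Illustrative sketch (added, not part of the original code) of the per-generation JSON
# written above when SAVE_POP is enabled; the field names mirror the keys used in the code:
# [
#   {"grammar": {...}},
#   {"genotype": [...], "fenotype": "...", "fitness": 0.0, "id": 0},
#   ...
# ]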
def save_progress_to_file(data):
# save info of best individual overall
with open(params['PATH'] + "data.txt", "a") as f:
f.write(data + '\n')
def save_parameters():
folder = params['PATH']
params_lower = dict((k.lower(), v) for k, v in params.items())
c = json.dumps(params_lower)
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
open('%sparameters.json' % (folder), 'a').write(c)
```
#### File: pge/core/parameters.py
```python
import argparse
import yaml
'''
This was adapted from PonyGE2: https://github.com/PonyGE/PonyGE2
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. PonyGE2: Grammatical Evolution in Python. arXiv preprint, arXiv:1703.08535, 2017.
'''
""""Algorithm Parameters"""
params = {
'PARAMETERS': None,
'RUN': 0,
'POP_SIZE': 1000,
'GENERATIONS': 100,
'ELITISM': 100,
'T_SIZE': 3,
'PROB_MUTATION': 0.05,
'PROB_CROSSOVER': 0.9,
'NR_CUTS': 1,
'SIZE_GENOTYPE': 128,
'SIZE_CODON': 255,
'MAX_WRAPS': 1,
'PGE': True,
'LEARNING_FACTOR': 0.01,
'ADAPTIVE': False,
'ADAPTIVE_INCREMENT': 0.001,
'SEED': 3601294368,
'SAVE_POP': False,
'GRAMMAR': 'grammars/5bitparity.bnf',
'EXPERIMENT_NAME': 'test_pge',
'PATH': None,
'VERBOSE': False
}
def load_parameters(file_name=None):
with open(file_name, 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
params.update(cfg)
def set_parameters(arguments):
# Initialise parser
parser = argparse.ArgumentParser(
usage=argparse.SUPPRESS,
description="Probabilistic Grammatical Evolution code",
)
parser.add_argument('--parameters',
dest='PARAMETERS',
type=str,
help='Specifies the parameters file to be used. Must '
'include the full file extension. Full file path '
'does NOT need to be specified.')
parser.add_argument('--popsize',
dest='POP_SIZE',
type=int,
help='Specifies the population size.')
parser.add_argument('--generations',
dest='GENERATIONS',
                        type=int,
help='Specifies the total number of generations.')
parser.add_argument('--elitism',
dest='ELITISM',
type=int,
help='Specifies the total number of individuals that should survive in each generation.')
parser.add_argument('--seed',
dest='SEED',
type=int,
help='Specifies the seed to be used by the random number generator.')
parser.add_argument('--prob_crossover',
dest='PROB_CROSSOVER',
type=float,
help='Specifies the probability of crossover usage. Float required.')
parser.add_argument('--prob_mutation',
dest='PROB_MUTATION',
type=float,
help='Specifies the probability of mutation usage. Float required.')
parser.add_argument('--t_size',
dest='T_SIZE',
type=int,
help='Specifies the tournament size for parent selection.')
parser.add_argument('--nr_cuts',
dest='NR_CUTS',
type=int,
help='Specifies the number of cuts to do in the genotype during crossover.')
parser.add_argument('--size_genotype',
dest='SIZE_GENOTYPE',
type=int,
help='Specifies number of codons in the genotype.')
parser.add_argument('--size_codon',
dest='SIZE_CODON',
type=int,
                        help='Specifies the highest value that each codon of standard GE can take. '
'The codon will be a random number in the interval [0,SIZE_CODON].')
parser.add_argument('--max_wraps',
dest='MAX_WRAPS',
type=int,
help='Specifies the maximum number of wraps.')
parser.add_argument('--grammar',
dest='GRAMMAR',
type=str,
help='Specifies the path to the grammar file.')
parser.add_argument('--experiment_name',
dest='EXPERIMENT_NAME',
type=str,
help='Specifies the name of the folder where stats are going to be stored.')
parser.add_argument('--verbose',
dest='VERBOSE',
type=bool,
help='Turns on the verbose output of the program')
parser.add_argument('--run',
dest='RUN',
type=int,
help='Specifies the run number.')
parser.add_argument('--pge',
dest='PGE',
type=bool,
help='Specifies if it is to run PGE or the standard GE. '
'Boolean required. True: run PGE, False: run GE.')
parser.add_argument('--learning_factor',
dest='LEARNING_FACTOR',
type=float,
help='Specifies the value of the learning factor used to update the probabilities. '
'Float Required.')
parser.add_argument('--adaptive',
dest='ADAPTIVE',
type=bool,
help='Specifies if it is supposed to run the adaptive version of PGE.')
parser.add_argument('--adaptive_increment',
dest='ADAPTIVE_INCREMENT',
type=float,
help='Specifies the value used to add to the learning factor each generation. '
'Float Required.')
# Parse command line arguments using all above information.
args, _ = parser.parse_known_args(arguments)
# All default args in the parser are set to "None".
cmd_args = {key: value for key, value in vars(args).items() if value is
not None}
# Set "None" values correctly.
for key in sorted(cmd_args.keys()):
# Check all specified arguments.
if type(cmd_args[key]) == str and cmd_args[key].lower() == "none":
cmd_args[key] = None
if 'PARAMETERS' in cmd_args:
load_parameters(cmd_args['PARAMETERS'])
params.update(cmd_args)
# Default path
params['PATH'] = params['EXPERIMENT_NAME'] + "/" + str(params['LEARNING_FACTOR']) + "/"
```
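A brief usage sketch of the parameter machinery above, assuming the repository layout shown in the heading (`pge/core/parameters.py`) is on the import path; the flag values are made up for illustration:
```python
from core.parameters import params, set_parameters

# CLI flags override the built-in defaults (and any YAML file passed via --parameters)
set_parameters(['--popsize', '500', '--prob_mutation', '0.1', '--run', '3'])
print(params['POP_SIZE'], params['PROB_MUTATION'], params['RUN'])  # 500 0.1 3
print(params['PATH'])  # EXPERIMENT_NAME/LEARNING_FACTOR/, e.g. "test_pge/0.01/"
```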
#### File: pge/examples/StringMatch.py
```python
class StringMatch():
"""Fitness function for matching a string. Takes a string and
returns fitness. Penalises output that is not the same length as
the target. Usage: StringMatch("golden") returns a *callable
object*, ie the fitness function."""
# maximise = False
def __init__(self, target = "pge"):
self.target = target
self.invalid_fitness = 100000
def __call__(self, phenotype):
fitness = max(len(self.target), len(phenotype))
# Loops as long as the shorter of two strings
for (t_p, g_p) in zip(self.target, phenotype):
if t_p == g_p:
fitness -= 1
else:
# Imperfect match, find ASCII distance to match.
fitness -= 1 / (1 + (abs(ord(t_p) - ord(g_p))))
return fitness, -1
if __name__ == "__main__":
import core
eval_func = StringMatch()
core.evolutionary_algorithm(evaluation_function=eval_func)
```
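A quick usage sketch of the fitness callable above, with return values worked out from the code (lower is better; the second tuple element is a fixed placeholder):
```python
fit = StringMatch("pge")
print(fit("pge"))  # (0, -1): every character matches, so the full length penalty is removed
print(fit("pga"))  # (0.8, -1): 'a' vs 'e' only earns a partial 1/(1+4) reduction
print(fit("p"))    # (2, -1): unmatched target characters keep the fitness high
```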
|
{
"source": "jessicamizzi/khmer",
"score": 2
}
|
#### File: khmer/sandbox/collect-variants.py
```python
import sys
import screed
import os
import khmer
from khmer.khmer_args import build_counting_args
DEFAULT_NORMALIZE_LIMIT = 20
def main():
parser = build_counting_args()
parser.add_argument("-t", "--trusted-cutoff", dest="trusted_cutoff",
type=int, default=3)
parser.add_argument("--bits-theta", help="Tuning parameter controlling"
"trade off of speed vs alignment sensitivity",
default=1.0, type=float, dest="bits_theta")
parser.add_argument('--normalize-to', '-Z', type=int, dest='normalize_to',
help='base cutoff on abundance',
default=DEFAULT_NORMALIZE_LIMIT)
parser.add_argument('-s', '--savehash', dest='savehash', default='')
parser.add_argument('-l', '--loadhash', dest='loadhash',
default='')
parser.add_argument('--details-out', dest="details_out")
parser.add_argument('input_filenames', nargs='+')
args = parser.parse_args()
if not args.quiet:
print >>sys.stderr, '\nPARAMETERS:'
print >>sys.stderr, ' - kmer size = %d \t\t(-k)' % args.ksize
print >>sys.stderr, ' - n hashes = %d \t\t(-N)' % args.n_tables
print >>sys.stderr, ' - min hashsize = %-5.2g \t(-x)' % \
args.min_tablesize
print >>sys.stderr, ''
print >>sys.stderr, 'Estimated memory usage is %.2g bytes ' \
'(n_hashes x min_hashsize)' % \
(args.n_tables * args.min_tablesize)
print >>sys.stderr, '-' * 8
K = args.ksize
HT_SIZE = args.min_tablesize
N_HT = args.n_tables
filenames = args.input_filenames
if args.loadhash:
print 'loading hashtable from', args.loadhash
ht = khmer.load_counting_hash(args.loadhash)
else:
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
aligner = khmer.ReadAligner(ht, args.trusted_cutoff, args.bits_theta)
if args.details_out is not None:
details_out = open(args.details_out, "w")
else:
details_out = None
total = 0
discarded = 0
for input_filename in filenames:
output_name = os.path.basename(input_filename) + '.keepvar'
outfp = open(output_name, 'w')
for n, record in enumerate(screed.open(input_filename)):
if n > 0 and n % 10000 == 0:
print '... kept', total - discarded, 'of', total, ', or', \
int(100. - discarded / float(total) * 100.), '%'
print '... in file', input_filename
total += 1
if len(record.sequence) < K:
continue
seq = record.sequence.upper().replace('N', 'A')
##
# build the alignment...
score, graph_alignment, read_alignment, truncated = \
aligner.align(record.sequence)
            # next, decide whether or not to keep it.
keep = False
if truncated:
keep = True # keep all truncated alignments - why?
else:
# build a better sequence -- this is the corrected one.
graph_seq = graph_alignment.replace("-", "")
# OR?
#graph_seq = ""
#for i in range(len(graph_alignment)):
# if graph_alignment[i] == "-":
# graph_seq += read_alignment[i]
# else:
# graph_seq += graph_alignment[i]
# get the minimum count for this new sequence
mincount = ht.get_min_count(graph_seq)
if mincount < args.normalize_to:
keep = True
if details_out is not None:
details_out.write(
"+{7}\t{0:0.2f}\t{3}\t{4}\nread: "
"{6}\ngraph_aln: {1}\nread_aln: {2}\nstored_seq:{5}\n"
"".format(
score, graph_alignment, read_alignment, truncated,
keep, seq, record.sequence, record.name))
if keep:
ht.consume(seq)
outfp.write('>%s\n%s\n' % (record.name, record.sequence))
else:
discarded += 1
if total:
print 'DONE with', input_filename, \
'; kept', total - discarded, 'of', total, 'or', \
int(100. - discarded / float(total) * 100.), '%'
print 'output in', output_name
if args.savehash:
print 'Saving hashfile through', input_filename
print '...saving to', args.savehash
ht.save(args.savehash)
# Change 0.2 only if you really grok it. HINT: You don't.
fp_rate = khmer.calc_expected_collisions(ht)
print 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.20:
print >>sys.stderr, "**"
print >>sys.stderr, "** ERROR: the counting hash is too small for"
print >>sys.stderr, "** this data set. Increase hashsize/num ht."
print >>sys.stderr, "**"
print >>sys.stderr, "** Do not use these results!!"
sys.exit(-1)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
```
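The keep/discard rule in the loop above is essentially digital normalization applied to the graph-corrected sequence. Below is a tiny Python 3 sketch of just that decision; `get_min_count` is a stand-in callable, not khmer's API:
```python
NORMALIZE_TO = 20  # same role as --normalize-to above

def keep_read(graph_seq, truncated, get_min_count, cutoff=NORMALIZE_TO):
    """Keep truncated alignments unconditionally; otherwise keep the read only if its
    corrected sequence still contains a low-coverage k-mer (min count below the cutoff)."""
    if truncated:
        return True
    return get_min_count(graph_seq) < cutoff

# toy stand-in: pretend every k-mer in this read has been seen 5 times so far
print(keep_read("ACGTACGTACGT", truncated=False, get_min_count=lambda seq: 5))  # True
```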
#### File: khmer/sandbox/fasta-to-abundance-hist.py
```python
import sys
import khmer
def main():
files = sys.argv[2:]
total_reads = len(files) * [0]
n_consumed = len(files) * [0]
n_seq_kept = len(files) * [0]
print 'loading ht'
ht = khmer.new_counting_hash(1, 1, 1)
ht.load(sys.argv[1])
for i, infile in enumerate(files):
print 'outputting', infile + '.freq'
ht.output_fasta_kmer_pos_freq(infile, infile + ".freq")
if __name__ == '__main__':
main()
```
#### File: khmer/sandbox/hi-lo-abundance-by-position.py
```python
import sys
import os
import khmer
def write_dist(dist, fp):
for n, i in enumerate(dist):
fp.write('%d %d\n' % (n, i))
def main():
hashfile = sys.argv[1]
filename = sys.argv[2]
outfile = os.path.basename(filename)
print 'loading kh file', hashfile
ht = khmer.load_counting_hash(hashfile)
x = ht.fasta_count_kmers_by_position(filename, 100, 1)
write_dist(x, open(outfile + '.pos.abund=1', 'w'))
print 'wrote', outfile + '.pos.abund=1'
y = ht.fasta_count_kmers_by_position(filename, 100, 255)
write_dist(y, open(outfile + '.pos.abund=255', 'w'))
print 'wrote', outfile + '.pos.abund=255'
if __name__ == '__main__':
main()
```
#### File: khmer/scripts/sample-reads-randomly.py
```python
import argparse
import screed
import os.path
import random
import textwrap
import sys
import khmer
from khmer.kfile import check_file_status, check_space
from khmer.khmer_args import info
from khmer.utils import write_record
DEFAULT_NUM_READS = int(1e5)
DEFAULT_MAX_READS = int(1e8)
DEBUG = True
def get_parser():
epilog = ("""
Take a list of files containing sequences, and subsample 100,000
sequences (:option:`-N`/:option:`--num_reads`) uniformly, using
reservoir sampling. Stop after first 100m sequences
(:option:`-M`/:option:`--max_reads`). By default take one subsample,
but take :option:`-S`/:option:`--samples` samples if specified.
The output is placed in :option:`-o`/:option:`--output` <file>
(for a single sample) or in <file>.subset.0 to <file>.subset.S-1
(for more than one sample).
This script uses the `reservoir sampling
<http://en.wikipedia.org/wiki/Reservoir_sampling>`__ algorithm.
""") # noqa
parser = argparse.ArgumentParser(
description="Uniformly subsample sequences from a collection of files",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog=textwrap.dedent(epilog))
parser.add_argument('filenames', nargs='+')
parser.add_argument('-N', '--num_reads', type=int, dest='num_reads',
default=DEFAULT_NUM_READS)
parser.add_argument('-M', '--max_reads', type=int, dest='max_reads',
default=DEFAULT_MAX_READS)
parser.add_argument('-S', '--samples', type=int, dest='num_samples',
default=1)
parser.add_argument('-R', '--random-seed', type=int, dest='random_seed')
parser.add_argument('-o', '--output', dest='output_file',
metavar='output_file',
type=argparse.FileType('w'), default=None)
parser.add_argument('--version', action='version', version='%(prog)s '
+ khmer.__version__)
parser.add_argument('-f', '--force', default=False, action='store_true',
                        help='Overwrite output file if it exists')
return parser
def main():
info('sample-reads-randomly.py')
args = get_parser().parse_args()
for _ in args.filenames:
check_file_status(_, args.force)
check_space(args.filenames, args.force)
# seed the random number generator?
if args.random_seed:
random.seed(args.random_seed)
# bound n_samples
num_samples = max(args.num_samples, 1)
#
# Figure out what the output filename is going to be
#
output_file = args.output_file
if output_file:
if num_samples > 1:
sys.stderr.write(
"Error: cannot specify -o with more than one sample.")
if not args.force:
sys.exit(1)
output_filename = output_file.name
else:
filename = args.filenames[0]
output_filename = os.path.basename(filename) + '.subset'
if num_samples == 1:
print >>sys.stderr, 'Subsampling %d reads using reservoir sampling.' %\
args.num_reads
print >>sys.stderr, 'Subsampled reads will be placed in %s' % \
output_filename
print >>sys.stderr, ''
else: # > 1
print >>sys.stderr, 'Subsampling %d reads, %d times,' \
% (args.num_reads, num_samples), ' using reservoir sampling.'
print >>sys.stderr, 'Subsampled reads will be placed in %s.N' \
% output_filename
print >>sys.stderr, ''
reads = []
for n in range(num_samples):
reads.append([])
total = 0
# read through all the sequences and load/resample the reservoir
for filename in args.filenames:
print >>sys.stderr, 'opening', filename, 'for reading'
for record in screed.open(filename, parse_description=False):
total += 1
if total % 10000 == 0:
print >>sys.stderr, '...', total, 'reads scanned'
if total >= args.max_reads:
print >>sys.stderr, 'reached upper limit of %d reads' % \
args.max_reads, '(see -M); exiting'
break
# collect first N reads
if total <= args.num_reads:
for n in range(num_samples):
reads[n].append(record)
else:
# use reservoir sampling to replace reads at random
# see http://en.wikipedia.org/wiki/Reservoir_sampling
for n in range(num_samples):
guess = random.randint(1, total)
if guess <= args.num_reads:
reads[n][guess - 1] = record
# output all the subsampled reads:
if len(reads) == 1:
print >>sys.stderr, 'Writing %d sequences to %s' % \
(len(reads[0]), output_filename)
if not output_file:
output_file = open(output_filename, 'w')
for record in reads[0]:
write_record(record, output_file)
else:
for n in range(num_samples):
n_filename = output_filename + '.%d' % n
print >>sys.stderr, 'Writing %d sequences to %s' % \
(len(reads[n]), n_filename)
output_file = open(n_filename, 'w')
for record in reads[n]:
write_record(record, output_file)
if __name__ == '__main__':
main()
```
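The core of the script above is textbook reservoir sampling. A self-contained Python 3 sketch of the same idea, kept separate because the script itself is written for Python 2:
```python
import random

def reservoir_sample(stream, k, seed=None):
    """Return a uniform random sample of k items from a stream of unknown length."""
    rng = random.Random(seed)
    reservoir = []
    for total, item in enumerate(stream, start=1):
        if total <= k:
            reservoir.append(item)              # fill the reservoir with the first k items
        else:
            guess = rng.randint(1, total)       # keep the new item with probability k/total
            if guess <= k:
                reservoir[guess - 1] = item
    return reservoir

print(reservoir_sample(range(1_000_000), 5, seed=42))
```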
#### File: khmer/scripts/split-paired-reads.py
```python
import screed
import sys
import os
import textwrap
import argparse
import khmer
from khmer.kfile import check_file_status, check_space
from khmer.khmer_args import info
from khmer.utils import (write_record, check_is_left, check_is_right,
broken_paired_reader)
def get_parser():
epilog = """
Some programs want paired-end read input in the One True Format, which is
interleaved; other programs want input in the Insanely Bad Format, with
left- and right- reads separated. This reformats the former to the latter.
The directory into which the left- and right- reads are output may be
specified using :option:`-o`/:option:`--output-dir`. This directory will be
created if it does not already exist.
Alternatively, you can specify the filenames directly with
:option:`-1`/:option:`--output-first` and
:option:`-2`/:option:`--output-second`, which will override the
:option:`-o`/:option:`--output-dir` setting on a file-specific basis.
:option:`-p`/:option:`--force-paired` will require the input file to
be properly interleaved; by default, this is not required.
Example::
split-paired-reads.py tests/test-data/paired.fq
Example::
split-paired-reads.py -o ~/reads-go-here tests/test-data/paired.fq
Example::
split-paired-reads.py -1 reads.1 -2 reads.2 tests/test-data/paired.fq
"""
parser = argparse.ArgumentParser(
description='Split interleaved reads into two files, left and right.',
epilog=textwrap.dedent(epilog),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infile')
parser.add_argument('-o', '--output-dir', metavar="output_directory",
dest='output_directory', default='', help='Output '
'split reads to specified directory. Creates '
'directory if necessary')
parser.add_argument('-1', '--output-first', metavar='output_first',
default=None, help='Output "left" reads to this '
'file')
parser.add_argument('-2', '--output-second', metavar='output_second',
default=None, help='Output "right" reads to this '
'file')
parser.add_argument('-p', '--force-paired', action='store_true',
help='Require that reads be interleaved')
parser.add_argument('--version', action='version', version='%(prog)s '
+ khmer.__version__)
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
return parser
def main():
info('split-paired-reads.py')
args = get_parser().parse_args()
infile = args.infile
check_file_status(infile, args.force)
filenames = [infile]
check_space(filenames, args.force)
# decide where to put output files - specific directory? or just default?
if args.output_directory:
if not os.path.exists(args.output_directory):
os.makedirs(args.output_directory)
out1 = args.output_directory + '/' + os.path.basename(infile) + '.1'
out2 = args.output_directory + '/' + os.path.basename(infile) + '.2'
else:
out1 = os.path.basename(infile) + '.1'
out2 = os.path.basename(infile) + '.2'
# OVERRIDE output file locations with -1, -2
if args.output_first:
out1 = args.output_first
if args.output_second:
out2 = args.output_second
fp_out1 = open(out1, 'w')
fp_out2 = open(out2, 'w')
counter1 = 0
counter2 = 0
index = None
screed_iter = screed.open(infile, parse_description=False)
# walk through all the reads in broken-paired mode.
for index, is_pair, record1, record2 in broken_paired_reader(screed_iter):
if index % 100000 == 0 and index:
print >> sys.stderr, '...', index
# are we requiring pairs?
if args.force_paired and not is_pair:
print >>sys.stderr, 'ERROR, %s is not part of a pair' % \
record1.name
sys.exit(1)
if is_pair:
write_record(record1, fp_out1)
counter1 += 1
write_record(record2, fp_out2)
counter2 += 1
else:
name = record1.name
if check_is_left(name):
write_record(record1, fp_out1)
counter1 += 1
elif check_is_right(name):
write_record(record1, fp_out2)
counter2 += 1
else:
print >>sys.stderr, \
"Unrecognized format for read pair information: %s" % name
print >>sys.stderr, "Exiting."
sys.exit(1)
print >> sys.stderr, "DONE; split %d sequences (%d left, %d right)" % \
(counter1 + counter2, counter1, counter2)
print >> sys.stderr, "/1 reads in %s" % out1
print >> sys.stderr, "/2 reads in %s" % out2
if __name__ == '__main__':
main()
```
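A minimal Python 3 sketch of the left/right dispatch idea for reads that carry the classic `/1` and `/2` name suffixes; khmer's own `check_is_left`/`check_is_right` helpers may also recognise other naming conventions, so this is only an illustration:
```python
def split_interleaved(records, left_out, right_out):
    """records yields (name, sequence) pairs; write /1 reads to left_out and /2 reads to right_out."""
    left = right = 0
    for name, seq in records:
        if name.endswith("/1"):
            left_out.write(f">{name}\n{seq}\n")
            left += 1
        elif name.endswith("/2"):
            right_out.write(f">{name}\n{seq}\n")
            right += 1
        else:
            raise ValueError(f"Unrecognized pair information in read name: {name}")
    return left, right
```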
#### File: khmer/tests/test_hll.py
```python
import math
import string
import khmer
from screed.fasta import fasta_iter
import khmer_tst_utils as utils
from nose.tools import assert_raises
TT = string.maketrans('ACGT', 'TGCA')
K = 20 # size of kmer
ERR_RATE = 0.01
N_UNIQUE = 3960
def teardown():
utils.cleanup()
def test_hll_add_python():
# test python code to count unique kmers using HyperLogLog.
# use the lower level add() method, which accepts anything,
# and compare to an exact count using collections.Counter
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
counter = set()
for n, record in enumerate(fasta_iter(open(filename))):
sequence = record['sequence']
seq_len = len(sequence)
for n in range(0, seq_len + 1 - K):
kmer = sequence[n:n + K]
rc = kmer[::-1].translate(TT)
hllcpp.add(kmer)
if rc in counter:
kmer = rc
counter.update([kmer])
n_unique = len(counter)
assert n_unique == N_UNIQUE
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_consume_string():
# test c++ code to count unique kmers using HyperLogLog,
# using screed to feed each read to the counter.
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
for n, record in enumerate(fasta_iter(open(filename))):
hllcpp.consume_string(record['sequence'])
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_empty_fasta():
filename = utils.get_test_data('test-empty.fa')
hll = khmer.HLLCounter(ERR_RATE, K)
with assert_raises(IOError):
hll.consume_fasta(filename)
def test_hll_consume_fasta():
# test c++ code to count unique kmers using HyperLogLog
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
hllcpp.consume_fasta(filename)
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_consume_fasta_ep():
# During estimation trigger the _Ep() method,
# we need all internal counters values to be different than zero for this.
filename = utils.get_test_data('paired-mixed.fa')
hll = khmer.HLLCounter(0.36, 32)
hll.consume_fasta(filename)
assert all(c != 0 for c in hll.counters)
assert len(hll) == 236
def test_hll_consume_fasta_estimate_bias():
# During estimation trigger the estimate_bias method,
# we need all internal counters values to be different than zero for this,
# and also the cardinality should be small (if it is large we fall on the
# default case).
filename = utils.get_test_data("test-abund-read-3.fa")
hll = khmer.HLLCounter(0.36, K)
hll.consume_fasta(filename)
assert all(c != 0 for c in hll.counters)
assert len(hll) == 79
def test_hll_len():
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
hllcpp.consume_fasta(filename)
assert hllcpp.estimate_cardinality() == len(hllcpp)
def test_hll_empty():
hllcpp = khmer.HLLCounter(ERR_RATE, K)
assert len(hllcpp) == 0
def test_hll_readonly_alpha():
hllcpp = khmer.HLLCounter(ERR_RATE, K)
with assert_raises(AttributeError):
hllcpp.alpha = 5
def test_hll_cover_calc_alpha():
hllcpp = khmer.HLLCounter(0.36, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.673
assert len(counters) == 2 ** 4
hllcpp = khmer.HLLCounter(0.21, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.697
assert len(counters) == 2 ** 5
hllcpp = khmer.HLLCounter(0.16, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.709
assert len(counters) == 2 ** 6
def test_hll_invalid_base():
# this test should raise a ValueError,
# since there are invalid bases in read.
hllcpp = khmer.HLLCounter(ERR_RATE, 5)
with assert_raises(ValueError):
hllcpp.consume_string("ACGTTTCGNAATNNNNN")
def test_hll_invalid_error_rate():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(-0.01, K)
def test_hll_invalid_error_rate_max():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(0.367696, K)
def test_hll_error_rate_max():
# test if error_rate is a valid value
hllcpp = khmer.HLLCounter(0.367695, K)
assert len(hllcpp.counters) == 2 ** 4
def test_hll_invalid_error_rate_min():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(0.0040624, K)
def test_hll_error_rate_min():
# test if error_rate is a valid value
hllcpp = khmer.HLLCounter(0.0040625, K)
assert len(hllcpp.counters) == 2 ** 16
def test_hll_change_error_rate():
hllcpp = khmer.HLLCounter(0.0040625, K)
assert hllcpp.error_rate == 0.0040625
# error rate is discrete, what we test here is if an error rate of 1%
# rounds to the appropriate value
hllcpp.error_rate = 0.01
assert hllcpp.error_rate == 0.008125
with assert_raises(TypeError):
del hllcpp.error_rate
with assert_raises(TypeError):
hllcpp.error_rate = 5
with assert_raises(ValueError):
hllcpp.error_rate = 2.5
with assert_raises(ValueError):
hllcpp.error_rate = -10.
# error rate can only be changed prior to first counting,
hllcpp.consume_string('AAACCACTTGTGCATGTCAGTGCAGTCAGT')
with assert_raises(AttributeError):
hllcpp.error_rate = 0.3
def test_hll_change_ksize():
hllcpp = khmer.HLLCounter(0.0040625, K)
assert hllcpp.ksize == K
hllcpp.ksize = 24
assert hllcpp.ksize == 24
hllcpp.ksize = 12L
assert hllcpp.ksize == 12
with assert_raises(ValueError):
hllcpp.ksize = -20
with assert_raises(TypeError):
del hllcpp.ksize
with assert_raises(TypeError):
hllcpp.ksize = 33.4
# error rate can only be changed prior to first counting,
hllcpp.consume_string('AAACCACTTGTGCATGTCAGTGCAGTCAGT')
with assert_raises(AttributeError):
hllcpp.ksize = 30
def test_hll_get_counters():
hll = khmer.HLLCounter(0.36, K)
counters = hll.counters
assert len(counters) == 2 ** 4
assert all(c == 0 for c in counters)
```
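For reference, a Python 3 sketch of an exact canonical k-mer count like the one the first test builds; it uses the standard lexicographic-minimum canonicalization rather than the test's order-dependent `rc in counter` check, and `str.maketrans` replaces the Python 2 `string.maketrans` used above:
```python
K = 20
RC_TABLE = str.maketrans('ACGT', 'TGCA')

def canonical_kmers(sequence, k=K):
    """Yield each k-mer as the lexicographic minimum of itself and its reverse complement."""
    for i in range(len(sequence) - k + 1):
        kmer = sequence[i:i + k]
        rc = kmer[::-1].translate(RC_TABLE)
        yield min(kmer, rc)

def exact_unique_kmers(sequences, k=K):
    seen = set()
    for seq in sequences:
        seen.update(canonical_kmers(seq.upper(), k))
    return len(seen)

print(exact_unique_kmers(["ACGTACGTACGTACGTACGTAC"]))  # 3
```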
#### File: khmer/tests/test_labelhash.py
```python
import khmer
from khmer import LabelHash
from screed.fasta import fasta_iter
import screed
import khmer_tst_utils as utils
from nose.plugins.attrib import attr
def teardown():
utils.cleanup()
#
# @camillescott TODO: more tests!
# * thread-safety
@attr('linux')
def test_toobig():
try:
lh = LabelHash(20, 1e13, 1)
assert 0, "This should fail."
except MemoryError as err:
print str(err)
def test_n_labels():
lh = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('test-labels.fa')
lh.consume_fasta_and_tag_with_labels(filename)
print lh.n_labels()
assert lh.n_labels() == 4
def test_get_label_dict():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('test-labels.fa')
lb.consume_fasta_and_tag_with_labels(filename)
labels = lb.get_label_dict()
expected = [0, 1, 2, 3]
for e_label in expected:
assert e_label in labels
for a_label in labels:
assert a_label in expected
def test_get_tag_labels():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('single-read.fq')
lb.consume_fasta_and_tag_with_labels(filename)
tag = 173473779682
labels = lb.get_tag_labels(tag)
assert len(labels) == 1
assert labels.pop() == 0
def test_consume_fasta_and_tag_with_labels():
lb = LabelHash(20, 1e7, 4)
read_1 = 'ACGTAACCGGTTAAACCCGGGTTTAAAACCCCGGGGTTTT'
filename = utils.get_test_data('test-transcript.fa')
total_reads, n_consumed = lb.consume_fasta_and_tag_with_labels(filename)
print "doing get"
assert lb.get(read_1[:20])
assert total_reads == 3
print "doing n_labels"
print lb.n_labels()
print "doing label dict"
print lb.get_label_dict()
print "get tagset"
for tag in lb.get_tagset():
print "forward hash"
print tag, khmer.forward_hash(tag, 20)
for record in screed.open(filename):
print "Sweeping tags"
print lb.sweep_tag_neighborhood(record.sequence, 40)
print "Sweeping labels..."
print lb.sweep_label_neighborhood(record.sequence, 40)
assert lb.n_labels() == 3
def test_consume_partitioned_fasta_and_tag_with_labels():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('real-partition-small.fa')
total_reads, n_consumed = lb.consume_partitioned_fasta_and_tag_with_labels(
filename)
labels = set()
for record in screed.open(filename):
seq = record.sequence
labels.update(lb.sweep_label_neighborhood(seq, 0, False, False))
# print lb.n_labels()
# print labels
assert len(labels) == 1
assert labels.pop() == 2
assert lb.n_labels() == 1
def test_consume_sequence_and_tag_with_labels():
lb = LabelHash(20, 1e6, 4)
label = 0
sequence = 'ATGCATCGATCGATCGATCGATCGATCGATCGATCGATCG'
n_consumed = lb.consume_sequence_and_tag_with_labels(sequence, label)
labels = set()
labels.update(lb.sweep_label_neighborhood(sequence))
assert label in labels
assert len(labels) == 1
def test_sweep_tag_neighborhood():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('single-read.fq')
lb.consume_fasta_and_tag(filename)
tags = lb.sweep_tag_neighborhood('CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT')
assert len(tags) == 1
assert tags.pop() == 173473779682
def test_sweep_label_neighborhood():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('single-read.fq')
lb.consume_fasta_and_tag_with_labels(filename)
labels = lb.sweep_label_neighborhood('CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT')
assert len(labels) == 1
assert labels.pop() == 0
'''
* The test data set has four reads: A, B, C, and D
* Overlaps are A <-> B <-> C, with D on its own
* Thus, traversing from A should find labels from A and B,
traversing from B should find labels from A, B, and C,
and traversing from C should find labels from B and C
'''
def test_label_tag_correctness():
lb = LabelHash(20, 1e7, 4)
filename = utils.get_test_data('test-labels.fa')
lb.consume_fasta_and_tag_with_labels(filename)
# read A
labels = lb.sweep_label_neighborhood(
'ATCGTGTAAGCTATCGTAATCGTAAGCTCTGCCTAGAGCTAGGCTAGGCTCTGCCTAGAG'
'CTAGGCTAGGTGTGCTCTGCCTAGAGCTAGGCTAGGTGT')
print lb.sweep_tag_neighborhood(
'TTCGTGTAAGCTATCGTAATCGTAAGCTCTGCCTAGAGCTAGGCTAGGCTCTGCCTAGAG'
'CTAGGCTAGGTGTGCTCTGCTAGAGCTAGGCTAGGTGT')
print labels
print len('ATCGTGTAAGCTATCGTAATCGTAAGCTCTGCCTAGAGCTAGGCTAG') - 19
assert len(labels) == 2
assert 0 in labels
assert 1 in labels
# read B
labels = lb.sweep_label_neighborhood(
'GCGTAATCGTAAGCTCTGCCTAGAGCTAGGCTAGCTCTGCCTAGAGCTAGGCTAGGTGTTGGGGATAG'
'ATAGATAGATGACCTAGAGCTAGGCTAGGTGTTGGGGATAGATAGATAGATGA')
print labels
assert len(labels) == 3
assert 0 in labels
assert 1 in labels
assert 2 in labels
# read C
labels = lb.sweep_label_neighborhood(
'TGGGATAGATAGATAGATGACCTAGAGCTAGGCTAGGTGTTGGGGATAGATAGATAGATGACCTAGAG'
'CTAGGCTAGGTGTTGGGGATAGATAGATAGATGAGTTGGGGATAGATAGATAGATGAGTGTAGATCCA'
'ACAACACATACA')
print labels
assert len(labels) == 2
assert 1 in labels
assert 2 in labels
# read D
labels = lb.sweep_label_neighborhood(
'TATATATATAGCTAGCTAGCTAACTAGCTAGCATCGATCGATCGATC')
print labels
assert len(labels) == 1
assert 3 in labels
#
# Begin Hashbits tests
#
def test__get_set_tag_density():
ht = khmer.LabelHash(32, 1, 1)
orig = ht._get_tag_density()
assert orig != 2
ht._set_tag_density(2)
assert ht._get_tag_density() == 2
def test_n_occupied_1():
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 1 # number of hashtables
# test modified c++ n_occupied code
ht1 = khmer.LabelHash(K, HT_SIZE, N_HT)
for n, record in enumerate(fasta_iter(open(filename))):
ht1.consume(record['sequence'])
# this number calculated independently
assert ht1.n_occupied() == 3877
def test_bloom_python_1():
# test python code to count unique kmers using bloom filter
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht2 = khmer.LabelHash(K, HT_SIZE, N_HT)
n_unique = 0
for n, record in enumerate(fasta_iter(open(filename))):
sequence = record['sequence']
seq_len = len(sequence)
for n in range(0, seq_len + 1 - K):
kmer = sequence[n:n + K]
if (not ht2.get(kmer)):
n_unique += 1
ht2.count(kmer)
assert n_unique == 3960
assert ht2.n_occupied() == 3882
assert ht2.n_unique_kmers() == 3960 # this number equals to n_unique
def test_bloom_c_1():
# test c++ code to count unique kmers using bloom filter
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht3 = khmer.LabelHash(K, HT_SIZE, N_HT)
for n, record in enumerate(fasta_iter(open(filename))):
ht3.consume(record['sequence'])
assert ht3.n_occupied() == 3882
assert ht3.n_unique_kmers() == 3960
def test_n_occupied_2(): # simple one
K = 4
HT_SIZE = 10 # use 11
N_HT = 1
ht1 = khmer.LabelHash(K, HT_SIZE, N_HT)
ht1.count('AAAA') # 00 00 00 00 = 0
assert ht1.n_occupied() == 1
ht1.count('ACTG') # 00 10 01 11 =
assert ht1.n_occupied() == 2
ht1.count('AACG') # 00 00 10 11 = 11 # collision 1
assert ht1.n_occupied() == 2
ht1.count('AGAC') # 00 11 00 10 # collision 2
assert ht1.n_occupied() == 2
def test_bloom_c_2(): # simple one
K = 4
HT_SIZE = 10 # use 11
N_HT1 = 1 # hashtable size = 11
N_HT2 = 2 # hashtable size = 11,13
# use only 1 hashtable, no bloom filter
ht1 = khmer.LabelHash(K, HT_SIZE, N_HT1)
ht1.count('AAAA') # 00 00 00 00 = 0
ht1.count('ACTG') # 00 10 01 11 =
assert ht1.n_unique_kmers() == 2
ht1.count('AACG') # 00 00 10 11 = 11 # collision with 1st kmer
assert ht1.n_unique_kmers() == 2
ht1.count('AGAC') # 00 11 00 10 # collision with 2nd kmer
assert ht1.n_unique_kmers() == 2
# use two hashtables with 11,13
ht2 = khmer.LabelHash(K, HT_SIZE, N_HT2)
ht2.count('AAAA') # 00 00 00 00 = 0
ht2.count('ACTG') # 00 10 01 11 = 2*16 +4 +3 = 39
assert ht2.n_unique_kmers() == 2
ht2.count('AACG') # 00 00 10 11 = 11 # collision with only 1st kmer
assert ht2.n_unique_kmers() == 3
ht2.count('AGAC') # 00 11 00 10 3*16 +2 = 50
# collision with both 2nd and 3rd kmers
assert ht2.n_unique_kmers() == 3
def test_filter_if_present():
ht = khmer.LabelHash(32, 1e6, 2)
maskfile = utils.get_test_data('filter-test-A.fa')
inputfile = utils.get_test_data('filter-test-B.fa')
outfile = utils.get_temp_filename('filter')
ht.consume_fasta(maskfile)
ht.filter_if_present(inputfile, outfile)
records = list(fasta_iter(open(outfile)))
assert len(records) == 1
assert records[0]['name'] == '3'
def test_combine_pe():
inpfile = utils.get_test_data('combine_parts_1.fa')
ht = khmer.LabelHash(32, 1, 1)
ht.consume_partitioned_fasta(inpfile)
assert ht.count_partitions() == (2, 0)
s1 = "CATGCAGAAGTTCCGCAACCATACCGTTCAGT"
pid1 = ht.get_partition_id(s1)
s2 = "CAAATGTACATGCACTTAAAATCATCCAGCCG"
pid2 = ht.get_partition_id(s2)
assert pid1 == 2
assert pid2 == 80293
ht.join_partitions(pid1, pid2)
pid1 = ht.get_partition_id(s1)
pid2 = ht.get_partition_id(s2)
assert pid1 == pid2
assert ht.count_partitions() == (1, 0)
def test_load_partitioned():
inpfile = utils.get_test_data('combine_parts_1.fa')
ht = khmer.LabelHash(32, 1, 1)
ht.consume_partitioned_fasta(inpfile)
assert ht.count_partitions() == (2, 0)
s1 = "CATGCAGAAGTTCCGCAACCATACCGTTCAGT"
assert ht.get(s1)
s2 = "CAAATGTACATGCACTTAAAATCATCCAGCCG"
assert ht.get(s2)
s3 = "CATGCAGAAGTTCCGCAACCATACCGTTCAGTTCCTGGTGGCTA"[-32:]
assert ht.get(s3)
def test_count_within_radius_simple():
inpfile = utils.get_test_data('all-A.fa')
ht = khmer.LabelHash(4, 1e6, 2)
print ht.consume_fasta(inpfile)
n = ht.count_kmers_within_radius('AAAA', 1)
assert n == 1
n = ht.count_kmers_within_radius('AAAA', 10)
assert n == 1
def test_count_within_radius_big():
inpfile = utils.get_test_data('random-20-a.fa')
ht = khmer.LabelHash(20, 1e6, 4)
ht.consume_fasta(inpfile)
n = ht.count_kmers_within_radius('CGCAGGCTGGATTCTAGAGG', int(1e6))
assert n == 3960
ht = khmer.LabelHash(21, 1e6, 4)
ht.consume_fasta(inpfile)
n = ht.count_kmers_within_radius('CGCAGGCTGGATTCTAGAGGC', int(1e6))
assert n == 39
def test_count_kmer_degree():
inpfile = utils.get_test_data('all-A.fa')
ht = khmer.LabelHash(4, 1e6, 2)
ht.consume_fasta(inpfile)
assert ht.kmer_degree('AAAA') == 2
assert ht.kmer_degree('AAAT') == 1
assert ht.kmer_degree('AATA') == 0
assert ht.kmer_degree('TAAA') == 1
def test_save_load_tagset():
ht = khmer.LabelHash(32, 1, 1)
outfile = utils.get_temp_filename('tagset')
ht.add_tag('A' * 32)
ht.save_tagset(outfile)
ht.add_tag('G' * 32)
ht.load_tagset(outfile) # implicitly => clear_tags=True
ht.save_tagset(outfile)
# if tags have been cleared, then the new tagfile will be larger (34 bytes)
# else smaller (26 bytes).
fp = open(outfile, 'rb')
data = fp.read()
fp.close()
assert len(data) == 26, len(data)
def test_save_load_tagset_noclear():
ht = khmer.LabelHash(32, 1, 1)
outfile = utils.get_temp_filename('tagset')
ht.add_tag('A' * 32)
ht.save_tagset(outfile)
ht.add_tag('G' * 32)
ht.load_tagset(outfile, False) # set clear_tags => False; zero tags
ht.save_tagset(outfile)
# if tags have been cleared, then the new tagfile will be large (34 bytes);
# else small (26 bytes).
fp = open(outfile, 'rb')
data = fp.read()
fp.close()
assert len(data) == 34, len(data)
def test_stop_traverse():
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
# without tagging/joining across consume, this breaks into two partition;
# with, it is one partition.
ht.add_stop_tag('TTGCATACGTTGAGCCAGCG')
ht.consume_fasta_and_tag(filename) # DO NOT join reads across stoptags
subset = ht.do_subset_partition(0, 0, True)
ht.merge_subset(subset)
n, _ = ht.count_partitions()
assert n == 2, n
def test_tag_across_stoptraverse():
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
# without tagging/joining across consume, this breaks into two partition;
# with, it is one partition.
ht.add_stop_tag('CCGAATATATAACAGCGACG')
ht.consume_fasta_and_tag_with_stoptags(filename) # DO join reads across
subset = ht.do_subset_partition(0, 0)
n, _ = ht.count_partitions()
assert n == 99 # reads only connected by traversal...
n, _ = ht.subset_count_partitions(subset)
assert n == 2 # but need main to cross stoptags.
ht.merge_subset(subset)
n, _ = ht.count_partitions() # ta-da!
assert n == 1, n
def test_notag_across_stoptraverse():
filename = utils.get_test_data('random-20-a.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
# connecting k-mer at the beginning/end of a read: breaks up into two.
ht.add_stop_tag('TTGCATACGTTGAGCCAGCG')
ht.consume_fasta_and_tag_with_stoptags(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
n, _ = ht.count_partitions()
assert n == 2, n
def test_find_stoptags():
ht = khmer.LabelHash(5, 1, 1)
ht.add_stop_tag("AAAAA")
assert ht.identify_stoptags_by_position("AAAAA") == [0]
assert ht.identify_stoptags_by_position("AAAAAA") == [0, 1]
assert ht.identify_stoptags_by_position("TTTTT") == [0]
assert ht.identify_stoptags_by_position("TTTTTT") == [0, 1]
def test_find_stoptags2():
ht = khmer.LabelHash(4, 1, 1)
ht.add_stop_tag("ATGC")
x = ht.identify_stoptags_by_position("ATGCATGCGCAT")
assert x == [0, 2, 4, 8], x
def test_get_ksize():
kh = khmer.LabelHash(22, 1, 1)
assert kh.ksize() == 22
def test_get_hashsizes():
kh = khmer.LabelHash(22, 100, 4)
assert kh.hashsizes() == [101, 103, 107, 109], kh.hashsizes()
def test_extract_unique_paths_0():
kh = khmer.LabelHash(10, 1e5, 4)
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
assert x == ['ATGGAGAGACACAGATAGACAGGAGTGGCGATG']
kh.consume('ATGGAGAGACACAGATAGACAGGAGTGGCGATG')
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
assert not x
def test_extract_unique_paths_1():
kh = khmer.LabelHash(10, 1e5, 4)
kh.consume('AGTGGCGATG')
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
print x
assert x == ['ATGGAGAGACACAGATAGACAGGAGTGGCGAT'] # all but the last k-mer
def test_extract_unique_paths_2():
kh = khmer.LabelHash(10, 1e5, 4)
kh.consume('ATGGAGAGAC')
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
print x
assert x == ['TGGAGAGACACAGATAGACAGGAGTGGCGATG'] # all but the 1st k-mer
def test_extract_unique_paths_3():
kh = khmer.LabelHash(10, 1e5, 4)
kh.consume('ATGGAGAGAC')
kh.consume('AGTGGCGATG')
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
print x
# all but the 1st/last k-mer
assert x == ['TGGAGAGACACAGATAGACAGGAGTGGCGAT']
def test_extract_unique_paths_4():
kh = khmer.LabelHash(10, 1e5, 4)
kh.consume('ATGGAGAGAC')
kh.consume('AGTGGCGATG')
kh.consume('ATAGACAGGA')
x = kh.extract_unique_paths('ATGGAGAGACACAGATAGACAGGAGTGGCGATG', 10, 1)
print x
assert x == ['TGGAGAGACACAGATAGACAGG', 'TAGACAGGAGTGGCGAT']
def test_find_unpart():
filename = utils.get_test_data('random-20-a.odd.fa')
filename2 = utils.get_test_data('random-20-a.even.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
n, _ = ht.count_partitions()
assert n == 49
ht.find_unpart(filename2, True, False)
n, _ = ht.count_partitions()
assert n == 1, n # all sequences connect
def test_find_unpart_notraverse():
filename = utils.get_test_data('random-20-a.odd.fa')
filename2 = utils.get_test_data('random-20-a.even.fa')
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
n, _ = ht.count_partitions()
assert n == 49
ht.find_unpart(filename2, False, False) # <-- don't traverse
n, _ = ht.count_partitions()
assert n == 99, n # all sequences disconnected
def test_find_unpart_fail():
filename = utils.get_test_data('random-20-a.odd.fa')
filename2 = utils.get_test_data('random-20-a.odd.fa') # <- switch to odd
K = 20 # size of kmer
HT_SIZE = 100000 # size of hashtable
N_HT = 3 # number of hashtables
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
n, _ = ht.count_partitions()
assert n == 49
ht.find_unpart(filename2, True, False)
n, _ = ht.count_partitions()
assert n == 49, n # only 49 sequences worth of tags
def test_simple_median():
hi = khmer.LabelHash(6, 1e6, 2)
(median, average, stddev) = hi.get_median_count("AAAAAA")
print median, average, stddev
assert median == 0
assert average == 0.0
assert stddev == 0.0
hi.consume("AAAAAA")
(median, average, stddev) = hi.get_median_count("AAAAAA")
print median, average, stddev
assert median == 1
assert average == 1.0
assert stddev == 0.0
def test_bad_primes():
try:
hi = khmer._LabelHash.__new__(khmer.LabelHash, 6, ["a", "b", "c"])
assert 0, "Non number prime list should fail"
except TypeError as e:
print str(e)
```
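`test_bloom_python_1` above counts a k-mer as new whenever the filter has not seen it yet. Below is a self-contained Python 3 sketch of that pattern with a toy Bloom filter; the hash choice and sizes are illustrative and unrelated to khmer's internals:
```python
import hashlib

class ToyBloom:
    def __init__(self, n_bits=1 << 20, n_hashes=3):
        self.n_bits, self.n_hashes = n_bits, n_hashes
        self.bits = bytearray(n_bits // 8)

    def _positions(self, item):
        for i in range(self.n_hashes):
            digest = hashlib.sha256(f"{i}:{item}".encode()).digest()
            yield int.from_bytes(digest[:8], "big") % self.n_bits

    def get(self, item):
        return all(self.bits[p // 8] & (1 << (p % 8)) for p in self._positions(item))

    def count(self, item):
        for p in self._positions(item):
            self.bits[p // 8] |= 1 << (p % 8)

K = 4
sequence = "ACGTACGTTTACGT"
bloom, n_unique = ToyBloom(), 0
for i in range(len(sequence) - K + 1):
    kmer = sequence[i:i + K]
    if not bloom.get(kmer):   # probably-new k-mer; false positives can only undercount
        n_unique += 1
    bloom.count(kmer)
print(n_unique)
```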
|
{
"source": "Jessicanini/Dinosaur",
"score": 3
}
|
#### File: Jessicanini/Dinosaur/Dino1.py
```python
import numpy as np
from PIL import Image
import cv2 #opencv
import io
import time
import pandas as pd
import numpy as np
from IPython.display import clear_output
from random import randint
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
#keras imports
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD , Adam
from keras.callbacks import TensorBoard
from collections import deque
import random
import pickle
from io import BytesIO
import base64
import json
#path variables
game_url = "chrome://dino"
chrome_driver_path = "../chromedriver"
loss_file_path = "./objects/loss_df.csv"
actions_file_path = "./objects/actions_df.csv"
q_value_file_path = "./objects/q_values.csv"
scores_file_path = "./objects/scores_df.csv"
#scripts
#create id for canvas for faster selection from DOM
init_script = "document.getElementsByClassName('runner-canvas')[0].id = 'runner-canvas'"
#get image from canvas
getbase64Script = "canvasRunner = document.getElementById('runner-canvas'); \
return canvasRunner.toDataURL().substring(22)"
#=============#
'''
* Game class: Selenium interfacing between the python and browser
* __init__(): Launch the browser window using the attributes in chrome_options
* get_crashed(): return true if the agent has crashed on an obstacle. Gets a javascript variable from the game describing the state
* get_playing(): true if game in progress, false if crashed or paused
* restart() : sends a signal to browser-javascript to restart the game
* press_up(): sends a signal to the browser to press the up key
* get_score(): gets current game score from javascript variables.
* pause(): pause the game
* resume(): resume a paused game if not crashed
* end(): close the browser and end the game
'''
class Game:
def __init__(self,custom_config=True):
chrome_options = Options()
chrome_options.add_argument("disable-infobars")
chrome_options.add_argument("--mute-audio")
self._driver = webdriver.Chrome(executable_path = chrome_driver_path,chrome_options=chrome_options)
self._driver.set_window_position(x=-10,y=0)
self._driver.get('chrome://dino')
self._driver.execute_script("Runner.config.ACCELERATION=0")
self._driver.execute_script(init_script)
def get_crashed(self):
return self._driver.execute_script("return Runner.instance_.crashed")
def get_playing(self):
return self._driver.execute_script("return Runner.instance_.playing")
def restart(self):
self._driver.execute_script("Runner.instance_.restart()")
    def press_up(self):
        self._driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_UP)
    def press_down(self):
        # used by DinoAgent.duck() below, which otherwise has no backing method
        self._driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_DOWN)
def get_score(self):
score_array = self._driver.execute_script("return Runner.instance_.distanceMeter.digits")
        score = ''.join(score_array)  # the javascript object is an array of digits, e.g. [1,0,0] for a score of 100
return int(score)
def pause(self):
return self._driver.execute_script("return Runner.instance_.stop()")
def resume(self):
return self._driver.execute_script("return Runner.instance_.play()")
def end(self):
self._driver.close()
#==========#
class DinoAgent:
def __init__(self,game): #takes game as input for taking actions
self._game = game;
self.jump(); #to start the game, we need to jump once
def is_running(self):
return self._game.get_playing()
def is_crashed(self):
return self._game.get_crashed()
def jump(self):
self._game.press_up()
def duck(self):
self._game.press_down()
#==========#
class Game_state:
def __init__(self,agent,game):
self._agent = agent
self._game = game
self._display = show_img() #display the processed image on screen using openCV, implemented using python coroutine
        self._display.__next__()  # initialize the display coroutine
def get_state(self,actions):
actions_df.loc[len(actions_df)] = actions[1] # storing actions in a dataframe
score = self._game.get_score()
reward = 0.1
is_over = False #game over
if actions[1] == 1:
self._agent.jump()
image = grab_screen(self._game._driver)
self._display.send(image) #display the image on screen
if self._agent.is_crashed():
            scores_df.loc[len(scores_df)] = score  # log the score when game is over
self._game.restart()
reward = -1
is_over = True
return image, reward, is_over #return the Experience tuple
#==========#
def save_obj(obj, name ):
with open('objects/'+ name + '.pkl', 'wb') as f: #dump files into objects folder
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('objects/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def grab_screen(_driver):
image_b64 = _driver.execute_script(getbase64Script)
screen = np.array(Image.open(BytesIO(base64.b64decode(image_b64))))
image = process_img(screen)#processing image as required
return image
def process_img(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #RGB to Grey Scale
image = image[:300, :500] #Crop Region of Interest(ROI)
image = cv2.resize(image, (80,80))
return image
def show_img(graphs = False):
"""
Show images in new window
"""
while True:
screen = (yield)
window_title = "logs" if graphs else "game_play"
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
        imS = cv2.resize(screen, (800, 400))
        cv2.imshow(window_title, imS)
if (cv2.waitKey(1) & 0xFF == ord('q')):
cv2.destroyAllWindows()
break
#
#Intialize log structures from file if exists else create new
loss_df = pd.read_csv(loss_file_path) if os.path.isfile(loss_file_path) else pd.DataFrame(columns =['loss'])
scores_df = pd.read_csv(scores_file_path) if os.path.isfile(scores_file_path) else pd.DataFrame(columns = ['scores'])
actions_df = pd.read_csv(actions_file_path) if os.path.isfile(actions_file_path) else pd.DataFrame(columns = ['actions'])
q_values_df = pd.read_csv(q_value_file_path) if os.path.isfile(q_value_file_path) else pd.DataFrame(columns = ['qvalues'])
#game parameters
ACTIONS = 2 # possible actions: jump, do nothing
GAMMA = 0.99 # decay rate of past observations original 0.99
OBSERVATION = 100. # timesteps to observe before training
EXPLORE = 100000 # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 16 # size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 1e-4
img_rows , img_cols = 80,80
img_channels = 4 #We stack 4 frames
# training variables saved as checkpoints to filesystem to resume training from the same step
def init_cache():
"""initial variable caching, done only once"""
save_obj(INITIAL_EPSILON,"epsilon")
t = 0
save_obj(t,"time")
D = deque()
save_obj(D,"D")
'''Call only once to init file structure
'''
#init_cache()
def buildmodel():
print("Now we build the model")
model = Sequential()
model.add(Conv2D(32, (8, 8), padding='same',strides=(4, 4),input_shape=(img_cols,img_rows,img_channels))) #80*80*4
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4),strides=(2, 2), padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3),strides=(1, 1), padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(ACTIONS))
adam = Adam(lr=LEARNING_RATE)
model.compile(loss='mse',optimizer=adam)
#create model file if not present
    if not os.path.isfile('model.h5'):
model.save_weights('model.h5')
print("We finish building the model")
return model
'''
main training module
Parameters:
* model => Keras Model to be trained
* game_state => Game State module with access to game environment and dino
* observe => flag to indicate whether the model is to be trained (weight updates), or should just play
'''
def trainNetwork(model,game_state,observe=False):
last_time = time.time()
# store the previous observations in replay memory
D = load_obj("D") #load from file system
# get the first state by doing nothing
do_nothing = np.zeros(ACTIONS)
do_nothing[0] =1 #0 => do nothing,
#1=> jump
x_t, r_0, terminal = game_state.get_state(do_nothing) # get next step after performing the action
s_t = np.stack((x_t, x_t, x_t, x_t), axis=2) # stack 4 images to create placeholder input
    s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) #1*80*80*4
initial_state = s_t
if observe :
OBSERVE = 999999999 #We keep observe, never train
epsilon = FINAL_EPSILON
print ("Now we load weight")
model.load_weights("model.h5")
adam = Adam(lr=LEARNING_RATE)
model.compile(loss='mse',optimizer=adam)
print ("Weight load successfully")
else: #We go to training mode
OBSERVE = OBSERVATION
epsilon = load_obj("epsilon")
model.load_weights("model.h5")
adam = Adam(lr=LEARNING_RATE)
model.compile(loss='mse',optimizer=adam)
t = load_obj("time") # resume from the previous time step stored in file system
while (True): #endless running
loss = 0
Q_sa = 0
action_index = 0
        r_t = 0 #reward at t
a_t = np.zeros([ACTIONS]) # action at t
#choose an action epsilon greedy
if t % FRAME_PER_ACTION == 0: #parameter to skip frames for actions
if random.random() <= epsilon: #randomly explore an action
print("----------Random Action----------")
action_index = random.randrange(ACTIONS)
a_t[action_index] = 1
else: # predict the output
q = model.predict(s_t) #input a stack of 4 images, get the prediction
max_Q = np.argmax(q) # chosing index with maximum q value
action_index = max_Q
                a_t[action_index] = 1 # 0 => do nothing, 1 => jump
#We reduced the epsilon (exploration parameter) gradually
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
#run the selected action and observed next state and reward
x_t1, r_t, terminal = game_state.get_state(a_t)
print('fps: {0}'.format(1 / (time.time()-last_time))) # helpful for measuring frame rate
last_time = time.time()
        x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1) #1x80x80x1
s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3) # append the new image to input stack and remove the first one
# store the transition in D
D.append((s_t, action_index, r_t, s_t1, terminal))
if len(D) > REPLAY_MEMORY:
D.popleft()
#only train if done observing
if t > OBSERVE:
#sample a minibatch to train on
minibatch = random.sample(D, BATCH)
            inputs = np.zeros((BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3])) #BATCH x 80 x 80 x 4
            targets = np.zeros((inputs.shape[0], ACTIONS)) #BATCH x 2
#Now we do the experience replay
for i in range(0, len(minibatch)):
state_t = minibatch[i][0] # 4D stack of images
action_t = minibatch[i][1] #This is action index
reward_t = minibatch[i][2] #reward at state_t due to action_t
state_t1 = minibatch[i][3] #next state
                terminal = minibatch[i][4] #whether the agent died or survived due to the action
inputs[i:i + 1] = state_t
targets[i] = model.predict(state_t) # predicted q values
Q_sa = model.predict(state_t1) #predict q values for next step
if terminal:
targets[i, action_t] = reward_t # if terminated, only equals reward
else:
targets[i, action_t] = reward_t + GAMMA * np.max(Q_sa)
loss += model.train_on_batch(inputs, targets)
loss_df.loc[len(loss_df)] = loss
q_values_df.loc[len(q_values_df)] = np.max(Q_sa)
s_t = initial_state if terminal else s_t1 #reset game to initial frame if terminate
t = t + 1
# save progress every 1000 iterations
if t % 1000 == 0:
print("Now we save model")
game_state._game.pause() #pause game while saving to filesystem
model.save_weights("model.h5", overwrite=True)
save_obj(D,"D") #saving episodes
save_obj(t,"time") #caching time steps
save_obj(epsilon,"epsilon") #cache epsilon to avoid repeated randomness in actions
loss_df.to_csv("./objects/loss_df.csv",index=False)
scores_df.to_csv("./objects/scores_df.csv",index=False)
actions_df.to_csv("./objects/actions_df.csv",index=False)
q_values_df.to_csv(q_value_file_path,index=False)
with open("model.json", "w") as outfile:
json.dump(model.to_json(), outfile)
clear_output()
game_state._game.resume()
# print info
state = ""
if t <= OBSERVE:
state = "observe"
elif t > OBSERVE and t <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print("TIMESTEP", t, "/ STATE", state, "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, "/ Q_MAX " , np.max(Q_sa), "/ Loss ", loss)
print("Episode finished!")
print("************************")
#main function
def playGame(observe=False):
game = Game()
dino = DinoAgent(game)
    game_state = Game_state(dino, game)
model = buildmodel()
try:
trainNetwork(model,game_state,observe=observe)
except StopIteration:
game.end()
playGame(observe=False);
```
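A compact numpy sketch (hypothetical shapes, no Keras) of the Bellman target computation that the experience-replay loop above performs entry by entry:
```python
import numpy as np

GAMMA = 0.99

def q_targets(q_pred, q_next, actions, rewards, terminals):
    """Return training targets: copy the predicted Q-values and overwrite the taken
    action's entry with r (terminal) or r + gamma * max_a' Q(s', a') (non-terminal)."""
    targets = q_pred.copy()
    rows = np.arange(len(actions))
    targets[rows, actions] = rewards + (1 - terminals) * GAMMA * q_next.max(axis=1)
    return targets

q_pred = np.array([[0.2, 0.5], [0.1, 0.3]])   # Q(s, .) for two sampled transitions
q_next = np.array([[0.4, 0.1], [0.0, 0.9]])   # Q(s', .)
print(q_targets(q_pred, q_next,
                actions=np.array([1, 0]),
                rewards=np.array([0.1, -1.0]),
                terminals=np.array([0, 1])))
# rows become [0.2, 0.496] and [-1.0, 0.3]
```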
|
{
"source": "jessicant/practice",
"score": 3
}
|
#### File: LTPractice/BackEnd/joe.py
```python
import tkinter
import tkinter.messagebox
root = tkinter.Tk()
root.title("To-Do List Practice by <NAME>")
def add_task():
task = entry_task.get()
if task != "":
listbox_tasks.insert(tkinter.END, task)
entry_task.delete(0, tkinter.END)
else:
tkinter.messagebox.showwarning(title="Warning!", message="Enter task")
def delete_task():
try:
task_index = listbox_tasks.curselection()[0]
listbox_tasks.delete(task_index)
except:
tkinter.messagebox.showwarning(title="Warning!", message="Select task")
def load_tasks():
pass
#create GUI
listbox_tasks = tkinter.Listbox(root, height=20, width=75)
listbox_tasks.pack()
entry_task = tkinter.Entry(root, width=50)
entry_task.pack()
button_add_task = tkinter.Button(root, text="Add task", width=48, command=add_task)
button_add_task.pack()
button_delete_task = tkinter.Button(root, text="Delete task", width=48, command=delete_task)
button_delete_task.pack()
root.mainloop()
```
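`load_tasks` above is left as a stub. Below is a hedged sketch of one way the listbox contents could be persisted with the standard library; the file name and JSON format are assumptions, not part of the original:
```python
import json
import os

TASKS_FILE = "tasks.json"  # hypothetical location

def save_tasks(listbox):
    """Dump every listbox entry to a JSON file."""
    with open(TASKS_FILE, "w") as f:
        json.dump(list(listbox.get(0, "end")), f)

def load_tasks(listbox):
    """Re-insert previously saved entries, if the file exists."""
    if os.path.isfile(TASKS_FILE):
        with open(TASKS_FILE) as f:
            for task in json.load(f):
                listbox.insert("end", task)
```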
|
{
"source": "jessica-ol/dbt-sugar",
"score": 2
}
|
#### File: core/task/base.py
```python
import abc
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.clients.yaml_helpers import open_yaml, save_yaml
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.connectors.postgres_connector import PostgresConnector
from dbt_sugar.core.connectors.redshift_connector import RedshiftConnector
from dbt_sugar.core.connectors.snowflake_connector import SnowflakeConnector
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.logger import GLOBAL_LOGGER as logger
COLUMN_NOT_DOCUMENTED = "No description for this column."
MODEL_NOT_DOCUMENTED = "No description for this model."
DEFAULT_EXCLUDED_FOLDERS_PATTERN = r"\/target\/|\/dbt_modules\/"
DEFAULT_EXCLUDED_YML_FILES = r"dbt_project.yml|packages.yml"
DB_CONNECTORS = {
"postgres": PostgresConnector,
"snowflake": SnowflakeConnector,
"redshift": RedshiftConnector,
}
class BaseTask(abc.ABC):
"""Sets up basic API for task-like classes."""
def __init__(
self,
flags: FlagParser,
dbt_path: Path,
sugar_config: DbtSugarConfig,
dbt_profile: DbtProfile,
) -> None:
self.repository_path = dbt_path
self._sugar_config = sugar_config
self._flags = flags
self._dbt_profile = dbt_profile
# Populated by class methods
self._excluded_folders_from_search_pattern: str = self.setup_paths_exclusion()
self.all_dbt_models: Dict[str, Path] = {}
self.dbt_definitions: Dict[str, str] = {}
self.dbt_tests: Dict[str, List[Dict[str, Any]]] = {}
self.build_descriptions_dictionary()
def get_connector(self) -> Union[PostgresConnector, SnowflakeConnector, RedshiftConnector]:
dbt_credentials = self._dbt_profile.profile
connector = DB_CONNECTORS.get(dbt_credentials.get("type", ""))
if not connector:
raise NotImplementedError(
f"Connector '{dbt_credentials.get('type')}' is not implemented."
)
return connector(dbt_credentials)
def setup_paths_exclusion(self) -> str:
"""Appends excluded_folders to the default folder exclusion patten."""
if self._sugar_config.dbt_project_info["excluded_folders"]:
excluded_folders_from_search_pattern: str = r"\/|\/".join(
self._sugar_config.dbt_project_info["excluded_folders"]
)
return fr"{DEFAULT_EXCLUDED_FOLDERS_PATTERN}|\/{excluded_folders_from_search_pattern}\/"
else:
return DEFAULT_EXCLUDED_FOLDERS_PATTERN
def get_column_description_from_dbt_definitions(self, column_name: str) -> str:
"""Searches for the description of a column in all the descriptions in DBT.
Args:
column_name (str): column name to get the description from.
Returns:
str: with the description of the column.
"""
return self.dbt_definitions.get(column_name, COLUMN_NOT_DOCUMENTED)
def get_documented_columns(
self, schema_content: Dict[str, Any], model_name: str
) -> Dict[str, str]:
"""Method to get the documented columns from a model in a schema.yml.
Args:
schema_content (Dict[str, Any]): content of the schema.yml.
model_name (str): model name to get the columns from.
Returns:
Dict[str, str]: the documented column names and their descriptions.
"""
documented_columns = {}
for model in schema_content.get("models", []):
if model["name"] == model_name:
for column in model.get("columns", []):
if column.get("description", COLUMN_NOT_DOCUMENTED) != COLUMN_NOT_DOCUMENTED:
documented_columns[column["name"]] = column["description"]
return documented_columns
def column_has_primary_key_tests(
self, schema_content: Dict[str, Any], model_name: str, column_name: str
) -> Optional[bool]:
"""Method to check that the column with the primary key have the unique and not_null tests.
Args:
schema_content (Dict[str, Any]): content of the schema.yml.
model_name (str): model name to check.
column_name (str): column name with the primary key.
Returns:
Optional[bool]: True if the column has both the unique and not_null tests,
False if it is missing one of them, None if the column doesn't exist.
"""
for model in schema_content.get("models", []):
if model["name"] == model_name:
for column in model.get("columns", []):
if column.get("name", "") == column_name:
column_tests = column.get("tests", [])
return "unique" in column_tests and "not_null" in column_tests
return None
def get_not_documented_columns(
self, schema_content: Dict[str, Any], model_name: str
) -> Dict[str, str]:
"""Method to get the undocumented columns from a model in a schema.yml.
Args:
schema_content (Dict[str, Any]): content of the schema.yml.
model_name (str): model name to get the columns from.
Returns:
Dict[str, str]: the undocumented column names with placeholder descriptions.
"""
not_documented_columns = {}
for model in schema_content.get("models", []):
if model["name"] == model_name:
for column in model.get("columns", []):
if column.get("description", COLUMN_NOT_DOCUMENTED) == COLUMN_NOT_DOCUMENTED:
not_documented_columns[column["name"]] = COLUMN_NOT_DOCUMENTED
return not_documented_columns
def combine_two_list_without_duplicates(self, list1: List[Any], list2: List[Any]) -> List[Any]:
"""
Method to combine two lists without duplicates.
Args:
list1 (List[Any]): First list with any value.
list2 (List[Any]): Second list with any value.
Returns:
List[Any]: the combined list.
"""
if not list1:
return list2
for item in list1:
if item not in list2:
list2.append(item)
return list2
def update_model_description_test_tags(
self,
path_file: Path,
model_name: str,
dict_column_description_to_update: Dict[str, Dict[str, Any]],
):
"""
Method to update a schema.yml with a Dict of columns names, tests, and tags.
Args:
path_file (Path): Path of the schema.yml file to update.
model_name (str): Name of the model to update.
dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict mapping each column name to
the description, tags, and tests to update.
"""
content = open_yaml(
path_file,
preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
)
for model in content.get("models", []):
if model["name"] == model_name:
for column in model.get("columns", []):
column_name = column["name"]
if column_name in dict_column_description_to_update:
# Update the description
description = dict_column_description_to_update[column_name].get(
"description"
)
if description:
column["description"] = description
# Update the tests without duplicating them.
tests = dict_column_description_to_update[column_name].get("tests")
if tests:
column["tests"] = self.combine_two_list_without_duplicates(
column.get("tests", []), tests
)
# Update the tags without duplicating them.
tags = dict_column_description_to_update[column_name].get("tags")
if tags:
column["tags"] = self.combine_two_list_without_duplicates(
column.get("tags", []), tags
)
save_yaml(
path_file,
content,
preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
)
def update_column_description_from_schema(
self, path_file: Path, dict_column_description_to_update: Dict[str, Dict[str, Any]]
) -> None:
"""Method to update a schema.yml with a Dict of columns names and description.
Args:
path_file (Path): Path to the schema.yml file to update the columns descriptions from.
dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict mapping each column name to
the description to update.
"""
content = open_yaml(
path_file,
preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
)
for model in content.get("models", []):
for column in model.get("columns", []):
column_name = column["name"]
if column_name in dict_column_description_to_update:
new_description = dict_column_description_to_update[column_name].get(
"description"
)
if new_description:
column["description"] = new_description
save_yaml(
path_file,
content,
preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
)
def update_column_descriptions(
self, dict_column_description_to_update: Dict[str, Dict[str, Any]]
) -> None:
"""Method to update all the schema.ymls from a dbt project with a Dict of columns names and description.
Args:
dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict mapping each column name to
the description to update.
"""
for root, _, files in os.walk(self.repository_path):
if not re.search(self._excluded_folders_from_search_pattern, root):
files = [
f
for f in files
if f.lower().endswith(".yml")
and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
]
for file in files:
path_file = Path(os.path.join(root, file))
self.update_column_description_from_schema(
path_file, dict_column_description_to_update
)
def update_test_in_dbt_tests(self, model_name: str, column: Dict[str, Any]) -> None:
"""Update a column tests in the global tests dictionary.
Args:
model_name (str): with the model name.
column (Dict[str, Any]): column information.
"""
if model_name not in self.dbt_tests:
self.dbt_tests[model_name] = [
{"name": column["name"], "tests": column.get("tests", [])}
]
else:
self.dbt_tests[model_name].append(
{"name": column["name"], "tests": column.get("tests", [])}
)
def update_description_in_dbt_descriptions(
self, column_name: str, column_description: str
) -> None:
"""Update a column description in the global description dictionary.
Args:
column_name (str): column name to update.
column_description (str): column description to update.
"""
if not column_description:
column_description = COLUMN_NOT_DOCUMENTED
self.dbt_definitions[column_name] = column_description
def remove_excluded_models(self, content: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
"""Removes models that are excluded_models from the models dict"""
models = content.get("models", [])
# if self._sugar_config.dbt_project_info.get("excluded_models"):
logger.debug(models)
if models:
return [
model_dict
for model_dict in models
if model_dict["name"] not in self._sugar_config.dbt_project_info["excluded_models"]
]
return None
def read_file(self, filename_path: Path) -> str:
"""
Method to read a file.
Args:
filename_path (Path): full path to the file we want to read.
Returns:
str: content of the file.
"""
content = ""
if Path(filename_path).exists():
with open(filename_path, "r") as reader:
content = reader.read()
return content
def load_descriptions_from_a_schema_file(
self, content: Dict[str, Any], path_schema: Path
) -> None:
"""Load the columns descriptions from a schema.yml into the global descriptions cache.
This cache is used so that we can homogenise descriptions across models and import
already documented ones.
Args:
content (Dict[str, Any]): content of the schema.yaml.
"""
if not content:
return
models = self.remove_excluded_models(content)
if not models:
return
for model in models:
self.all_dbt_models[model["name"]] = path_schema
for column in model.get("columns", []):
column_description = column.get("description", None)
self.update_description_in_dbt_descriptions(column["name"], column_description)
self.update_test_in_dbt_tests(model["name"], column)
def get_file_path_from_sql_model(self, model_name: str) -> Optional[Path]:
"""Get the complete file path from a model name.
Args:
model_name (str): with the model name to find.
Returns:
Optional[Path]: Path of the SQL file, or None if the file doesn't exist.
"""
for root, _, files in os.walk(self.repository_path):
if not re.search(self._excluded_folders_from_search_pattern, root):
for file_name in files:
file_name = file_name.lower()
if file_name == f"{model_name}.sql" and not re.search(
DEFAULT_EXCLUDED_YML_FILES, file_name
):
return Path(os.path.join(root, file_name))
return None
def build_descriptions_dictionary(self) -> None:
"""Load the columns descriptions from all schema files in a dbt project.
This is purely responsible for building the knowledge of all possible definitions.
In other words, it is independent from the documentation orchestration,
which happens in the `doc` task.
"""
for root, _, files in os.walk(self.repository_path):
if not re.search(self._excluded_folders_from_search_pattern, root):
files = [
f
for f in files
if f.lower().endswith(".yml")
and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
]
for file in files:
path_file = Path(os.path.join(root, file))
content = open_yaml(
path_file,
preserve_yaml_order=self._sugar_config.config.get(
"preserve_yaml_order", False
),
)
logger.debug(path_file)
if content.get("models"):
self.load_descriptions_from_a_schema_file(content, path_file)
def is_model_in_schema_content(self, content, model_name) -> bool:
"""Method to check if a model exists in a schema.yaml content.
Args:
content (Dict[str, Any]): content of the schema.yaml.
model_name (str): model name to search.
Returns:
bool: True if the model is present in the schema.yaml.
"""
if not content:
return False
return any(model["name"] == model_name for model in content.get("models", []))
def find_model_schema_file(self, model_name: str) -> Tuple[Optional[Path], bool, bool]:
for root, _, files in os.walk(self.repository_path):
if not re.search(self._excluded_folders_from_search_pattern, root):
schema_file_path = None
model_file_found = False
schema_file_exists = False
is_already_documented = False
for file in files:
# check the model file exists and if it does return the path
# of the schema.yml it's in.
if file == f"{model_name}.sql":
model_file_found = True
logger.debug(f"Found sql file for '{model_name}'")
schema_file_path = self.all_dbt_models.get(model_name, None)
# if it's not in a schema file, then it's not documented and we
# need to create a schema.yml "dummy" to place it in.
if not schema_file_path and model_file_found:
logger.debug(
f"'{model_name}' was not contained in a schema file. Creating one at {root}"
)
schema_file_path = Path(os.path.join(root, "schema.yml"))
# check whether there is a schema file already present
schema_file_exists = False
if schema_file_path.exists():
schema_file_exists = True
return (schema_file_path, schema_file_exists, is_already_documented)
if schema_file_path and model_file_found:
logger.debug(
f"'{model_name}' found in '{schema_file_path}', we'll update entry."
)
is_already_documented = True
schema_file_exists = True
return (schema_file_path, schema_file_exists, is_already_documented)
return None, False, False
def is_exluded_model(self, model_name: str) -> bool:
if model_name in self._sugar_config.dbt_project_info.get("excluded_models", []):
raise ValueError(
f"You decided to exclude '{model_name}' from dbt-sugar's scope. "
f"You run `{self._flags.task}` on it you will need to remove "
"it from the excluded_models list in the sugar_config.yml"
)
return True
@abc.abstractmethod
def run(self) -> int:
"""Orchestrator method that calls all the needed stuff to run a documentation task."""
...
```
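`BaseTask` is abstract, so concrete tasks only need to provide `run()`. A minimal hedged sketch of a subclass that uses the `dbt_definitions` cache built above; the class name and behaviour are illustrative, not an actual dbt-sugar task.
```python
# Hypothetical BaseTask subclass, shown only to illustrate the API surface
# (constructor arguments and the abstract run() method); not a real dbt-sugar task.
from dbt_sugar.core.task.base import COLUMN_NOT_DOCUMENTED, BaseTask


class ListUndocumentedColumnsTask(BaseTask):
    """Prints every column that still carries the placeholder description."""

    def run(self) -> int:
        undocumented = sorted(
            column
            for column, description in self.dbt_definitions.items()
            if description == COLUMN_NOT_DOCUMENTED
        )
        for column in undocumented:
            print(f"Column '{column}' has no description yet.")
        # Shell-style exit code: 0 when everything is documented.
        return 1 if undocumented else 0
```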
|
{
"source": "jessicapardo/Web-scraping-challenge",
"score": 3
}
|
#### File: Web-scraping-challenge/Missions_to_Mars/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
def init_browser():
executable_path = {"executable_path": ChromeDriverManager().install()}
return Browser("chrome", **executable_path, headless=False)
def scrape():
###########################################
## NASA Mars News Web Scraping
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
# Scrape page into Soup
html = browser.html
soup = bs(html, "lxml")
# Finding the latest title and paragraph text
slide_element = soup.select_one("ul.item_list li.slide")
# Scrape the Latest News Title
news_title = slide_element.find("div", class_="content_title").text
# Scrape the Latest Paragraph Text
news_p = slide_element.find("div", class_="article_teaser_body").text
# Close the browser after scraping
browser.quit()
###########################################
## JPL Mars Space Image - Featured Image
# Link doesn't work - Instructor said to skip - Used the example url given
featured_image_url = 'https://mars.nasa.gov/system/downloadable_items/45346_PIA22743-2.jpg'
###########################################
## Mars Facts Web Scraping
url = 'https://space-facts.com/mars/'
# Use pandas to read the html table data
mars_facts = pd.read_html(url)
# Read the first dictionary in the list into a pandas dataframe and name columns
mars_facts_df = mars_facts[0]
mars_facts_df.columns = ['Parameter', 'Value']
# Convert the dataframe table into an html table
mars_facts_table = mars_facts_df.to_html(index=False)
mars_facts_table = mars_facts_table.replace('\n', '')
mars_facts_table = mars_facts_table.replace('right', 'left')
mars_facts_table = mars_facts_table.replace('dataframe', 'table table-striped table-bordered table-responsive-sm')
###########################################
## Mars Hemispheres Web Scraping
browser = init_browser()
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
# Scrape page into Soup
html = browser.html
soup = bs(html,"lxml")
# Get the html containing the titles and put into a list
title_list = soup.find_all('div', class_='description')
# Loop through each hemisphere title to collect the full-resolution image URLs
# Initiate the list to store dictionaries
hemisphere_image_urls = []
for title in title_list:
# Navigate browser to page then click on title link to hires image page
browser.visit(url)
browser.click_link_by_partial_text(title.a.h3.text)
# Get page html and make beautifulsoup object
html = browser.html
soup = bs(html,"lxml")
# Parse the image relative url then append to domain name
img_url_list = soup.find('img', class_='wide-image')
img_url = f"https://astrogeology.usgs.gov{img_url_list['src']}"
# Create dictionary for hemisphere_image_urls
post = {
'title': title.a.h3.text,
'img_url': img_url
}
hemisphere_image_urls.append(post)
# Close the browser after scraping
browser.quit()
###########################################
## Store data in a dictionary
mars_data = {
'news_title': news_title,
'news_paragraph': news_p,
'fact_table': mars_facts_table,
'hemisphere_image_urls': hemisphere_image_urls
}
print(mars_data)
return mars_data
```
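A hedged usage sketch for the scraper above, assuming the module is saved as `scrape_mars.py` and that Chrome with a matching chromedriver is available (webdriver_manager downloads the driver); a plain script call looks like this.
```python
# Hedged usage sketch: assumes the scraper above is saved as scrape_mars.py and
# that Chrome plus a matching chromedriver are installed (webdriver_manager fetches it).
import scrape_mars

if __name__ == "__main__":
    mars_data = scrape_mars.scrape()
    print(mars_data["news_title"])
    print(mars_data["news_paragraph"])
    for hemisphere in mars_data["hemisphere_image_urls"]:
        print(hemisphere["title"], "->", hemisphere["img_url"])
```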
|
{
"source": "jessicarush/review-app",
"score": 3
}
|
#### File: app/auth/email.py
```python
from flask import render_template, current_app
from app.main.email import send_email
def send_password_reset_email(user):
'''Sends an email with a link to reset password.'''
token = user.get_reset_password_token()
send_email(current_app.config['PROJECT_NAME'] + ': Password Reset',
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
```
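The module above depends on `send_email` from `app.main.email`, which is not shown here. As a reference point only, a common Flask-Mail implementation matching the call signature used above might look like the following; treat every import path and the `mail` object as assumptions about this project, not its actual code.
```python
# Hedged sketch of an app/main/email.py style helper; this is NOT necessarily the
# project's implementation, just a common Flask-Mail pattern that matches the call
# signature used above (subject, sender, recipients, text_body, html_body).
from threading import Thread

from flask import current_app
from flask_mail import Message

from app import mail  # assumption: the app factory exposes a flask_mail.Mail instance named `mail`


def _send_async_email(app, msg):
    with app.app_context():
        mail.send(msg)


def send_email(subject, sender, recipients, text_body, html_body):
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    # Send in a background thread so the request is not blocked by SMTP.
    Thread(target=_send_async_email,
           args=(current_app._get_current_object(), msg)).start()
```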
|
{
"source": "JessicaS11/icebath",
"score": 2
}
|
#### File: icebath/core/build_gdf.py
```python
import datetime as dt
import dask
import dask.array as da
import numpy as np
from geocube.api.core import make_geocube
import geopandas as gpd
import os
import pandas as pd
import rasterio
import rioxarray
from rioxarray.exceptions import NoDataInBounds
import scipy.stats as stats
from shapely.geometry import Polygon
import xarray as xr
import warnings
import itertools as it
from icebath.core import fjord_props
from icebath.core import fl_ice_calcs as icalcs
from icebath.utils import raster_ops
from icebath.utils import vector_ops
def xarray_to_gdf(ds):
"""
Takes an xarray DataSet and generates a geodataframe of icebergs from the DEMs
"""
berg_gdf = gpd.GeoDataFrame(data=None)
for num in range(0, len(ds['dtime'])):
temp_berg_df = gdf_of_bergs(ds.isel({'dtime':num}))
berg_gdf = berg_gdf.append(temp_berg_df, ignore_index=True)
# print(berg_gdf)
# print(len(berg_gdf))
try:
berg_gdf.crs = ds.attrs['crs']
berg_gdf.sl_adjust.attrs['note'] = "sea level adjustment is relative to tidal height, not 0msl"
berg_gdf.sl_adjust.attrs['units'] = "meters"
except AttributeError:
pass
return berg_gdf
def gdf_of_bergs(onedem, usedask=True):
"""
Takes an xarray dataarray for one time period and returns the needed geodataframe of icebergs
"""
try:
onedem.elevation.attrs['crs'] = onedem.attrs['crs']
except KeyError:
try:
onedem.elevation.attrs['proj4'] = onedem.attrs['proj4']
except KeyError:
print("Your input DEM does not have a CRS attribute")
# process the raster and polygonize the potential icebergs
poss_bergs = get_poss_bergs_fr_raster(onedem, usedask)
print(len(poss_bergs))
if len(poss_bergs) == 0:
return gpd.GeoDataFrame()
# Exclude icebergs that don't meet the requirements
bergs, elevs, sl_adjs = filter_pot_bergs(poss_bergs, onedem, usedask)
# delete generator object so no issues between DEMs
try:
del poss_bergs
except NameError:
pass
temp_berg_df = gpd.GeoDataFrame({"DEMarray":elevs, 'sl_adjust':sl_adjs, 'berg_poly':bergs}, geometry='berg_poly')
# look at DEM-wide sea level adjustments
# filter out "icebergs" that have sea level adjustments outside the median +/- two times the median absolute deviation
# sl_adj_med = np.nanmedian(temp_berg_df.sl_adjust)
# sl_adj_mad = stats.median_abs_deviation(temp_berg_df.sl_adjust, nan_policy='omit')
# print(sl_adj_med)
# print(sl_adj_mad)
# temp_berg_df = temp_berg_df[(temp_berg_df['sl_adjust'] > sl_adj_med - 2*sl_adj_mad) &
# (temp_berg_df['sl_adjust'] < sl_adj_med + 2*sl_adj_mad)]
# print(len(temp_berg_df))
# Description of above filter: For a given DEM, an individual iceberg's sea level adjustment
# (***more on that later***) must fall within the median +/- 2 mean absolute deviations of
# the sea level adjustments across that entire DEM. Otherwise the candidate iceberg is excluded
# because it is likely subject to DEM generation errors or not correctly adjusted due to a
# lack of nearby open water pixels in the DEM.
# add values that are same for all icebergs in DEM
names = ['fjord', 'date', 'tidal_ht_offset', 'tidal_ht_min', 'tidal_ht_max']
col_val = [onedem.attrs['fjord'], onedem['dtime'].values, onedem['tidal_corr'].values, onedem['min_tidal_ht'].values, onedem['max_tidal_ht'].values]
for name,val in (zip(names,col_val)):
temp_berg_df[name] = val
print("Generated geodataframe of icebergs for this image")
return temp_berg_df
def get_poss_bergs_fr_raster(onedem, usedask):
flipax=[]
if pd.Series(onedem.x).is_monotonic_decreasing:
flipax.append(1)
if pd.Series(onedem.y).is_monotonic_increasing:
flipax.append(0)
fjord = onedem.attrs['fjord']
min_area = fjord_props.get_min_berg_area(fjord)
res = onedem.attrs['res'][0] #Note: the pixel area will be inaccurate if the resolution is not the same in x and y
if usedask==True:
# Daskify the iceberg segmentation process. Note that dask-image has some functionality to operate
# directly on dask arrays (e.g. dask_image.ndfilters.sobel), which would need to be put into utils.raster.py
# https://dask-image.readthedocs.io/en/latest/dask_image.ndfilters.html
# However, as of yet there doesn't appear to be a way to easily implement the watershed segmentation, other than in chunks
# print(onedem)
# see else statement with non-dask version for descriptions of what each step is doing
def seg_wrapper(tiles):
return raster_ops.labeled_from_segmentation(tiles, markers=[3,10], resolution=res, min_area=min_area, flipax=[])
def filter_wrapper(tiles, elevs):
return raster_ops.border_filtering(tiles, elevs, flipax=[])
elev_copy = onedem.elevation.data # should return a dask array
for ax in flipax:
elev_copy = da.flip(elev_copy, axis=ax)
# import matplotlib.pyplot as plt
# print(plt.imshow(elev_copy))
try:
elev_overlap = da.overlap.overlap(elev_copy, depth=10, boundary='nearest')
except ValueError:
elev_copy = elev_copy.rechunk(onedem.chunks['x'][0]+1024)
elev_overlap = da.overlap.overlap(elev_copy, depth=10, boundary='nearest')
seglabeled_overlap = da.map_overlap(seg_wrapper, elev_overlap, trim=False) # including depth=10 here will ADD another overlap
labeled_overlap = da.map_overlap(filter_wrapper, seglabeled_overlap, elev_overlap, trim=False, dtype='int32')
print("Got labeled raster of potential icebergs for an image")
labeled_arr = da.overlap.trim_overlap(labeled_overlap, depth=10)
# re-flip the labeled_arr so it matches the orientation of the original elev data that's within the xarray
for ax in flipax:
labeled_arr = da.flip(labeled_arr, axis=ax)
# import matplotlib.pyplot as plt
# print(plt.imshow(labeled_arr))
try:
del elev_copy
del elev_overlap
del seglabeled_overlap
del labeled_overlap
except NameError:
pass
print("about to get the list of possible bergs")
print('Please note the transform computation is very application specific (negative y coordinates) and may need generalizing')
print("this transform computation is particularly sensitive to axis order (y,x) because it is accessed by index number")
'''
# I think that by using concatenate=True, it might not actually be using dask for the computation
def get_bergs(labeled_blocks):
# Note: features.shapes returns a generator. However, if we try to iterate through it with a for loop, the StopIteration exception
# is not passed up into the for loop and execution hangs when it hits the end of the for loop without completing the function
block_bergs = list(poly[0]['coordinates'][0] for poly in rasterio.features.shapes(
labeled_blocks.astype('int32'), transform=onedem.attrs['transform']))[:-1]
poss_bergs_list.append(block_bergs)
da.blockwise(get_bergs, '', labeled_arr, 'ij',
meta=pd.DataFrame({'c':[]}), concatenate=True).compute()
# print(poss_bergs_list[0])
# print(type(poss_bergs_list))
poss_bergs_gdf = gpd.GeoDataFrame({'geometry':[Polygon(poly) for poly in poss_bergs_list[0]]})
# another approach could be to try and coerce the output from map_blocks into an array, but I suspect you'd still have the geospatial issue
# https://github.com/dask/dask/issues/3590#issuecomment-464609620
'''
# URL: https://stackoverflow.com/questions/66232232/produce-vector-output-from-a-dask-array/66245347?noredirect=1#comment117119583_66245347
def getpx(chunkid, chunksz):
amin = chunkid[0] * chunksz[0][0]
amax = amin + chunksz[0][0]
bmin = chunkid[1] * chunksz[1][0]
bmax = bmin + chunksz[1][0]
return (amin, amax, bmin, bmax)
def get_bl_transform(onedem, chunk0, chunk1):
# order of all inputs (and outputs) should be y, x when axis order is used
chunksz = (onedem.chunks['y'], onedem.chunks['x'])
# rasterio_trans = rasterio.transform.guard_transform(onedem.attrs["transform"])
# print(rasterio_trans)
ymini, ymaxi, xmini, xmaxi = getpx((chunk0, chunk1), chunksz)
# use rasterio Windows and rioxarray to construct transform
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html#window-transforms
chwindow = rasterio.windows.Window(xmini, ymini, xmaxi-xmini, ymaxi-ymini)
return onedem.rio.isel_window(chwindow).rio.transform(recalc=True)
@dask.delayed
def polyganize_bergs(labeled_blocks, trans):
# print("running the dask delayed function")
# NOTE: From Don: Originally, onedem was called within the delayed function...
# I have a feeling this might have caused onedem to be copied in memory a whole bunch of time
# Among the x number of workers ...
# I have pulled out the figuring out transform to the outside as a non-delayed function.
# I found that this transform calculation was very quick, so it should be okay being non-parallel.
# For future reference, I suggest scattering the data if you want to be able to access it within the workers
# https://distributed.dask.org/en/latest/locality.html
return list(poly[0]['coordinates'][0] for poly in rasterio.features.shapes(
labeled_blocks.astype('int32'), transform=trans))[:-1]
poss_bergs_list = []
# NOTE: Itertools would flatten the dask delayeds so you don't have a for loop
# this would make the complexity O(n) rather than O(n^2)
grid_delayeds = [d for d in it.chain.from_iterable(labeled_arr.to_delayed())]
for labeled_blocks in grid_delayeds:
_, chunk0, chunk1 = labeled_blocks.key
trans = get_bl_transform(onedem, chunk0, chunk1)
piece = polyganize_bergs(labeled_blocks, trans)  # the function already has the delayed decorator, so no extra dask.delayed() is needed here
poss_bergs_list.append(piece)
# for __, obj in enumerate(labeled_arr.to_delayed()):
# for bl in obj:
# bl_trans = get_bl_trans(onedem, *bl.key)
# piece = polyganize_bergs(bl, bl_trans)
# # piece = dask.delayed(polyganize_bergs)(bl, *bl.key, chunksz)
# poss_bergs_list.append(piece)
# del piece
poss_bergs_list = dask.compute(*poss_bergs_list)
# tried working with this instead of the for loops above
# poss_bergs_list = dask.compute([get_bergs(bl, *bl.key) for bl in obj for __, obj in enumerate(labeled_arr.to_delayed())])[0]
# print(poss_bergs_list)
# unnest the list of polygons returned by using dask to polygonize
concat_list = [item for sublist in poss_bergs_list for item in sublist if len(item)!=0]
# print(concat_list)
poss_bergs_gdf = gpd.GeoDataFrame({'geometry':[Polygon(poly) for poly in concat_list]})
# convert to a geodataframe, combine geometries (in case any bergs were on chunk borders), and generate new polygon list
# print(poss_bergs_gdf)
# print(poss_bergs_gdf.geometry.plot())
if len(poss_bergs_gdf) == 0:
return poss_bergs_gdf
else:
poss_berg_combined = gpd.overlay(poss_bergs_gdf, poss_bergs_gdf, how='union')
# print(poss_berg_combined)
# print(poss_berg_combined.geometry.plot())
poss_bergs = [berg for berg in poss_berg_combined.geometry]
# print(poss_bergs)
# print(len(poss_bergs))
try:
del labeled_arr
del poss_bergs_list
del concat_list
del poss_berg_combined
except NameError:
pass
else:
print("NOT USING DASK")
# create copy of elevation values so original dataset values are not impacted by image manipulations
# and positive/negative coordinate systems can be ignored (note flipax=[] below)
# something wonky is happening and when I ran this code on Pangeo I needed to NOT flip the elevation values here and then switch the bounding box y value order
# Not entirely sure what's going on, but need to be aware of this!!
# print("Note: check for proper orientation of results depending on compute environment. Pangeo results were upside down.")
elev_copy = np.copy(np.flip(onedem.elevation.values, axis=flipax))
# flipax=[]
# elev_copy = np.copy(onedem.elevation.values)
# import matplotlib.pyplot as plt
# print(plt.imshow(elev_copy))
# generate a labeled array of potential iceberg features, excluding those that are too large or small
seglabeled_arr = raster_ops.labeled_from_segmentation(elev_copy, [3,10], resolution=res, min_area=min_area, flipax=[])
print("Got labeled raster of potential icebergs for an image")
# remove features whose borders are >50% no data values (i.e. the "iceberg" edge is really a DEM edge)
labeled_arr = raster_ops.border_filtering(seglabeled_arr, elev_copy, flipax=[]).astype(seglabeled_arr.dtype)
# apparently rasterio can't handle int64 inputs, which is what border_filtering returns
import matplotlib.pyplot as plt
print(plt.imshow(labeled_arr))
# create iceberg polygons
# somehow a < 1 pixel berg made it into this list... I'm doing a secondary filtering by area in the iceberg filter step for now
poss_bergs_list = list(poly[0]['coordinates'][0] for poly in rasterio.features.shapes(labeled_arr, transform=onedem.attrs['transform']))[:-1]
poss_bergs = [Polygon(poly) for poly in poss_bergs_list]
try:
del elev_copy
del seglabeled_arr
del labeled_arr
except NameError:
pass
return poss_bergs
def getexval(potvals, coord, val):
idx = (np.abs(potvals - val)).argmin()
nearval = potvals.isel({coord: idx}).item()
return nearval
def filter_pot_bergs(poss_bergs, onedem, usedask):
"""
Test each potential iceberg for validity, and if valid compute the sea level adjustment and
get elevation pixel values for putting into the geodataframe.
Parameters
----------
poss_bergs : list of potential iceberg geometries
"""
fjord = onedem.attrs['fjord']
max_freebd = fjord_props.get_ice_thickness(fjord)/10.0
minfree = fjord_props.get_min_freeboard(fjord)
res = onedem.attrs['res'][0] #Note: the pixel area will be inaccurate if the resolution is not the same in x and y
try:
crs = onedem.attrs['crs']
except KeyError:
try:
crs = onedem.attrs['proj4']
except KeyError:
print("Your input DEM does not have a CRS attribute")
# for berg in poss_bergs:
# try: hold = Polygon(berg)
# except NotImplementedError:
# print(berg)
# Note: list of poss_bergs must be a list of shapely geometry types
# the previous version, which used Polygon(berg) for berg in poss_bergs in the next line,
# was a problem when a multipolygon got created after combining results from dask chunks
poss_gdf = gpd.GeoDataFrame({'origberg': poss_bergs}, geometry='origberg')
poss_gdf = poss_gdf.set_crs(crs)
print("Potential icebergs found: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# remove empty or invalid geometries
poss_gdf = poss_gdf[~poss_gdf.origberg.is_empty]
poss_gdf = poss_gdf[poss_gdf.origberg.is_valid]
# print(len(poss_gdf))
# 10 pixel buffer
buffer = 10 * res
# create a negatively buffered berg outline to exclude border/water pixels
poss_gdf['berg'] = poss_gdf.origberg.buffer(-buffer)
# get the largest polygon from a multipolygon (if one was created during buffering)
def get_largest_from_multi(multipolygons):
bergs = []
for multipolygon in multipolygons:
subbergs = list(multipolygon)
area = []
for sb in subbergs:
sb = Polygon(sb)
area.append(sb.area)
# print(area)
idx = np.where(area == np.nanmax(area))[0]
berg = Polygon(subbergs[idx[0]])
bergs.append(berg)
return bergs
poss_multis = (poss_gdf.berg.geom_type == "MultiPolygon")
poss_gdf.loc[poss_multis, 'berg'] = get_largest_from_multi(poss_gdf[poss_multis].berg)
del poss_multis
# print(len(poss_gdf))
# remove holes, where present in buffered polygons
poss_ints = ([len(interior) > 0 for interior in poss_gdf.berg.interiors])
poss_gdf.loc[poss_ints, 'berg'] = [Polygon(list(getcoords.exterior.coords)) for getcoords in poss_gdf[poss_ints].berg]
del poss_ints
poss_gdf = poss_gdf[~poss_gdf.berg.is_empty]
poss_gdf = poss_gdf[poss_gdf.berg.is_valid]
# print("Potential icebergs after buffering and invalid, multi, and interior polygons removed: " + str(len(poss_gdf)))
# get the polygon complexity and remove if it's above the threshold
poss_gdf['complexity'] = [vector_ops.poly_complexity(oneberg) for oneberg in poss_gdf.berg]
if res == 2.0:
complexthresh = 0.07
elif res ==4.0:
complexthresh = 0.10
else:
complexthresh = 0.08
print("using a default complexity threshold value - add one for your resolution")
poss_gdf = poss_gdf[poss_gdf.complexity < complexthresh]
print("Potential icebergs after complex ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
poss_gdf = poss_gdf.reset_index().drop(columns=["index", "complexity"])
total_bounds = poss_gdf.total_bounds
try: onedem = onedem.rio.slice_xy(*total_bounds)
except NoDataInBounds:
coords = ('x','y','x','y')
exbound_box = []
for a, b in zip(total_bounds, coords):
exbound_box.append(getexval(onedem[b], b, a))
onedem = onedem.rio.slice_xy(*exbound_box)
# onedem = onedem.chunk({'x': 1024, 'y':1024})
# onedem = onedem.rio.clip_box(*total_bounds).chunk({'x': 1024, 'y':1024})
# rasterize the icebergs; get the buffered iceberg elevation values for computing draft
poss_gdf['bergkey'] = poss_gdf.index.astype(int)
poss_gdf["geometry"] = poss_gdf.berg
gdf_grid = make_geocube(vector_data=poss_gdf,
measurements=["bergkey"],
like=onedem,
fill=np.nan
)
# gdf_grid = gdf_grid.chunk({'x': 1024, 'y':1024}) #DevGoal: make this a variable
poss_gdf["freeboardmed"] = [0.0] * len(poss_gdf.index)
poss_gdf["elevs"] = '' # so that it's object type, not int, for a variable length array
for bkey in poss_gdf["bergkey"]:
bergdem = onedem.where(gdf_grid["bergkey"] == bkey, drop=True)
pxvals = bergdem["elevation"].values
pxvals = pxvals[~np.isnan(pxvals)]
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==bkey].index[0], "elevs"] = pxvals
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==bkey].index[0], "freeboardmed"] = np.nanmedian(pxvals)
del bergdem
del gdf_grid
# skip bergs that returned all nan elevation values (and thus a nan median value)
poss_gdf = poss_gdf[poss_gdf["freeboardmed"] != np.nan]
# print(len(poss_gdf))
# skip bergs that likely contain a lot of cloud (or otherwise unrealistic elevation) pixels
poss_gdf = poss_gdf[poss_gdf['freeboardmed'] < max_freebd] # units in meters, matching those of the DEM elevation
print("Potential icebergs after too-tall ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# print(poss_gdf)
# get the regional elevation values and use to determine the sea level adjustment
def get_sl_poly(origberg):
"""
Create a polygon (with a hole) for getting pixels to use for the sea level adjustment
"""
# outer extent of ocean pixels used
outer = list(origberg.buffer(2*buffer).exterior.coords)
# inner extent of ocean pixels used
inner = list(origberg.buffer(buffer).exterior.coords)
return Polygon(outer, holes=[inner])
poss_gdf['sl_aroundberg'] = poss_gdf.origberg.apply(get_sl_poly)
def get_sl_adj(sl_aroundberg):
"""
Clip the polygon from the elevation DEM and get the pixel values.
Compute the sea level offset
"""
try:
slvals = onedem.elevation.rio.clip([sl_aroundberg], crs=onedem.attrs['crs']).values.flatten() #from_disk=True
except NoDataInBounds:
if sl_aroundberg.area < (res**2.0) * 10.0:
slvals = []
else:
try:
slvals = onedem.elevation.rio.clip([sl_aroundberg], crs=onedem.attrs['crs'], all_touched=True).values.flatten()
except NoDataInBounds:
print("Manually check this DEM for usability")
print(sl_aroundberg.area)
print((res**2.0) * 10.0)
print(onedem.elevation.rio.bounds(recalc=True))
print(sl_aroundberg.bounds)
sl_adj = np.nanmedian(slvals)
return sl_adj
# print(onedem)
onedem['elevation'] = onedem.elevation.rio.write_crs(onedem.attrs['crs'], inplace=True)
# NOTE: sea level adjustment (m) is relative to tidal height at the time of image acquisition, not 0 msl
poss_gdf["sl_adj"] = poss_gdf.sl_aroundberg.apply(get_sl_adj)
# check that the median freeboard elevation (pre-filtering) is at least x m above sea level
poss_gdf = poss_gdf[abs(poss_gdf.freeboardmed - poss_gdf.sl_adj) > minfree]
print("Potential icebergs after too small ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# apply the sea level adjustment to the elevation values
def decrease_offset_wrapper(gpdrow):
corrpxvals = icalcs.apply_decrease_offset(gpdrow["elevs"], gpdrow["sl_adj"])
# gpdrow["elevs"] = corrpxvals
return corrpxvals
poss_gdf["elevs"] = poss_gdf.apply(decrease_offset_wrapper, axis=1)
print("Final icebergs for estimating water depths: " + str(len(poss_gdf)))
return poss_gdf.berg, poss_gdf.elevs, poss_gdf.sl_adj
# Attempts to use dask to eliminate memory crashing issues; some had minor errors, but overall it
# was coarsening the data that proved most effective. This is also leftover from moving away from groupby
# if usedask == True:
# @dask.delayed
# def get_berg_px_vals(bkey, onedem, gdf_grid):
# pxvals = onedem.where(gdf_grid["bergkey"] == bkey, drop=True)["elevation"].values
# pxvals = pxvals[~np.isnan(pxvals)]
# return {key, pxvals}
# pxdict = {}
# print("using dask to iterate through the berg keys")
# bkey_delayeds = [d for d in it.chain.from_iterable(poss_gdf["bergkey"])]
# for bkey in bkey_delayeds:
# keypx_dict = get_berg_px_vals(bkey, onedem, gdf_grid)
# pxdict.update(keypx_dict)
# pxdict = dask.compute(*pxdict)
# print(pxdict)
# print(type(pxdict))
# for key, val in pxdict.items():
# poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = val
# poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(val)
# del pxdict
'''
gdf_grid['elev'] = onedem.reset_coords(drop=True)["elevation"]
gdf_grid = gdf_grid.chunk({'x': 1024, 'y':1024}) #DevGoal: make this a variable
grouped = gdf_grid.drop("spatial_ref").groupby(gdf_grid.bergkey)
@dask.delayed
def get_berg_px_vals(key, vals):
pxvals = vals.elev.values
pxvals = pxvals[~np.isnan(pxvals)]
return {key: pxvals}
if usedask == True:
pxdict = {}
print("using dask to iterate through the groups")
group_delayeds = [d for d in it.chain.from_iterable(grouped.to_delayed())]
for key, vals in group_delayeds:
keypx_dict = get_berg_px_vals(key, vals)
pxdict.update(keypx_dict)
pxdict = dask.compute(*pxdict)
print(pxdict)
print(type(pxdict))
for key, val in pxdict.items():
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = val
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(val)
del pxdict
else:
for key, vals in grouped:
pxvals = vals.elev.values
pxvals = pxvals[~np.isnan(pxvals)]
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = pxvals
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(pxvals)
del grouped
'''
# NOTE: sea level adjustment (m) is relative to tidal height at the time of image acquisition, not 0 msl
# if usedask == True:
# print("using dask geopandas to iterate through the bergs")
# import dask_geopandas as dgpd
# dask_poss_gdf = dgpd.from_geopandas(poss_gdf, npartitions=2)
# sl_adjs = dask_poss_gdf.apply(get_sl_adj).compute()
# poss_gdf["sl_adj"] = sl_adjs
# del dask_poss_gdf
# del sl_adjs
# else:
```
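A hedged usage sketch for `xarray_to_gdf`; the filename and the way the DEM stack is produced are assumptions. It only illustrates the attributes and variables the functions above actually read.
```python
# Hedged usage sketch; "dem_stack.nc" is a placeholder for a DEM time stack built
# elsewhere in icebath. The functions above expect at least:
#   ds.attrs: 'crs' (or 'proj4'), 'fjord', 'res', 'transform'
#   variables/coords: 'elevation', 'dtime', 'tidal_corr', 'min_tidal_ht', 'max_tidal_ht'
import xarray as xr

from icebath.core import build_gdf

ds = xr.open_dataset("dem_stack.nc")
berg_gdf = build_gdf.xarray_to_gdf(ds)
print(berg_gdf[["fjord", "date", "sl_adjust"]].head())
```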
|
{
"source": "JessicaS11/intake",
"score": 2
}
|
#### File: intake/tests/test_utils.py
```python
import os
import posixpath
import shutil
import pytest
import yaml
from intake.utils import make_path_posix, no_duplicate_yaml
def test_windows_file_path():
path = 'C:\\Users\\user\\fake.file'
actual = make_path_posix(path)
expected = 'C:/Users/user/fake.file'
assert actual == expected
def test_make_path_posix_removes_double_sep():
path = 'user//fake.file'
actual = make_path_posix(path)
expected = 'user/fake.file'
assert actual == expected
@pytest.mark.parametrize('path', [
'~/fake.file',
'https://example.com',
])
def test_noops(path):
"""For non windows style paths, make_path_posix should be a noop"""
assert make_path_posix(path) == path
def test_roundtrip_file_path():
path = os.path.dirname(__file__)
actual = make_path_posix(path)
assert '\\' not in actual
assert os.path.samefile(actual, path)
def test_yaml_tuples():
data = (1, 2)
text = yaml.dump(data)
with no_duplicate_yaml():
assert yaml.safe_load(text) == data
def copy_test_file(filename, target_dir):
if not os.path.exists(target_dir):
os.makedirs(target_dir) # can't use exist_ok in Python 2.7
target_dir = make_path_posix(target_dir)
# Put a catalog file in the user catalog directory
test_dir = make_path_posix(os.path.dirname(__file__))
test_catalog = posixpath.join(test_dir, filename)
target_catalog = posixpath.join(target_dir, '__unit_test_'+filename)
shutil.copyfile(test_catalog, target_catalog)
return target_catalog
```
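For quick reference, the behaviour the tests above assert can be reproduced interactively; the printed values mirror the expected results in the tests.
```python
# Interactive illustration of make_path_posix, mirroring the assertions above.
from intake.utils import make_path_posix

print(make_path_posix('C:\\Users\\user\\fake.file'))  # C:/Users/user/fake.file
print(make_path_posix('user//fake.file'))             # user/fake.file
print(make_path_posix('https://example.com'))         # unchanged
```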
|
{
"source": "JessicaS11/NSIDC-Data-Tutorials",
"score": 3
}
|
#### File: iceflow/iceflow/processing.py
```python
import h5py
import pandas as pd
import geopandas as gpd
from pathlib import PurePosixPath
import numpy as np
from datetime import datetime, timedelta
class IceFlowProcessing:
@staticmethod
def to_geopandas(filename):
with h5py.File(filename, 'r') as h5:
if 'd_elev' in list(h5.keys()):
# GLAS dataframe
df_data = {
'latitude': h5['d_lat'],
'longitude': h5['d_lon'],
'elevation': h5['d_elev'],
'time': pd.to_datetime(h5['utc_datetime'])
}
if 'elevation' in list(h5.keys()):
# ATM data
df_data = {
'latitude': h5['latitude'],
'longitude': h5['longitude'],
'elevation': h5['elevation'],
'time': pd.to_datetime(h5['utc_datetime'])
}
if 'gt1l' in list(h5.keys()): # ICESat-2
# Get dataproduct name
dataproduct = h5.attrs['identifier_product_type'].decode()
# Set variables for each ATL* product
VARIABLES = {
'ATL06': [
'/gt1l/land_ice_segments/delta_time',
'/gt1l/land_ice_segments/h_li',
'/gt1l/land_ice_segments/latitude',
'/gt1l/land_ice_segments/longitude',
'/gt1r/land_ice_segments/delta_time',
'/gt1r/land_ice_segments/h_li',
'/gt1r/land_ice_segments/latitude',
'/gt1r/land_ice_segments/longitude',
'/gt2l/land_ice_segments/delta_time',
'/gt2l/land_ice_segments/h_li',
'/gt2l/land_ice_segments/latitude',
'/gt2l/land_ice_segments/longitude',
'/gt2r/land_ice_segments/delta_time',
'/gt2r/land_ice_segments/h_li',
'/gt2r/land_ice_segments/latitude',
'/gt2r/land_ice_segments/longitude',
'/gt3l/land_ice_segments/delta_time',
'/gt3l/land_ice_segments/h_li',
'/gt3l/land_ice_segments/latitude',
'/gt3l/land_ice_segments/longitude',
'/gt3r/land_ice_segments/delta_time',
'/gt3r/land_ice_segments/h_li',
'/gt3r/land_ice_segments/latitude',
'/gt3r/land_ice_segments/longitude'],
}
# Convert variable paths to 'Path' objects for easy manipulation
variables = [PurePosixPath(v) for v in VARIABLES[dataproduct]]
# Get set of beams to extract individually as dataframes, combining them in the end
beams = {list(v.parents)[-2].name for v in variables}
dfs = []
for beam in beams:
data_dict = {}
beam_variables = [v for v in variables if beam in str(v)]
for variable in beam_variables:
# Use variable 'name' as column name. Beam will be specified in 'beam' column
column = variable.name
variable = str(variable)
try:
values = h5[variable][:]
# Convert invalid data to np.nan (only for float columns)
if 'float' in str(values.dtype):
if 'valid_min' in h5[variable].attrs:
values[values < h5[variable].attrs['valid_min']] = np.nan
if 'valid_max' in h5[variable].attrs:
values[values > h5[variable].attrs['valid_max']] = np.nan
if '_FillValue' in h5[variable].attrs:
values[values == h5[variable].attrs['_FillValue']] = np.nan
data_dict[column] = values
except KeyError:
print(f'Variable {variable} not found in {filename}.')
df_data = pd.DataFrame.from_dict(data_dict)
dfs.append(df_data)
df_data = pd.concat(dfs, sort=True)
# Add filename column for book-keeping and reset index
df_data = df_data.reset_index(drop=True)
EPOCH = datetime(2018, 1, 1, 0, 0, 0)
df_data['delta_time'] = df_data['delta_time'].map(lambda x: EPOCH + timedelta(seconds=x))
df_data.rename(columns={"delta_time": "time", "h_li": "elevation"}, inplace = True)
df_data = df_data[['time','latitude','longitude','elevation']]
df = pd.DataFrame(data=df_data)
geopandas_df = gpd.GeoDataFrame(df,
geometry=gpd.points_from_xy(df['longitude'],
df['latitude'],
crs='epsg:4326'))
geopandas_df = geopandas_df.set_index('time')
return geopandas_df
@staticmethod
def get_common_df(filename):
"""
Returns a minimal pandas dataframe for the different IceFlow datasets with the following keys
latitude, longitude, elevation, time.
Params: filename, path to an HDF5 granule file
"""
with h5py.File(filename, 'r') as h5:
if 'd_elev' in list(h5.keys()):
# GLAS dataframe
df_data = {
'latitude': h5['d_lat'],
'longitude': h5['d_lon'],
'elevation': h5['d_elev'],
'time': pd.to_datetime(h5['utc_datetime'])
}
if 'elevation' in list(h5.keys()):
# ATM data
df_data = {
'latitude': h5['latitude'],
'longitude': h5['longitude'],
'elevation': h5['elevation'],
'time': pd.to_datetime(h5['utc_datetime'])
}
df = pd.DataFrame(data=df_data)
return df
@staticmethod
def get_common_dictionary(dataset):
"""
Returns a simple dictionary with key values for different datasets
"""
if dataset == 'GLAS':
data_dict = {
'latitude': 'd_lat',
'longitude': 'd_lon',
'elevation': 'd_elev',
'time': 'utc_datetime'
}
return data_dict
if dataset == 'ATM':
# ATM data
data_dict = {
'latitude': 'latitude',
'longitude': 'longitude',
'elevation': 'elevation',
'time': 'utc_datetime'
}
return data_dict
```
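A hedged usage sketch for `IceFlowProcessing.to_geopandas`; the granule path is a placeholder, and the import path assumes the `iceflow/iceflow/processing.py` layout shown above.
```python
# Hedged usage sketch; the granule path is a placeholder for a GLAS, ATM, or
# ICESat-2 ATL06 HDF5 file downloaded through IceFlow.
from iceflow.processing import IceFlowProcessing

gdf = IceFlowProcessing.to_geopandas("data/ATL06_example.h5")
print(gdf.columns.tolist())  # latitude, longitude, elevation, geometry (indexed by time)
print(gdf.head())
```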
#### File: notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/tutorial_helper_functions.py
```python
import h5py
from pathlib import Path
import pandas as pd
import numpy as np
import geopandas as gpd
from datetime import datetime, timedelta
import pyproj
import re
import requests
import json
from statistics import mean
from xml.etree import ElementTree as ET
import os
import pprint
import shutil
import zipfile
import io
import time
def print_cmr_metadata(entry, fields=['dataset_id', 'version_id']):
'''
Prints metadata from query to CMR collections.json
entry - Metadata entry for a dataset
fields - list of metdata fields to print
'''
print(', '.join([f"{field}: {entry[field]}" for field in fields]))
def granule_info(data_dict):
'''
Prints number of granules based on inputted data set short name, version, bounding box, and temporal range. Queries the CMR and pages over results.
data_dict - a dictionary with the following CMR keywords:
'short_name',
'version',
'bounding_box',
'temporal'
'''
# set CMR API endpoint for granule search
granule_search_url = 'https://cmr.earthdata.nasa.gov/search/granules'
# add page size and page num to dictionary
data_dict['page_size'] = 100
data_dict['page_num'] = 1
granules = []
headers={'Accept': 'application/json'}
while True:
response = requests.get(granule_search_url, params=data_dict, headers=headers)
results = json.loads(response.content)
if len(results['feed']['entry']) == 0:
# Out of results, so break out of loop
break
# Collect results and increment page_num
granules.extend(results['feed']['entry'])
data_dict['page_num'] += 1
# calculate granule size
granule_sizes = [float(granule['granule_size']) for granule in granules]
print('There are', len(granules), 'granules of', data_dict['short_name'], 'version', data_dict['version'], 'over my area and time of interest.')
print(f'The average size of each granule is {mean(granule_sizes):.2f} MB and the total size of all {len(granules)} granules is {sum(granule_sizes):.2f} MB')
return len(granules)
def print_service_options(data_dict, response):
'''
Prints the subsetting, reformatting, and reprojection services available for the given data set name, version, and Earthdata Login username and password.
data_dict - a dictionary with the following keywords:
'short_name',
'version',
'uid',
'pswd'
'''
root = ET.fromstring(response.content)
#collect lists with each service option
subagent = [subset_agent.attrib for subset_agent in root.iter('SubsetAgent')]
# variable subsetting
variables = [SubsetVariable.attrib for SubsetVariable in root.iter('SubsetVariable')]
variables_raw = [variables[i]['value'] for i in range(len(variables))]
variables_join = [v if v.startswith('/') else '/' + v for v in variables_raw]
variable_vals = [v.replace(':', '/') for v in variables_join]
# reformatting
formats = [Format.attrib for Format in root.iter('Format')]
format_vals = [formats[i]['value'] for i in range(len(formats))]
if format_vals : format_vals.remove('')
# reprojection options
projections = [Projection.attrib for Projection in root.iter('Projection')]
proj_vals = []
for i in range(len(projections)):
if (projections[i]['value']) != 'NO_CHANGE' :
proj_vals.append(projections[i]['value'])
#print service information depending on service availability and select service options
print('Services available for', data_dict['short_name'],':')
print()
if len(subagent) < 1 :
print('No customization services available.')
else:
subdict = subagent[0]
if subdict['spatialSubsetting'] == 'true':
print('Bounding box subsetting')
if subdict['spatialSubsettingShapefile'] == 'true':
print('Shapefile subsetting')
if subdict['temporalSubsetting'] == 'true':
print('Temporal subsetting')
if len(variable_vals) > 0:
print('Variable subsetting')
if len(format_vals) > 0 :
print('Reformatting to the following options:', format_vals)
if len(proj_vals) > 0 :
print('Reprojection to the following options:', proj_vals)
def request_data(param_dict,session):
'''
Request data from NSIDC's API based on inputted key-value-pairs from param_dict.
Different request methods depending on 'async' or 'sync' options.
In addition to param_dict, input Earthdata login `uid` and `pswd`.
'''
# Create an output folder if the folder does not already exist.
path = str(os.getcwd() + '/Outputs')
if not os.path.exists(path):
os.mkdir(path)
# Define base URL
base_url = 'https://n5eil02u.ecs.nsidc.org/egi/request'
# Different access methods depending on request mode:
if param_dict['request_mode'] == 'async':
request = session.get(base_url, params=param_dict)
print('Request HTTP response: ', request.status_code)
# Raise bad request: Loop will stop for bad response code.
request.raise_for_status()
print()
print('Order request URL: ', request.url)
print()
esir_root = ET.fromstring(request.content)
#print('Order request response XML content: ', request.content)
#Look up order ID
orderlist = []
for order in esir_root.findall("./order/"):
orderlist.append(order.text)
orderID = orderlist[0]
print('order ID: ', orderID)
#Create status URL
statusURL = base_url + '/' + orderID
print('status URL: ', statusURL)
#Find order status
request_response = session.get(statusURL)
print('HTTP response from order response URL: ', request_response.status_code)
# Raise bad request: Loop will stop for bad response code.
request_response.raise_for_status()
request_root = ET.fromstring(request_response.content)
statuslist = []
for status in request_root.findall("./requestStatus/"):
statuslist.append(status.text)
status = statuslist[0]
#print('Data request is submitting...')
print()
print('Initial request status is ', status)
print()
#Continue loop while request is still processing
loop_response = session.get(statusURL)
loop_root = ET.fromstring(loop_response.content)
while status == 'pending' or status == 'processing':
print('Status is not complete. Trying again.')
time.sleep(10)
loop_response = session.get(statusURL)
# Raise bad request: Loop will stop for bad response code.
loop_response.raise_for_status()
loop_root = ET.fromstring(loop_response.content)
#find status
statuslist = []
for status in loop_root.findall("./requestStatus/"):
statuslist.append(status.text)
status = statuslist[0]
print('Retry request status is: ', status)
if status == 'pending' or status == 'processing':
continue
#Order can either complete, complete_with_errors, or fail:
# Provide complete_with_errors error message:
if status == 'failed':
messagelist = []
for message in loop_root.findall("./processInfo/"):
messagelist.append(message.text)
print('error messages:')
pprint.pprint(messagelist)
print()
# Download zipped order if status is complete or complete_with_errors
if status == 'complete' or status == 'complete_with_errors':
downloadURL = 'https://n5eil02u.ecs.nsidc.org/esir/' + orderID + '.zip'
print('Zip download URL: ', downloadURL)
print('Beginning download of zipped output...')
zip_response = session.get(downloadURL)
# Raise bad request: Loop will stop for bad response code.
zip_response.raise_for_status()
with zipfile.ZipFile(io.BytesIO(zip_response.content)) as z:
z.extractall(path)
print('Data request is complete.')
else: print('Request failed.')
else:
print('Requesting...')
request = session.get(base_url, params=param_dict)  # reuse the authenticated session for the synchronous request
print('HTTP response from order response URL: ', request.status_code)
request.raise_for_status()
d = request.headers['content-disposition']
fname = re.findall('filename=(.+)', d)
dirname = os.path.join(path,fname[0].strip('\"'))
print('Downloading...')
open(dirname, 'wb').write(request.content)
print('Data request is complete.')
# Unzip outputs
for z in os.listdir(path):
if z.endswith('.zip'):
zip_name = path + "/" + z
zip_ref = zipfile.ZipFile(zip_name)
zip_ref.extractall(path)
zip_ref.close()
os.remove(zip_name)
def clean_folder():
'''
Cleans up output folder by removing individual granule folders.
'''
path = str(os.getcwd() + '/Outputs')
for root, dirs, files in os.walk(path, topdown=False):
for file in files:
try:
shutil.move(os.path.join(root, file), path)
except OSError:
pass
for name in dirs:
os.rmdir(os.path.join(root, name))
def load_icesat2_as_dataframe(filepath, VARIABLES):
'''
Load points from an ICESat-2 granule 'gt<beam>' groups as DataFrame of points. Uses VARIABLES mapping
to select subset of '/gt<beam>/...' variables (Assumes these variables share dimensions)
Arguments:
filepath: path to an ATL0# granule
VARIABLES: dict mapping data product name to the list of variable paths to load
'''
ds = h5py.File(filepath, 'r')
# Get dataproduct name
dataproduct = ds.attrs['identifier_product_type'].decode()
# Convert variable paths to 'Path' objects for easy manipulation
variables = [Path(v) for v in VARIABLES[dataproduct]]
# Get set of beams to extract individially as dataframes combining in the end
beams = {list(v.parents)[-2].name for v in variables}
dfs = []
for beam in beams:
data_dict = {}
beam_variables = [v for v in variables if beam in str(v)]
for variable in beam_variables:
# Use variable 'name' as column name. Beam will be specified in 'beam' column
column = variable.name
variable = str(variable)
try:
values = ds[variable][:]
# Convert invalid data to np.nan (only for float columns)
if 'float' in str(values.dtype):
if 'valid_min' in ds[variable].attrs:
values[values < ds[variable].attrs['valid_min']] = np.nan
if 'valid_max' in ds[variable].attrs:
values[values > ds[variable].attrs['valid_max']] = np.nan
if '_FillValue' in ds[variable].attrs:
values[values == ds[variable].attrs['_FillValue']] = np.nan
data_dict[column] = values
except KeyError:
print(f'Variable {variable} not found in {filepath}. Likely an empty granule.')
raise
df = pd.DataFrame.from_dict(data_dict)
df['beam'] = beam
dfs.append(df)
df = pd.concat(dfs, sort=True)
# Add filename column for book-keeping and reset index
df['filename'] = Path(filepath).name
df = df.reset_index(drop=True)
return df
def convert_to_gdf(df):
'''
Converts a DataFrame of points with 'longitude' and 'latitude' columns to a
GeoDataFrame
'''
gdf = gpd.GeoDataFrame(
df,
geometry=gpd.points_from_xy(df.longitude, df.latitude),
crs={'init': 'epsg:4326'},
)
return gdf
def convert_delta_time(delta_time):
'''
Convert ICESat-2 'delta_time' parameter to UTC datetime
'''
EPOCH = datetime(2018, 1, 1, 0, 0, 0)
utc_datetime = EPOCH + timedelta(seconds=delta_time)
return utc_datetime
# def compute_distance(df):
# '''
# Calculates along track distance for each point within the 'gt1l', 'gt2l', and 'gt3l' beams, beginning with first beam index.
# Arguments:
# df: DataFrame with icesat-2 data
# Returns:
# add_dist added as new column to initial df
# '''
# beam_1 = df[df['beam'] == 'gt1l']
# beam_2 = df[df['beam'] == 'gt2l']
# beam_3 = df[df['beam'] == 'gt3l']
# add_dist = []
# add_dist.append(beam_1.height_segment_length_seg.values[0])
# for i in range(1, len(beam_1)):
# add_dist.append(add_dist[i-1] + beam_1.height_segment_length_seg.values[i])
# add_dist_se = pd.Series(add_dist)
# beam_1.insert(loc=0, column='add_dist', value=add_dist_se.values)
# beam_1
# add_dist = []
# add_dist.append(beam_2.height_segment_length_seg.values[0])
# for i in range(1, len(beam_2)):
# add_dist.append(add_dist[i-1] + beam_2.height_segment_length_seg.values[i])
# add_dist_se = pd.Series(add_dist)
# beam_2.insert(loc=0, column='add_dist', value=add_dist_se.values)
# beam_2
# add_dist = []
# add_dist.append(beam_3.height_segment_length_seg.values[0])
# for i in range(1, len(beam_3)):
# add_dist.append(add_dist[i-1] + beam_3.height_segment_length_seg.values[i])
# add_dist_se = pd.Series(add_dist)
# beam_3.insert(loc=0, column='add_dist', value=add_dist_se.values)
# beam_3
# beams = [beam_1,beam_2,beam_3]
# df = pd.concat(beams,ignore_index=True)
# return df
```
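A hedged sketch of how the CMR helpers above are typically driven; the short name, version, bounding box, and temporal range are example values only.
```python
# Hedged usage sketch; all search values below are examples, not recommendations.
from tutorial_helper_functions import granule_info

data_dict = {
    'short_name': 'ATL07',
    'version': '005',
    'bounding_box': '-62.8,81.7,-56.4,83.0',   # lower-left lon/lat, upper-right lon/lat
    'temporal': '2019-03-23T00:00:00Z,2019-03-23T23:59:59Z',
}

n_granules = granule_info(data_dict)
print(f"{n_granules} granules match the search.")
```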
|
{
"source": "jessicasena/WearableSensorDataGenerator",
"score": 3
}
|
#### File: jessicasena/WearableSensorDataGenerator/example.py
```python
import sys
import numpy as np
import random
from sklearn.metrics import accuracy_score, recall_score, f1_score
import scipy.stats as st
from sensordata_generator import DataGenerator
import keras
import pickle
keras.backend.set_image_data_format('channels_first')
def custom_model(shape, n_classes):
"""Dummy CNN model to classify sensor-based human activities"""
activation = 'relu'
inp = keras.layers.Input((shape[1], shape[2], shape[3]))
H = keras.layers.Conv2D(filters=16, kernel_size=(5, 1))(inp)
H = keras.layers.Activation(activation)(H)
H = keras.layers.MaxPooling2D(pool_size=(2, 1))(H)
H = keras.layers.Conv2D(filters=32, kernel_size=(5, 1))(H)
H = keras.layers.Activation(activation)(H)
H = keras.layers.MaxPooling2D(pool_size=(2, 1))(H)
H = keras.layers.Flatten()(H)
H = keras.layers.Dense(n_classes)(H)
H = keras.layers.Activation('softmax')(H)
model = keras.models.Model([inp], H)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='Adadelta')
return model
if __name__ == '__main__':
np.random.seed(12227)
# Just some variables that we will need in this example :)
dataset_fold = 'Z:/Datasets/LOSO/uncrompressed_datasets/UTD-MHAD1_1s'
batch_size = 32
n_epochs = 50
avg_acc = []
avg_recall = []
avg_f1 = []
# Loading the information regarding the dataset
# These two files are generated by npz_to_fold.py
# as well as the samples read by the DataGenerator
folds = np.load(dataset_fold + "/folds.npy", allow_pickle=True)
labels = pickle.load(open(dataset_fold + "/labels.pkl", "rb"))
for i in range(0, len(folds)):
train_idx = folds[i][0]
test_idx = folds[i][1]
        # Create the generator objects. It is important to set the batch size of the
        # testing generator to 1, or to a number that evenly divides the number of
        # test samples; otherwise, the generator will return fewer samples than expected.
training_generator = DataGenerator(dataset_fold, train_idx, labels, batch_size, shuffle=True)
validation_generator = DataGenerator(dataset_fold, train_idx, labels, batch_size, shuffle=True)
testing_generator = DataGenerator(dataset_fold, test_idx, labels, 1, shuffle=False)
# Here some useful functions to get shape and n_classes information
n_classes = training_generator.get_nclasses()
shape = training_generator.get_shape()
# Building a dummy CNN to classify the data
model = custom_model(shape, n_classes)
# Model fit using Keras generator
model.fit_generator(generator=training_generator,
epochs=n_epochs,
use_multiprocessing=False,
workers=1,
verbose=0,
steps_per_epoch=int(np.floor(len(train_idx) / batch_size)),
validation_data=validation_generator,
validation_steps=int(np.floor(len(train_idx) / batch_size)))
# Model predict using Keras generator
y_pred = model.predict_generator(testing_generator)
# Evaluation proposed by Artur et al. in the benchmark
y_pred = np.argmax(y_pred, axis=1)
y_true = np.argmax([labels[key] for key in test_idx], axis=1)
acc_fold = accuracy_score(y_true, y_pred)
avg_acc.append(acc_fold)
recall_fold = recall_score(y_true, y_pred, average='macro')
avg_recall.append(recall_fold)
f1_fold = f1_score(y_true, y_pred, average='macro')
avg_f1.append(f1_fold)
print('Accuracy[{:.4f}] Recall[{:.4f}] F1[{:.4f}] at fold[{}]'.format(acc_fold, recall_fold, f1_fold, i))
print('______________________________________________________')
del model
ic_acc = st.t.interval(0.9, len(avg_acc) - 1, loc=np.mean(avg_acc), scale=st.sem(avg_acc))
ic_recall = st.t.interval(0.9, len(avg_recall) - 1, loc=np.mean(avg_recall), scale=st.sem(avg_recall))
ic_f1 = st.t.interval(0.9, len(avg_f1) - 1, loc=np.mean(avg_f1), scale=st.sem(avg_f1))
print('Mean Accuracy[{:.4f}] IC [{:.4f}, {:.4f}]'.format(np.mean(avg_acc), ic_acc[0], ic_acc[1]))
print('Mean Recall[{:.4f}] IC [{:.4f}, {:.4f}]'.format(np.mean(avg_recall), ic_recall[0], ic_recall[1]))
print('Mean F1[{:.4f}] IC [{:.4f}, {:.4f}]'.format(np.mean(avg_f1), ic_f1[0], ic_f1[1]))
```
#### File: jessicasena/WearableSensorDataGenerator/sensordata_generator.py
```python
import numpy as np
import keras
import sys
import os
class DataGenerator(keras.utils.Sequence):
"""Generates data for Keras"""
def __init__(self, dataset_path, list_ids, labels, batch_size, shuffle, multimodal = False):
"""Initialization"""
self.indexes = np.arange(len(list_ids))
self.batch_size = batch_size
self.labels = labels
self.n_classes = len(list(labels.values())[0])
self.list_ids = list_ids
self.dataset_path = dataset_path
self.shuffle = shuffle
self.input_number = 1 if not multimodal else self.n_inputs()
self.multimodal = multimodal
self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.list_ids) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
list_ids_temp = [self.list_ids[k] for k in indexes]
x, y = self.__data_generation(list_ids_temp)
return x, y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_ids_temp):
"""Generates data containing batch_size samples"""
shape = self.get_shape()
y = np.empty((self.batch_size, self.n_classes))
if self.multimodal:
x = [np.empty((self.batch_size, shape[1], shape[2], 3)) for k in
range(self.input_number)]
for i, ID in enumerate(list_ids_temp):
sample = np.load(self.dataset_path + '/samples/' + ID + '.npy')
for j, data in enumerate(self.select_sensors(sample)):
x[j][i,] = data
y[i] = self.labels[ID]
else:
x = np.empty((self.batch_size, shape[1], shape[2], shape[3]))
for i, ID in enumerate(list_ids_temp):
                x[i,] = np.load(self.dataset_path + '/samples/' + ID + '.npy')
y[i] = self.labels[ID]
return x, y
def n_inputs(self):
dataset_name = self.dataset_path.split("/")[-1]
sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
input_vec = self.select_sensors(sample)
return len(input_vec)
def select_sensors(self, sample):
dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
data = []
if dataset_name == 'MHEALTH':
data.append(sample[:, :, 0:3]) # ACC chest-sensor
data.append(sample[:, :, 5:8]) # ACC left-ankle sensor
data.append(sample[:, :, 8:11]) # GYR left-ankle sensor
data.append(sample[:, :, 11:14]) # MAG left-ankle sensor
data.append(sample[:, :, 14:17]) # ACC right-lower-arm
data.append(sample[:, :, 17:20]) # GYR right-lower-arm
data.append(sample[:, :, 20:23]) # MAG right-lower-arm
elif dataset_name == 'PAMAP2P':
data.append(sample[:, :, 1:4]) # ACC1 over the wrist on the dominant arm
data.append(sample[:, :, 4:7]) # ACC2 over the wrist on the dominant arm
data.append(sample[:, :, 7:10]) # GYR over the wrist on the dominant arm
data.append(sample[:, :, 10:13]) # MAG over the wrist on the dominant arm
data.append(sample[:, :, 14:17]) # ACC1 chest-sensor
data.append(sample[:, :, 17:20]) # ACC2 chest-sensor
data.append(sample[:, :, 20:23]) # GYR chest-sensor
data.append(sample[:, :, 23:26]) # MAG chest-sensor
data.append(sample[:, :, 27:30]) # ACC1 on the dominant side's ankle
data.append(sample[:, :, 30:33]) # ACC2 on the dominant side's ankle
data.append(sample[:, :, 33:36]) # GYR on the dominant side's ankle
data.append(sample[:, :, 36:39]) # MAG on the dominant side's ankle
elif dataset_name == 'UTD-MHAD1_1s' or dataset_name == 'UTD-MHAD2_1s' or dataset_name == 'USCHAD':
# UTD-MHAD1_1s: ACC right-wrist
# UTD-MHAD2_1s: ACC right-thigh
# USCHAD: ACC subject’s front right hip inside a mobile phone pouch
data.append(sample[:, :, 0:3])
# UTD-MHAD1_1s: GYR right-wrist
# UTD-MHAD2_1s: GYR right-thigh
# USCHAD: GYR subject’s front right hip inside a mobile phone pouch
data.append(sample[:, :, 3:6])
elif dataset_name == 'WHARF' or dataset_name == 'WISDM':
# WHARF: ACC right-wrist
# WISDM: ACC 5 different body positions (apparently)
data.append(sample[:, :, 0:3])
else:
sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
return data
def get_shape(self):
"""Get dataset shape"""
sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
if self.multimodal:
shape = (len(self.list_ids), sample.shape[0], sample.shape[1], 3)
else:
shape = (len(self.list_ids), sample.shape[0], sample.shape[1], sample.shape[2])
return shape
def get_nclasses(self):
"""Get number of classes"""
return self.n_classes
def get_moda_names(self):
dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
data = []
if dataset_name == 'MHEALTH':
names = ["a_chest", "a_left-ankle", "g_left-ankle", "m_left-ankle",
"a_right-wrist", "g_right-wrist", "m_right-wrist"]
elif dataset_name == 'PAMAP2P':
names = ["a1_dominant-wrist", "a2_dominant-wrist", "g_dominant-wrist", "m_dominant-wrist",
"a1_chest", "a2_chest", "g_chest", "m_chest",
"a1_dominant_ankle", "a2_dominant_ankle", "g_dominant_ankle", "m_dominant_ankle"]
elif dataset_name == 'UTD-MHAD1_1s':
names = ["a_right-wrist", "g_right-wrist"]
elif dataset_name == 'UTD-MHAD2_1s':
names = ["a_right-thigh", "g_right-thigh"]
elif dataset_name == 'USCHAD':
names = ["a_front-right-hip", "g_front-right-hip"]
elif dataset_name == 'WHARF':
names = ["a_right-wrist"]
elif dataset_name == 'WISDM':
names = ["acc"]
else:
sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
return names
```
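A rough sketch (not from the original repository) of how `multimodal=True` pairs with a multi-input Keras model. The dataset path, fold layout, and the tiny placeholder network are assumptions; only the generator calls (`get_shape`, `get_nclasses`, `get_moda_names`) come from the class above.
```python
import pickle
import numpy as np
import keras
from sensordata_generator import DataGenerator

dataset_fold = '/path/to/LOSO/MHEALTH'   # hypothetical folder produced by npz_to_fold.py
folds = np.load(dataset_fold + '/folds.npy', allow_pickle=True)
labels = pickle.load(open(dataset_fold + '/labels.pkl', 'rb'))
train_idx = folds[0][0]

gen = DataGenerator(dataset_fold, train_idx, labels, batch_size=32,
                    shuffle=True, multimodal=True)
shape = gen.get_shape()                  # per-modality sample shape, e.g. (N, 1, window, 3)
names = gen.get_moda_names()             # one human-readable tag per modality

# One Input per modality; a deliberately tiny placeholder network joins them.
inputs = [keras.layers.Input((shape[1], shape[2], 3)) for _ in names]
merged = keras.layers.concatenate([keras.layers.Flatten()(inp) for inp in inputs])
out = keras.layers.Dense(gen.get_nclasses(), activation='softmax')(merged)
model = keras.models.Model(inputs, out)
model.compile(loss='categorical_crossentropy', optimizer='Adadelta')
model.fit_generator(generator=gen, epochs=1, steps_per_epoch=len(gen))
```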
|
{
"source": "jessica-tandazo/nodux_contabilidad",
"score": 2
}
|
#### File: doctype/nodux_item_price/nodux_item_price.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import throw, _
class NoduxItemPriceDuplicateItem(frappe.ValidationError):
    pass
class NoduxItemPrice(Document):
def validate(self):
self.validate_item()
self.validate_price_list()
self.check_duplicate_item()
self.update_price_list_details()
self.update_item_details()
def validate_item(self):
if not frappe.db.exists("Item", self.item_code):
throw(_("Item {0} not found").format(self.item_code))
def validate_price_list(self):
enabled = frappe.db.get_value("Nodux Price List", self.price_list, "enabled")
if not enabled:
throw(_("Price List {0} is disabled").format(self.price_list))
def check_duplicate_item(self):
if frappe.db.sql("""select name from `tabNodux Item Price`
where item_code=%s and price_list=%s and name!=%s""", (self.item_code, self.price_list, self.name)):
frappe.throw(_("Item {0} appears multiple times in Price List {1}").format(self.item_code, self.price_list),
NoduxItemPriceDuplicateItem)
# def update_price_list_details(self):
# self.buying, self.selling, self.currency = \
# #frappe.db.get_value("Nodux Price List", {"name": self.price_list, "enabled": 1},
# frappe.db.get_value("Nodux Price List", {"name": self.price_list},
# ["buying", "selling", "currency"])
def update_price_list_details(self):
self.buying, self.selling, self.currency = \
frappe.db.get_value("Nodux Price List", {"name": self.price_list, "enabled": 1},
["buying", "selling", "currency"])
def update_item_details(self):
self.item_name, self.item_description = frappe.db.get_value("Item",
self.item_code, ["item_name", "description"])
```
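As a side note on the design, the raw-SQL duplicate check above could also be written against the Frappe ORM. A minimal sketch, assuming the imports at the top of the file and the standard `frappe.get_all` filter syntax (not part of the original code):
```python
def check_duplicate_item_orm(self):
    # Same intent as check_duplicate_item above, via frappe.get_all instead of raw SQL.
    duplicates = frappe.get_all(
        "Nodux Item Price",
        filters={
            "item_code": self.item_code,
            "price_list": self.price_list,
            "name": ["!=", self.name],
        },
    )
    if duplicates:
        frappe.throw(
            _("Item {0} appears multiple times in Price List {1}").format(
                self.item_code, self.price_list),
            NoduxItemPriceDuplicateItem)
```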
|
{
"source": "jessica-tandazo/nodux-jessica",
"score": 2
}
|
#### File: nodux-jessica/nodux_sales_invoice/purchase_invoice.py
```python
from __future__ import unicode_literals
import frappe
import json
import copy
from frappe import throw, _
from frappe.utils import flt, cint
from frappe import _
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe import msgprint, _
def validate(doc, event):
if cint(doc.no_genera_retencion):
print "NO GENERA"
@frappe.whitelist()
def make_purchase_invoice_prueba(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Entries in Purchase Invoice Advance
target.set_advances()
def update_item(obj, target, source_parent):
# target.amount = flt(obj.amount) - flt(obj.billed_amt)
# target.base_amount = target.amount * flt(source_parent.conversion_rate)
# target.qty = target.amount / flt(obj.rate) if (flt(obj.rate) and flt(obj.billed_amt)) else flt(obj.qty)
target.qty = flt(obj.qty)
# item = frappe.db.get_value("Item", target.item_code, ["item_group", "buying_cost_center"], as_dict=1)
item = frappe.db.get_value("Item", target.item_code, ["cost_price"], as_dict=1)
print "ITEM", item
# target.cost_center = frappe.db.get_value("Project", obj.project, "cost_center") \
# or item.buying_cost_center \
# or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
doc = get_mapped_doc("Purchase Order", source_name, {
"Purchase Order": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Order Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "po_detail",
"parent": "purchase_order",
},
"postprocess": update_item
# "condition": lambda doc: (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
}
# "Purchase Taxes and Charges": {
# "doctype": "Purchase Taxes and Charges",
# "add_if_empty": True
# }
}, target_doc, postprocess)
return doc
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
@frappe.whitelist()
def update_taxes(iva):
print "VALOR ARGS:", iva
imp = frappe.db.sql("""select percentage from `tabImpuesto`
where naming_series = %s""",
(iva), as_dict = 1)
if not imp:
frappe.throw(_("Impuesto {0} doesn't have a defined percentage").format(iva))
    imp = imp[0]
ret ={
'total_retencion' : imp.percentage
}
return ret
```
#### File: nodux_sales_invoice/sales/sales_invoice.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _, throw
from frappe.utils import cint, flt, nowdate
import frappe.defaults
class SalesInvoice(Document):
def before_save(self):
self.docstatus = 1
for item in self.items:
if item.item_code:
product = frappe.get_doc("Item", item.item_code)
if product.total<0:
frappe.throw(("El producto {0} no se encuentra disponible en stock").format(item.item_name))
elif product.total < item.qty:
frappe.throw(("No cuenta con suficiente stock del producto {0}").format(item.item_name))
else:
product.total = product.total-item.qty
product.save()
def get_item_details_sale(self, args=None, for_update=False):
item = frappe.db.sql("""select stock_uom, description, image, item_name,
list_price, list_price_with_tax, barcode, tax from `tabItem`
where name = %s
and disabled=0
and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %s)""",
(args.get('item_code'), nowdate()), as_dict = 1)
if not item:
frappe.throw(_("Item {0} is not active or end of life has been reached").format(args.get("item_code")))
item = item[0]
ret = {
'uom' : item.stock_uom,
'description' : item.description,
'item_name' : item.item_name,
'qty' : 1,
'barcode' : item.barcode,
'unit_price' : item.list_price,
'unit_price_with_tax' : item.list_price_with_tax,
'subtotal' : item.list_price_with_tax
}
return ret
def get_item_code_sale(self, args=None, for_update=False):
throw(_("START"))
if cint(self.definir_otra_regla):
throw(_("REGLA DEFINIDA"))
if self.pricing_rule:
rule = self.pricing_rule
                porcentaje = frappe.db.get_value("Pricing Rule", {"title": ("like", rule)}, "margin_rate_or_amount")
item = frappe.db.sql("""select price_list_rate from `tabItem Price`
where item_code = %s""",
(args.get('item_code')), as_dict = 1)
if not item:
frappe.throw(_("Item {0} doesn't have a defined price").format(args.get("item_code")))
item = item[0]
                precio_unit = flt(item.price_list_rate) * (1 + flt(porcentaje) / 100)
#item = item[0]
ret = {
"barcode" : 123,
"qty" : 1,
'rate' : precio_unit
# 'description' : item.description,
# 'item_name' : item.item_name,
# 'qty' : 1,
# 'barcode' : item.barcode,
# 'unit_price' : item.list_price,
# 'unit_price_with_tax' : item.list_price_with_tax,
# 'subtotal' : item.list_price_with_tax
}
return ret
else:
throw(_("Debe elegir una regla de precios"))
# item = frappe.db.sql("""select stock_uom, description, image, item_name,
# list_price, name, list_price_with_tax from `tabItem`
# where barcode = %s
# and disabled=0
# and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %s)""",
# (args.get('barcode'), nowdate()), as_dict = 1)
#
# if not item:
# frappe.throw(_("No existe producto con codigo de barra {0}").format(args.get("barcode")))
#
# item = item[0]
#
# ret = {
# 'uom' : item.stock_uom,
# 'description' : item.description,
# 'item_name' : item.item_name,
# 'item_code' : item.name,
# 'qty' : 1,
# 'unit_price' : item.list_price,
# 'unit_price_with_tax' : item.list_price_with_tax,
# 'subtotal' : item.list_price_with_tax
# }
#
# return ret
```
|
{
"source": "jessica-taylor/quipp2",
"score": 3
}
|
#### File: python/examples/factor_analysis.py
```python
from quipp import *
def run():
num_components = 2
point_dim = 5
ComponentsType = Vector(num_components, Double)
PointType = Vector(point_dim, Double)
get_point = rand_function(ComponentsType, PointType)
def sample():
components = [normal(0, 1) for i in range(num_components)]
return (components, get_point(components))
return sample
run_factor_analysis_example(run)
```
|
{
"source": "jessica-tu/jupyter",
"score": 2
}
|
#### File: model_selection/tests/test_search.py
```python
from collections.abc import Iterable, Sized
from io import StringIO
from itertools import chain, product
from functools import partial
import pickle
import sys
from types import GeneratorType
import re
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils.fixes import sp_version
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import fit_grid_point
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier:
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert len(X) == len(Y)
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert list(grid) == [grid[i] for i in range(len(grid))]
@pytest.mark.parametrize("klass", [ParameterGrid,
partial(ParameterSampler, n_iter=10)])
@pytest.mark.parametrize(
"input, error_type, error_message",
[(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'),
([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'),
({'foo': 0}, TypeError, "Parameter.* value is not iterable .*"
r"\(key='foo', value=0\)")]
)
def test_validate_parameter_input(klass, input, error_type, error_message):
with pytest.raises(error_type, match=error_message):
klass(input)
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert isinstance(grid1, Iterable)
assert isinstance(grid1, Sized)
assert len(grid1) == 3
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert len(grid2) == 6
# loop to assert we can iterate over the grid multiple times
for i in range(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert (points ==
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert len(empty) == 1
assert list(empty) == [{}]
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert len(has_empty) == 4
assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}]
assert_grid_iter_equals_getitem(has_empty)
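# Illustrative aside (not part of the upstream test file): ParameterGrid expands a
# dict of lists into the full cross-product of settings, e.g.
#     list(ParameterGrid({'a': [1, 2], 'b': [True]}))
#     == [{'a': 1, 'b': True}, {'a': 2, 'b': True}]
# while a *list* of dicts is treated as a union of such grids, which is what the
# `has_empty` case above exercises.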
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert grid_search.best_estimator_.foo_param == 2
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def test_grid_search_pipeline_steps():
# check that parameters that are estimators are cloned before fitting
pipe = Pipeline([('regressor', LinearRegression())])
param_grid = {'regressor': [LinearRegression(), Ridge()]}
grid_search = GridSearchCV(pipe, param_grid, cv=2)
grid_search.fit(X, y)
regressor_results = grid_search.cv_results_['param_regressor']
assert isinstance(regressor_results[0], LinearRegression)
assert isinstance(regressor_results[1], Ridge)
assert not hasattr(regressor_results[0], 'coef_')
assert not hasattr(regressor_results[1], 'coef_')
assert regressor_results[0] is not grid_search.best_estimator_
assert regressor_results[1] is not grid_search.best_estimator_
# check that we didn't modify the parameter grid that was passed
assert not hasattr(param_grid['regressor'][0], 'coef_')
assert not hasattr(param_grid['regressor'][1], 'coef_')
@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
def test_SearchCV_with_fit_params(SearchCV):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = SearchCV(
clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise"
)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(10))
err_msg = "Fit parameter spam has length 1; expected"
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert grid_search_no_score.best_params_ == grid_search.best_params_
# check that we can call score and that it gives the correct result
assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc'
).fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert score_auc < 1.0
assert score_accuracy < 1.0
assert score_auc != score_accuracy
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),
GroupKFold(n_splits=3), GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)
grid_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)
random_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
for scoring in [None, ['accuracy', 'precision']]:
grid_search = GridSearchCV(
clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3
)
grid_search.fit(X, y)
assert not hasattr(grid_search, "best_estimator_") and \
hasattr(grid_search, "best_index_") and \
hasattr(grid_search, "best_params_")
# Make sure the functions predict/transform etc raise meaningful
# error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters'
% fn_name), getattr(grid_search, fn_name), X)
# Test that an invalid refit param raises appropriate error messages
for refit in ["", 5, True, 'recall', 'accuracy']:
assert_raise_message(ValueError, "For multi-metric scoring, the "
"parameter refit must be set to a scorer key",
GridSearchCV(clf, {}, refit=refit,
scoring={'acc': 'accuracy',
'prec': 'precision'}
).fit,
X, y)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC(gamma='auto')
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3)
grid_search.fit(X, y)
assert grid_search.best_estimator_.foo_param == 2
def test_grid_search_bad_param_grid():
param_dict = {"C": 1}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'int'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'str'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones((3, 2))}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert np.mean(y_pred == y_pred2) >= .9
assert C == C2
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert C == C2
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert C == C3
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert cv.best_score_ >= 0
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert np.mean(y_pred == y_test) >= 0
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert not hasattr(self, 'has_been_fit_')
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_refit_callable():
"""
Test refit=callable, which adds flexibility in identifying the
"best" estimator.
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_score`.
"""
# Fit a dummy clf with `refit=True` to get a list of keys in
# clf.cv_results_.
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=True)
clf.fit(X, y)
# Ensure that `best_index_ != 0` for this dummy clf
assert clf.best_index_ != 0
# Assert every key matches those in `cv_results`
for key in clf.cv_results_.keys():
assert key in cv_results
return cv_results['mean_test_score'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
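# Aside (not from the upstream file): a refit callable receives cv_results_ and must
# return an integer index into it; for example, a "best mean score, ties broken by
# fastest fit" rule could look like:
#     def refit_fastest_best(cv_results):
#         best = cv_results['mean_test_score'] == cv_results['mean_test_score'].max()
#         return int(np.flatnonzero(best)[np.argmin(cv_results['mean_fit_time'][best])])
# As the surrounding tests check, best_score_ is not exposed when refit is a callable.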
def test_refit_callable_invalid_type():
"""
Test implementation catches the errors when 'best_index_' returns an
invalid result.
"""
def refit_callable_invalid_type(cv_results):
"""
A dummy function tests when returned 'best_index_' is not integer.
"""
return None
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_invalid_type)
with pytest.raises(TypeError,
match='best_index_ returned is not an integer'):
clf.fit(X, y)
@pytest.mark.parametrize('out_bound_value', [-1, 2])
@pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV])
def test_refit_callable_out_bound(out_bound_value, search_cv):
"""
Test implementation catches the errors when 'best_index_' returns an
out of bound result.
"""
def refit_callable_out_bound(cv_results):
"""
A dummy function tests when returned 'best_index_' is out of bounds.
"""
return out_bound_value
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_out_bound)
with pytest.raises(IndexError, match='best_index_ index out of range'):
clf.fit(X, y)
def test_refit_callable_multi_metric():
"""
Test refit=callable in multiple metric evaluation setting
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_prec`.
"""
assert 'mean_test_prec' in cv_results
return cv_results['mean_test_prec'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'}
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring=scoring, refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(
check_X=check_X, check_y=check_y, methods_to_check=["fit"],
)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_X=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_y=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert hasattr(grid_search, "cv_results_")
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
def check_df(x):
return isinstance(x, InputFeatureType)
def check_series(x):
return isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert hasattr(grid_search, "cv_results_")
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(n_samples=50, random_state=0)
km = KMeans(random_state=0, init="random", n_init=1)
# Multi-metric evaluation unsupervised
scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']
for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring=scoring, refit=refit)
grid_search.fit(X, y)
# Both ARI and FMS can find the right number :)
assert grid_search.best_params_["n_clusters"] == 3
# Single metric evaluation unsupervised
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
assert grid_search.best_params_["n_clusters"] == 3
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert grid_search.best_params_["n_clusters"] == 4
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert search.best_params_['bandwidth'] == .1
assert search.best_score_ == 42
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert len(samples) == 10
for sample in samples:
assert sample["kernel"] in ["rbf", "linear"]
assert 0 <= sample["C"] <= 1
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
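# Aside (not from the upstream test file): ParameterSampler mixes two kinds of values,
# scipy.stats frozen distributions (sampled via .rvs()) and plain lists (sampled
# uniformly). With a fixed random_state the sampler is deterministic, e.g.
#     sampler = ParameterSampler({'C': uniform(0, 1)}, n_iter=5, random_state=0)
#     assert list(sampler) == list(sampler)
# which is what the "repeated calls yield identical parameters" assertions above rely on.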
def check_cv_results_array_types(search, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
cv_results = search.cv_results_
assert all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys)
assert all(cv_results[key].dtype == object for key in param_keys)
assert not any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys)
assert all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank'))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert cv_results['rank_test_%s' % key].dtype == np.int32
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys)
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=n_splits, param_grid=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert all(cv_results['rank_test_score'] >= 1)
assert (all(cv_results[k] >= 0) for k in score_keys
if k != 'rank_test_score')
assert (all(cv_results[k] <= 1) for k in score_keys
if 'time' not in k and
k != 'rank_test_score')
# Check cv_results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
def test_random_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
params = [{'kernel': ['rbf'], 'C': expon(scale=10),
'gamma': expon(scale=0.1)},
{'kernel': ['poly'], 'degree': [2, 3]}]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
@pytest.mark.parametrize(
"SearchCV, specialized_params",
[(GridSearchCV, {'param_grid': {'C': [1, 10]}}),
(RandomizedSearchCV,
{'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]
)
def test_search_default_iid(SearchCV, specialized_params):
    # Historically a test of the `iid` parameter; it now checks that the reported
    # mean/std test scores are the unweighted mean/std over the CV splits.
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
common_params = {'estimator': SVC(), 'cv': cv,
'return_train_score': True}
search = SearchCV(**common_params, **specialized_params)
search.fit(X, y)
test_cv_scores = np.array(
[search.cv_results_['split%d_test_score' % s][0]
for s in range(search.n_splits_)]
)
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(
[search.cv_results_['split%d_train_score' % s][0]
for s in range(search.n_splits_)]
)
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert search.cv_results_['param_C'][0] == 1
# scores are the same as above
assert_allclose(test_cv_scores, [1, 1. / 3.])
assert_allclose(train_cv_scores, [1, 1])
# Unweighted mean/std is used
assert test_mean == pytest.approx(np.mean(test_cv_scores))
assert test_std == pytest.approx(np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert train_mean == pytest.approx(1)
assert train_std == pytest.approx(0)
def test_grid_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_searches = []
for scoring in ({'accuracy': make_scorer(accuracy_score),
'recall': make_scorer(recall_score)},
'accuracy', 'recall'):
grid_search = GridSearchCV(SVC(), cv=n_splits,
param_grid=params,
scoring=scoring, refit=False)
grid_search.fit(X, y)
grid_searches.append(grid_search)
compare_cv_results_multimetric_with_single(*grid_searches)
def test_random_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
# Scipy 0.12's stats dists do not accept seed, hence we use param grid
params = dict(C=np.logspace(-4, 1, 3),
gamma=np.logspace(-5, 0, 3, base=0.1))
for refit in (True, False):
random_searches = []
for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'):
# If True, for multi-metric pass refit='accuracy'
if refit:
probability = True
refit = 'accuracy' if isinstance(scoring, tuple) else refit
else:
probability = False
clf = SVC(probability=probability, random_state=42)
random_search = RandomizedSearchCV(clf, n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
scoring=scoring,
refit=refit, random_state=0)
random_search.fit(X, y)
random_searches.append(random_search)
compare_cv_results_multimetric_with_single(*random_searches)
compare_refit_methods_when_refit_with_acc(
random_searches[0], random_searches[1], refit)
def compare_cv_results_multimetric_with_single(
search_multi, search_acc, search_rec):
"""Compare multi-metric cv_results with the ensemble of multiple
single metric cv_results from single metric grid/random search"""
assert search_multi.multimetric_
assert_array_equal(sorted(search_multi.scorer_),
('accuracy', 'recall'))
cv_results_multi = search_multi.cv_results_
cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v
for k, v in search_acc.cv_results_.items()}
cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v
for k, v in search_rec.cv_results_.items()})
# Check if score and timing are reasonable, also checks if the keys
# are present
assert all((np.all(cv_results_multi[k] <= 1) for k in (
'mean_score_time', 'std_score_time', 'mean_fit_time',
'std_fit_time')))
# Compare the keys, other than time keys, among multi-metric and
# single metric grid search results. np.testing.assert_equal performs a
# deep nested comparison of the two cv_results dicts
np.testing.assert_equal({k: v for k, v in cv_results_multi.items()
if not k.endswith('_time')},
{k: v for k, v in cv_results_acc_rec.items()
if not k.endswith('_time')})
def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
"""Compare refit multi-metric search methods with single metric methods"""
assert search_acc.refit == refit
if refit:
assert search_multi.refit == 'accuracy'
else:
assert not search_multi.refit
return # search cannot predict/score without refit
X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
for method in ('predict', 'predict_proba', 'predict_log_proba'):
assert_almost_equal(getattr(search_multi, method)(X),
getattr(search_acc, method)(X))
assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
for key in ('best_index_', 'best_score_', 'best_params_'):
assert getattr(search_multi, key) == getattr(search_acc, key)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid,
return_train_score=True)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid,
return_train_score=True)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
assert not np.allclose(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
assert not np.allclose(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold()
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv,
).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time in windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert np.all(search.cv_results_[key] >= 0)
assert np.all(search.cv_results_[key] < 1)
for key in ['mean_score_time', 'std_score_time']:
assert search.cv_results_[key][1] >= 0
assert search.cv_results_[key][0] == 0.0
assert np.all(search.cv_results_[key] < 1)
assert hasattr(search, "refit_time_")
assert isinstance(search.refit_time_, float)
assert search.refit_time_ >= 0
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert all(np.in1d(expected_keys, result_keys))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
# FIXME remove test_fit_grid_point as the function will be removed on 0.25
@ignore_warnings(category=FutureWarning)
def test_fit_grid_point():
X, y = make_classification(random_state=0)
cv = StratifiedKFold()
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}):
for train, test in cv.split(X, y):
this_scores, this_params, n_test_samples = fit_grid_point(
X, y, clone(svc), params, train, test,
scorer, verbose=False)
est = clone(svc).set_params(**params)
est.fit(X[train], y[train])
expected_score = scorer(est, X[test], y[test])
# Test the return values of fit_grid_point
assert_almost_equal(this_scores, expected_score)
assert params == this_params
assert n_test_samples == test.size
# Should raise an error upon multimetric scorer
assert_raise_message(ValueError, "For evaluating multiple scores, use "
"sklearn.model_selection.cross_validate instead.",
fit_grid_point, X, y, svc, params, train, test,
{'score': scorer}, verbose=True)
# FIXME remove test_fit_grid_point_deprecated as
# fit_grid_point will be removed on 0.25
def test_fit_grid_point_deprecated():
X, y = make_classification(random_state=0)
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
msg = ("fit_grid_point is deprecated in version 0.23 "
"and will be removed in version 0.25")
params = {'C': 0.1}
train, test = next(StratifiedKFold().split(X, y))
with pytest.warns(FutureWarning, match=msg):
fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False)
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3, cv=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold()
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert not hasattr(gs, "predict_proba")
def test_grid_search_allows_nans():
# Test GridSearchCV with SimpleImputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def score(self, X=None, Y=None):
return 0.
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
ranks = gs.cv_results_['rank_test_score']
# Check that succeeded estimators have lower ranks
assert ranks[0] <= 2 and ranks[1] <= 2
# Check that failed estimator has the highest rank
assert ranks[clf.FAILING_PARAMETER] == 3
assert gs.best_index_ != clf.FAILING_PARAMETER
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise warning if n_iter is bigger than total parameter space
params = [{'first': [0, 1], 'second': ['a', 'b', 'c']},
{'third': ['two', 'values']}]
sampler = ParameterSampler(params, n_iter=9)
n_iter = 9
grid_size = 8
expected_warning = ('The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For '
'exhaustive searches, use GridSearchCV.'
% (grid_size, n_iter, grid_size))
assert_warns_message(UserWarning, expected_warning,
list, sampler)
    # degenerates to GridSearchCV if n_iter is the same as grid_size
sampler = ParameterSampler(params, n_iter=8)
samples = list(sampler)
assert len(samples) == 8
for values in ParameterGrid(params):
assert values in samples
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert len(samples) == 99
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert len(set(hashable_samples)) == 99
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert len(samples) == 7
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
assert not hasattr(clf, "predict_proba")
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples),
return_train_score=True)
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits), return_train_score=True)
gs2.fit(X, y)
# Give generator as a cv parameter
assert isinstance(KFold(n_splits=n_splits,
shuffle=True, random_state=0).split(X, y),
GeneratorType)
gs3 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0).split(X, y),
return_train_score=True)
gs3.fit(X, y)
gs4 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0), return_train_score=True)
gs4.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# Check if generators are supported as cv and
# that the splits are consistent
np.testing.assert_equal(_pop_time_keys(gs3.cv_results_),
_pop_time_keys(gs4.cv_results_))
    # OneTimeSplitter is a non-re-entrant cv whose split can be called only
    # once. If ``cv.split`` were called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd param settings would not be evaluated,
    # as no train/test indices would be generated for the 2nd and subsequent
    # cv.split calls. This is a check to make sure cv.split is not called once
    # per param setting.
np.testing.assert_equal({k: v for k, v in gs.cv_results_.items()
if not k.endswith('_time')},
{k: v for k, v in gs2.cv_results_.items()
if not k.endswith('_time')})
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True),
return_train_score=True)
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same, as long as the same train/test indices are generated for all
    # the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
def test_custom_run_search():
def check_results(results, gscv):
exp_results = gscv.cv_results_
assert sorted(results.keys()) == sorted(exp_results)
for k in results:
if not k.endswith('_time'):
# XXX: results['params'] is a list :|
results[k] = np.asanyarray(results[k])
if results[k].dtype.kind == 'O':
assert_array_equal(exp_results[k], results[k],
err_msg='Checking ' + k)
else:
assert_allclose(exp_results[k], results[k],
err_msg='Checking ' + k)
def fit_grid(param_grid):
return GridSearchCV(clf, param_grid,
return_train_score=True).fit(X, y)
class CustomSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def _run_search(self, evaluate):
results = evaluate([{'max_depth': 1}, {'max_depth': 2}])
check_results(results, fit_grid({'max_depth': [1, 2]}))
results = evaluate([{'min_samples_split': 5},
{'min_samples_split': 10}])
check_results(results, fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}]))
# Using regressor to make sure each score differs
clf = DecisionTreeRegressor(random_state=0)
X, y = make_classification(n_samples=100, n_informative=4,
random_state=0)
mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y)
gscv = fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}])
results = mycv.cv_results_
check_results(results, gscv)
for attr in dir(gscv):
if (attr[0].islower() and attr[-1:] == '_' and
attr not in {'cv_results_', 'best_estimator_',
'refit_time_', 'classes_'}):
assert getattr(gscv, attr) == getattr(mycv, attr), \
"Attribute %s not equal" % attr
def test__custom_fit_no_run_search():
class NoRunSearchSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def fit(self, X, y=None, groups=None, **fit_params):
return self
# this should not raise any exceptions
NoRunSearchSearchCV(SVC()).fit(X, y)
class BadSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
with pytest.raises(NotImplementedError,
match="_run_search not implemented."):
# this should raise a NotImplementedError
BadSearchCV(SVC()).fit(X, y)
def test_empty_cv_iterator_error():
# Use global X, y
# create cv
cv = KFold(n_splits=3).split(X)
# pop all of it, this should cause the expected ValueError
[u for u in cv]
# cv is empty now
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='No fits were performed. '
'Was the CV iterator empty\\? '
'Were there no candidates\\?'):
ridge.fit(X[:train_size], y[:train_size])
def test_random_search_bad_cv():
# Use global X, y
class BrokenKFold(KFold):
def get_n_splits(self, *args, **kw):
return 1
# create bad cv
cv = BrokenKFold(n_splits=3)
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='cv.split and cv.get_n_splits returned '
'inconsistent results. Expected \\d+ '
'splits, got \\d+'):
ridge.fit(X[:train_size], y[:train_size])
def test_n_features_in():
# make sure grid search and random search delegate n_features_in to the
# best estimator
n_features = 4
X, y = make_classification(n_features=n_features)
gbdt = HistGradientBoostingClassifier()
param_grid = {'max_iter': [3, 4]}
gs = GridSearchCV(gbdt, param_grid)
rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1)
assert not hasattr(gs, 'n_features_in_')
assert not hasattr(rs, 'n_features_in_')
gs.fit(X, y)
rs.fit(X, y)
assert gs.n_features_in_ == n_features
assert rs.n_features_in_ == n_features
def test_search_cv__pairwise_property_delegated_to_base_estimator():
"""
Test implementation of BaseSearchCV has the _pairwise property
which matches the _pairwise property of its estimator.
    This test makes sure _pairwise is delegated to the base estimator.
Non-regression test for issue #13920.
"""
est = BaseEstimator()
attr_message = "BaseSearchCV _pairwise property must match estimator"
for _pairwise_setting in [True, False]:
setattr(est, '_pairwise', _pairwise_setting)
cv = GridSearchCV(est, {'n_neighbors': [10]})
assert _pairwise_setting == cv._pairwise, attr_message
def test_search_cv__pairwise_property_equivalence_of_precomputed():
"""
Test implementation of BaseSearchCV has the _pairwise property
which matches the _pairwise property of its estimator.
This test ensures the equivalence of 'precomputed'.
Non-regression test for issue #13920.
"""
n_samples = 50
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
grid_params = {'n_neighbors': [10]}
# defaults to euclidean metric (minkowski p = 2)
clf = KNeighborsClassifier()
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X, y)
preds_original = cv.predict(X)
# precompute euclidean metric to validate _pairwise is working
X_precomputed = euclidean_distances(X)
clf = KNeighborsClassifier(metric='precomputed')
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X_precomputed, y)
preds_precomputed = cv.predict(X_precomputed)
attr_message = "GridSearchCV not identical with precomputed metric"
assert (preds_original == preds_precomputed).all(), attr_message
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'a': [0.1, 0.01]}),
(RandomizedSearchCV, {'a': uniform(1, 3)})]
)
def test_scalar_fit_param(SearchCV, param_search):
# unofficially sanctioned tolerance for scalar values in fit_params
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15805
class TestEstimator(BaseEstimator, ClassifierMixin):
def __init__(self, a=None):
self.a = a
def fit(self, X, y, r=None):
self.r_ = r
def predict(self, X):
return np.zeros(shape=(len(X)))
model = SearchCV(TestEstimator(), param_search)
X, y = make_classification(random_state=42)
model.fit(X, y, r=42)
assert model.best_estimator_.r_ == 42
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'alpha': [0.1, 0.01]}),
(RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})]
)
def test_scalar_fit_param_compat(SearchCV, param_search):
    # check support for scalar values in fit_params, for instance in LightGBM,
    # which does not exactly respect the scikit-learn API contract but which we do
# not want to break without an explicit deprecation cycle and API
# recommendations for implementing early stopping with a user provided
# validation set. non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15805
X_train, X_valid, y_train, y_valid = train_test_split(
*make_classification(random_state=42), random_state=42
)
class _FitParamClassifier(SGDClassifier):
def fit(self, X, y, sample_weight=None, tuple_of_arrays=None,
scalar_param=None, callable_param=None):
super().fit(X, y, sample_weight=sample_weight)
assert scalar_param > 0
assert callable(callable_param)
# The tuple of arrays should be preserved as tuple.
assert isinstance(tuple_of_arrays, tuple)
assert tuple_of_arrays[0].ndim == 2
assert tuple_of_arrays[1].ndim == 1
return self
def _fit_param_callable():
pass
model = SearchCV(
_FitParamClassifier(), param_search
)
# NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which
# is not the case for the following parameters. But this abuse is common in
# popular third-party libraries and we should tolerate this behavior for
# now and be careful not to break support for those without following
# proper deprecation cycle.
fit_params = {
'tuple_of_arrays': (X_valid, y_valid),
'callable_param': _fit_param_callable,
'scalar_param': 42,
}
model.fit(X_train, y_train, **fit_params)
```
|
{
"source": "jessicatwes/ME-classifier",
"score": 3
}
|
#### File: jessicatwes/ME-classifier/2_count_distances.py
```python
import os
import sys
import csv
def count_distances(input_file_name):
    input_file = open(input_file_name)
    input_csv = csv.reader(input_file, delimiter=',')
    counts = [0 for _ in range(2999)]
    next(input_csv)  # skip the header row
    for row in input_csv:
        counts[1499 + int(row[1])] += int(row[2])
    input_file.close()
    return counts
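# Index convention for the counts list above: counts[1499 + d] holds the total
# for signed distance d, so counts[0] is d = -1499, counts[1499] is d = 0 and
# counts[2998] is d = +1499.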
def write_distance_counts(output_path, all_counts):
output_file_name = os.path.join(output_path, 'md_table.csv')
if os.path.exists(output_file_name):
os.remove(output_file_name)
output_file = open(output_file_name, 'w')
    header = ['ID', 'SRZ'] + [str(i) for i in range(-1499, 1500)]  # one column per distance in [-1499, 1499]
output_file.write(','.join(header) + '\n')
for counts in all_counts:
print(counts)
output_file.write(','.join(counts) + '\n')
output_file.close()
def count_all_distances(input_path):
all_counts = []
for srr_folder in os.listdir(input_path):
srr_path = os.path.join(input_path, srr_folder)
if not os.path.isdir(srr_path):
continue
for motif_folder in os.listdir(srr_path):
motif_path = os.path.join(srr_path, motif_folder)
if not os.path.isdir(motif_path):
continue
print('counting calls for', srr_folder, 'and', motif_folder)
input_file_name = os.path.join(motif_path, 'raw_barcode_vals.csv')
if not os.path.isfile(input_file_name):
continue
counts = count_distances(input_file_name)
all_counts.append([motif_folder, srr_folder] + [str(i) for i in counts])
return all_counts
if __name__ == '__main__':
input_path = sys.argv[1]
output_path = sys.argv[2]
all_counts = count_all_distances(input_path)
write_distance_counts(output_path, all_counts)
```
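A minimal invocation sketch for the script above (the two directory names are placeholders): it expects `<input_path>/<SRR folder>/<motif folder>/raw_barcode_vals.csv` files and writes `md_table.csv` into an existing output directory.

```python
# Hypothetical wrapper around the command-line entry point of 2_count_distances.py.
import subprocess

subprocess.run(
    ["python", "2_count_distances.py", "distance_calls/", "results/"],
    check=True,
)
```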
|
{
"source": "JessicaUppal/biodiversity-interactive-web-visualisation",
"score": 3
}
|
#### File: JessicaUppal/biodiversity-interactive-web-visualisation/app.py
```python
import pandas as pd
import numpy as np
from sqlalchemy.ext.automap import automap_base
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, render_template
# Setup flask
app = Flask(__name__)
# Establish connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/bellybutton.sqlite"
db = SQLAlchemy(app)
Base = automap_base()
Base.prepare(db.engine, reflect=True)
Samples_Metadata = Base.classes.sample_metadata
Samples = Base.classes.samples
# Set initial route
@app.route("/")
def index():
# Return to homepage
return render_template("index.html")
# Set route
@app.route("/names")
def names():
# Perform SQL query
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Get complete names list
return jsonify(list(df.columns)[2:])
# Set route
@app.route("/metadata/<sample>")
def sample_metadata(sample):
sel = [
Samples_Metadata.sample,
Samples_Metadata.ETHNICITY,
Samples_Metadata.GENDER,
Samples_Metadata.AGE,
Samples_Metadata.LOCATION,
Samples_Metadata.BBTYPE,
Samples_Metadata.WFREQ,
]
results = db.session.query(*sel).filter(Samples_Metadata.sample == sample).all()
# Create information dictionary
sample_metadata = {}
for result in results:
sample_metadata["sample"] = result[0]
sample_metadata["ETHNICITY"] = result[1]
sample_metadata["GENDER"] = result[2]
sample_metadata["AGE"] = result[3]
sample_metadata["LOCATION"] = result[4]
sample_metadata["BBTYPE"] = result[5]
sample_metadata["WFREQ"] = result[6]
print(sample_metadata)
return jsonify(sample_metadata)
#Set route
@app.route("/samples/<sample>")
def samples(sample):
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter data values above 1
sample_data = df.loc[df[sample] > 1, ["otu_id", "otu_label", sample]]
sample_data.sort_values(by=sample, ascending=False, inplace=True)
# Jsonify data
data = {
"otu_ids": sample_data.otu_id.values.tolist(),
"sample_values": sample_data[sample].values.tolist(),
"otu_labels": sample_data.otu_label.tolist(),
}
return jsonify(data)
# Run app
if __name__ == "__main__":
app.run()
```
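Once the app is serving locally (default Flask port 5000), a quick smoke test of the routes above could look like the sketch below; it assumes the `requests` package and uses whatever sample id `/names` returns first.

```python
# Hypothetical client-side check against a locally running instance of app.py.
import requests

base = "http://127.0.0.1:5000"
sample = requests.get(f"{base}/names").json()[0]                # first sample column name
print(requests.get(f"{base}/metadata/{sample}").json())         # ETHNICITY, GENDER, AGE, ...
print(requests.get(f"{base}/samples/{sample}").json().keys())   # otu_ids, sample_values, otu_labels
```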
|
{
"source": "JessicaUppal/NASA-web-scraping",
"score": 3
}
|
#### File: NASA-web-scraping/Missions_to_Mars/mars.py
```python
from flask import Flask, render_template, redirect, url_for
from flask_pymongo import PyMongo
import scrape_mars
# Setup flask
app = Flask(__name__)
# Establish mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
#Find record of data from mongo db
mars = mongo.db.mars.find_one()
#Return template and data
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
# Run scrape function
mars_data = scrape_mars.scrape()
# Update mongo database
mars.update({}, mars_data, upsert=True)
#Return to homepage
return redirect("/", code=302)
if __name__ == "__main__":
app.run()
```
#### File: NASA-web-scraping/Missions_to_Mars/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
def scrape():
# Path to driver
browser = Browser('chrome', **executable_path, headless=True)
news_title, news_paragraph = mars_news(browser)
# Scrape and store results in a dictionary
data = {
"news_title": news_title,
"news_paragraph": news_paragraph,
"facts": mars_facts(),
"featured_image": featured_image(browser),
"hemispheres": hemispheres(browser),
}
# Return data from scrape
return data
# Scrape mars news site
def mars_news(browser):
url = 'https://redplanetscience.com/'
browser.visit(url)
html = browser.html
news_soup = soup(html, 'html.parser')
# Find news title and paragraph and save in a variable
slide_elem = news_soup.select_one('div.list_text')
news_title = slide_elem.find("div", class_="content_title").get_text()
news_p = slide_elem.find("div", class_="article_teaser_body").get_text()
# Return data
return news_title, news_p
# Scrape mars space images
def featured_image(browser):
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# Click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parsing html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
# Return the image
return img_url
def mars_facts():
# Scrape mars facts html into a dataframe
df = pd.read_html("https://galaxyfacts-mars.com")[0]
# Set columns of dataframe
df.columns = ["Description", "Mars", "Earth"]
# Return dataframe as html
return df.to_html(classes="table table-striped")
def hemispheres(browser):
url = 'https://marshemispheres.com/'
browser.visit(url + 'index.html')
# Click and return href
hemisphere_url = []
for i in range(4):
browser.find_by_css("a.product-item img")[i].click()
hemi_data = scrape_hemisphere(browser.html)
hemi_data['img_url'] = url + hemi_data['img_url']
# Add hemisphere to list
hemisphere_url.append(hemi_data)
# Return to browser
browser.back()
# Return list
return hemisphere_url
def scrape_hemisphere(html_text):
# parsing html with soup
hemisphere_text = soup(html_text, "html.parser")
title_text = hemisphere_text.find("h2", class_="title").get_text()
image_text= hemisphere_text.find("a", text="Sample").get("href")
hemispheres = {
"title": title_text,
"img_url": image_text
}
return hemispheres
if __name__ == "__main__":
# Print scraped data
print(scrape())
```
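The scraper can also be exercised directly, outside the Flask route; a sketch assuming Chrome plus `webdriver_manager` are installed and the script is run from the `Missions_to_Mars` directory. The keys match the dictionary assembled in `scrape()` above.

```python
# Hypothetical direct use of the scraping module.
import scrape_mars

data = scrape_mars.scrape()
print(data["news_title"])        # latest Red Planet Science headline
print(data["featured_image"])    # full-size image URL on spaceimages-mars.com
print(len(data["hemispheres"]))  # 4 dicts, each with 'title' and 'img_url'
```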
|
{
"source": "jessicawang225/caltech-ee148-spring2020-hw01",
"score": 3
}
|
#### File: jessicawang225/caltech-ee148-spring2020-hw01/run_visualizations.py
```python
import json
import numpy as np
from PIL import Image, ImageDraw
import os
def draw(I, boxes):
for box in boxes:
draw = ImageDraw.Draw(I)
# Draw bounding box in neon yellow
draw.rectangle(box, outline=(204, 255, 0))
del draw
return I
# set the path to the downloaded data:
data_path = './data'
# set a path for saving predictions:
preds_path = './predictions'
# set a path for saving visualizations:
vis_path = './visualizations'
os.makedirs(preds_path, exist_ok=True) # create directory if needed
os.makedirs(vis_path, exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
# get bounding boxes
with open(os.path.join(preds_path, 'preds.json')) as f:
bounding_boxes = json.load(f)
for i in range(len(file_names)):
# read image using PIL:
I = Image.open(os.path.join(data_path, file_names[i]))
I = draw(I, bounding_boxes[file_names[i]])
I.save(os.path.join(vis_path, file_names[i]))
```
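For reference, a sketch of the `preds.json` layout the script expects: a mapping from image file name to a list of `[x0, y0, x1, y1]` boxes (file names and coordinates below are placeholders).

```python
# Hypothetical predictions file content; each box is passed straight to
# ImageDraw.rectangle, so values are pixel coordinates in x0, y0, x1, y1 order.
example_preds = {
    "RL-001.jpg": [[10, 20, 50, 80], [100, 40, 140, 90]],
    "RL-002.jpg": [],  # images with no detections map to an empty list
}
```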
|
{
"source": "JessicaWiedemeier/IDV",
"score": 3
}
|
#### File: resources/python/griddiag.py
```python
def GRAVITY():
""" Gravity constant """
return DerivedGridFactory.GRAVITY;
# Math functions
def atn2(S1,S2,WA=0):
""" Wrapper for atan2 built-in
<div class=jython>
ATN2 (S1, S2) = ATAN ( S1 / S2 )<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.atan2(S1,S2,WA)
def add(S1,S2,WA=0):
""" Addition
<div class=jython>
ADD (S1, S2) = S1 + S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.add(S1,S2,WA)
def mul(S1,S2,WA=0):
""" Multiply
<div class=jython>
MUL (S1, S2) = S1 * S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.multiply(S1,S2,WA)
def quo(S1,S2,WA=0):
""" Divide
<div class=jython>
QUO (S1, S2) = S1 / S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.divide(S1,S2,WA)
def sub(S1,S2,WA=0):
""" Subtract
<div class=jython>
SUB (S1, S2) = S1 - S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.subtract(S1,S2,WA)
# Scalar quantities
def adv(S,V):
""" Horizontal Advection, negative by convention
<div class=jython>
ADV ( S, V ) = - ( u * DDX (S) + v * DDY (S) )
</div>
"""
return -add(mul(ur(V),ddx(S)),mul(vr(V),ddy(S)))
def avg(S1,S2):
""" Average of 2 scalars
<div class=jython>
AVG (S1, S2) = ( S1 + S2 ) / 2
</div>
"""
return add(S1,S2)/2
def avor(V):
""" Absolute Vorticity
<div class=jython>
AVOR ( V ) = VOR ( V ) + CORL(V)
</div>
"""
relv = vor(V)
return add(relv,corl(relv))
def circs(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def corl(S):
""" Coriolis Parameter for all points in a grid
<div class=jython>
CORL = TWO_OMEGA*sin(latr)
</div>
"""
return DerivedGridFactory.createCoriolisGrid(S)
def cress(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def cros(V1,V2):
""" Vector cross product magnitude
<div class=jython>
CROS ( V1, V2 ) = u1 * v2 - u2 * v1
</div>
"""
return sub(mul(ur(V1),vr(V2)),mul(ur(V2),vr(V1)))
def ddx(S):
""" Take the derivative with respect to the domain's X coordinate
"""
return GridMath.ddx(S);
def ddy(S):
""" Take the derivative with respect to the domain's Y coordinate
"""
return GridMath.ddy(S);
def defr(V):
""" Total deformation
<div class=jython>
DEF ( V ) = ( STRD (V) ** 2 + SHR (V) ** 2 ) ** .5
</div>
"""
return mag(strd(V),shr(V))
def div(V):
""" Horizontal Divergence
<div class=jython>
DIV ( V ) = DDX ( u ) + DDY ( v )
</div>
"""
return add(ddx(ur(V)),ddy(vr(V)))
def dirn(V):
""" North relative direction of a vector
<div class=jython>
DIRN ( V ) = DIRR ( un(v), vn(v) )
</div>
"""
return dirr(DerivedGridFactory.createTrueFlowVector(V))
def dirr(V):
""" Grid relative direction of a vector
"""
return DerivedGridFactory.createVectorDirection(V)
def dot(V1,V2):
""" Vector dot product
<div class=jython>
DOT ( V1, V2 ) = u1 * u2 + v1 * v2
</div>
"""
product = mul(V1,V2)
return add(ur(product),vr(product))
def gwfs(S, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return GridUtil.smooth(S, "GWFS", int(N))
def jcbn(S1,S2):
""" Jacobian Determinant
<div class=jython>
JCBN ( S1, S2 ) = DDX (S1) * DDY (S2) - DDY (S1) * DDX (S2)
</div>
"""
return sub(mul(ddx(S1),ddy(S2)),mul(ddy(S1),ddx(S2)))
def latr(S):
    """ Latitude of all points in a grid
"""
return DerivedGridFactory.createLatitudeGrid(S)
def lap(S):
""" Laplacian operator
<div class=jython>
LAP ( S ) = DIV ( GRAD (S) )
</div>
"""
grads = grad(S)
return div(grads)
def lav(S,level1=None,level2=None, unit=None):
""" Layer Average of a multi layer grid
<div class=jython>
LAV ( S ) = ( S (level1) + S (level2) ) / 2.
</div>
"""
if level1 == None:
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_AVERAGE)
else:
return layerAverage(S,level1,level2, unit)
def ldf(S,level1,level2, unit=None):
""" Layer Difference
<div class=jython>
LDF ( S ) = S (level1) - S (level2)
</div>
"""
return layerDiff(S,level1,level2, unit);
def mag(*a):
""" Magnitude of a vector
"""
if (len(a) == 1):
return DerivedGridFactory.createVectorMagnitude(a[0]);
else:
return DerivedGridFactory.createVectorMagnitude(a[0],a[1]);
def mixr(temp,rh):
""" Mixing Ratio from Temperature, RH (requires pressure domain)
"""
return DerivedGridFactory.createMixingRatio(temp,rh)
def relh(temp,mixr):
""" Create Relative Humidity from Temperature, mixing ratio (requires pressure domain)
"""
return DerivedGridFactory.createRelativeHumidity(temp,mixr)
def pvor(S,V):
    """ Potential Vorticity (usually from theta and wind)
"""
return DerivedGridFactory.createPotentialVorticity(S,V)
def rects(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def savg(S):
""" Average over whole grid
<div class=jython>
SAVG ( S ) = average of all non-missing grid point values
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def savs(S):
""" Average over grid subset
<div class=jython>
SAVS ( S ) = average of all non-missing grid point values in the subset
area
</div>
"""
return savg(S)
def sdiv(S,V):
""" Horizontal Flux Divergence
<div class=jython>
SDIV ( S, V ) = S * DIV ( V ) + DOT ( V, GRAD ( S ) )
</div>
"""
return add(mul(S,(div(V))) , dot(V,grad(S)))
def shr(V):
""" Shear Deformation
<div class=jython>
SHR ( V ) = DDX ( v ) + DDY ( u )
</div>
"""
return add(ddx(vr(V)),ddy(ur(V)))
def sm5s(S):
""" Smooth a scalar grid using a 5-point smoother
<div class=jython>
SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM5S")
def sm9s(S):
""" Smooth a scalar grid using a 9-point smoother
<div class=jython>
SM9S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
+ .0625 * ( S (i+1,j+1) +
S (i+1,j-1) +
S (i-1,j+1) +
S (i-1,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM9S")
def strd(V):
""" Stretching Deformation
<div class=jython>
STRD ( V ) = DDX ( u ) - DDY ( v )
</div>
"""
return sub(ddx(ur(V)),ddy(vr(V)))
def thta(temp):
""" Potential Temperature from Temperature (requires pressure domain)
"""
return DerivedGridFactory.createPotentialTemperature(temp)
def thte(temp,rh):
""" Equivalent Potential Temperature from Temperature and Relative
humidity (requires pressure domain)
"""
return DerivedGridFactory.createEquivalentPotentialTemperature(temp,rh)
def un(V):
""" North relative u component
"""
return ur(DerivedGridFactory.createTrueFlowVector(V))
def ur(V):
""" Grid relative u component
"""
return DerivedGridFactory.getUComponent(V)
def vn(V):
""" North relative v component
"""
return vr(DerivedGridFactory.createTrueFlowVector(V))
def vor(V):
""" Relative Vorticity
<div class=jython>
VOR ( V ) = DDX ( v ) - DDY ( u )
</div>
"""
return sub(ddx(vr(V)),ddy(ur(V)))
def vr(V):
""" Grid relative v component
"""
return DerivedGridFactory.getVComponent(V)
def xav(S):
""" Average along a grid row
<div class=jython>
XAV (S) = ( S (X1) + S (X2) + ... + S (KXD) ) / KNT
KXD = number of points in row
KNT = number of non-missing points in row
XAV for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_X)
def xsum(S):
""" Sum along a grid row
<div class=jython>
XSUM (S) = ( S (X1) + S (X2) + ... + S (KXD) )
KXD = number of points in row
XSUM for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_X)
def yav(S):
""" Average along a grid column
<div class=jython>
YAV (S) = ( S (Y1) + S (Y2) + ... + S (KYD) ) / KNT
KYD = number of points in column
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_Y)
def ysum(S):
""" Sum along a grid column
<div class=jython>
YSUM (S) = ( S (Y1) + S (Y2) + ... + S (KYD) )
       KYD = number of points in column
YSUM for a column is stored at every point in that column.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_Y)
def zav(S):
""" Average across the levels of a grid at all points
<div class=jython>
ZAV (S) = ( S (Z1) + S (Z2) + ... + S (KZD) ) / KNT
KZD = number of levels
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def zsum(S):
""" Sum across the levels of a grid at all points
<div class=jython>
ZSUM (S) = ( S (Z1) + S (Z2) + ... + S (KZD) )
KZD = number of levels
ZSUM for a vertical column is stored at every point
</div>
"""
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_SUM)
def wshr(V, Z, top, bottom):
""" Magnitude of the vertical wind shear in a layer
<div class=jython>
WSHR ( V ) = MAG [ VLDF (V) ] / LDF (Z)
</div>
"""
dv = mag(vldf(V,top,bottom))
dz = ldf(Z,top,bottom)
return quo(dv,dz)
# Vector output
def age(obs,geo):
""" Ageostrophic wind
<div class=jython>
AGE ( S ) = [ u (OBS) - u (GEO(S)), v (OBS) - v (GEO(S)) ]
</div>
"""
return sub(obs,geo)
def circv(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def cresv(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def dvdx(V):
""" Partial x derivative of a vector
<div class=jython>
DVDX ( V ) = [ DDX (u), DDX (v) ]
</div>
"""
return vecr(ddx(ur(V)), ddx(vr(V)))
def dvdy(V):
    """ Partial y derivative of a vector
<div class=jython>
DVDY ( V ) = [ DDY (u), DDY (v) ]
</div>
"""
return vecr(ddy(ur(V)), ddy(vr(V)))
def frnt(S,V):
""" Frontogenesis function from theta and the wind
<div class=jython>
FRNT ( THTA, V ) = 1/2 * MAG ( GRAD (THTA) ) *
( DEF * COS (2 * BETA) - DIV ) <p>
Where: BETA = ASIN ( (-DDX (THTA) * COS (PSI) <br>
- DDY (THTA) * SIN (PSI))/ <br>
MAG ( GRAD (THTA) ) ) <br>
PSI = 1/2 ATAN2 ( SHR / STR ) <br>
</div>
"""
shear = shr(V)
strch = strd(V)
psi = .5*atn2(shear,strch)
dxt = ddx(S)
dyt = ddy(S)
cosd = cos(psi)
sind = sin(psi)
gradt = grad(S)
mgradt = mag(gradt)
a = -cosd*dxt-sind*dyt
beta = asin(a/mgradt)
frnto = .5*mgradt*(defr(V)*cos(2*beta)-div(V))
return frnto
def geo(z):
""" geostrophic wind from height
<div class=jython>
GEO ( S ) = [ - DDY (S) * const / CORL, DDX (S) * const / CORL ]
</div>
"""
return DerivedGridFactory.createGeostrophicWindVector(z)
def grad(S):
""" Gradient of a scalar
<div class=jython>
GRAD ( S ) = [ DDX ( S ), DDY ( S ) ]
</div>
"""
return vecr(ddx(S),ddy(S))
def gwfv(V, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return gwfs(V, N)
def inad(V1,V2):
""" Inertial advective wind
<div class=jython>
INAD ( V1, V2 ) = [ DOT ( V1, GRAD (u2) ),
DOT ( V1, GRAD (v2) ) ]
</div>
"""
return vecr(dot(V1,grad(ur(V2))),dot(V1,grad(vr(V2))))
def qvec(S,V):
""" Q-vector at a level ( K / m / s )
<div class=jython>
QVEC ( S, V ) = [ - ( DOT ( DVDX (V), GRAD (S) ) ),
- ( DOT ( DVDY (V), GRAD (S) ) ) ]
         where S can be any thermal parameter, usually THTA.
</div>
"""
grads = grad(S)
qvecu = newName(-dot(dvdx(V),grads),"qvecu")
qvecv = newName(-dot(dvdy(V),grads),"qvecv")
return vecr(qvecu,qvecv)
def qvcl(THTA,V):
""" Q-vector ( K / m / s )
<div class=jython>
QVCL ( THTA, V ) = ( 1/( D (THTA) / DP ) ) *
[ ( DOT ( DVDX (V), GRAD (THTA) ) ),
( DOT ( DVDY (V), GRAD (THTA) ) ) ]
</div>
"""
dtdp = GridMath.partial(THTA,2)
gradt = grad(THTA)
qvecudp = newName(quo(dot(dvdx(V),gradt),dtdp),"qvecudp")
qvecvdp = newName(quo(dot(dvdy(V),gradt),dtdp),"qvecvdp")
return vecr(qvecudp,qvecvdp)
def rectv(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def sm5v(V):
""" Smooth a scalar grid using a 5-point smoother (see sm5s)
"""
return sm5s(V)
def sm9v(V):
""" Smooth a scalar grid using a 9-point smoother (see sm9s)
"""
return sm9s(V)
def thrm(S, level1, level2, unit=None):
""" Thermal wind
<div class=jython>
THRM ( S ) = [ u (GEO(S)) (level1) - u (GEO(S)) (level2),
v (GEO(S)) (level1) - v (GEO(S)) (level2) ]
</div>
"""
return vldf(geo(S),level1,level2, unit)
def vadd(V1,V2):
""" add the components of 2 vectors
<div class=jython>
VADD (V1, V2) = [ u1+u2, v1+v2 ]
</div>
"""
return add(V1,V2)
def vecn(S1,S2):
""" Make a true north vector from two components
<div class=jython>
VECN ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeTrueVector(S1,S2)
def vecr(S1,S2):
""" Make a vector from two components
<div class=jython>
VECR ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeVector(S1,S2)
def vlav(V,level1,level2, unit=None):
""" calculate the vector layer average
<div class=jython>
       VLAV(V) = [(u(level1) + u(level2))/2,
                  (v(level1) + v(level2))/2]
</div>
"""
return layerAverage(V, level1, level2, unit)
def vldf(V,level1,level2, unit=None):
""" calculate the vector layer difference
<div class=jython>
VLDF(V) = [u(level1) - u(level2),
v(level1) - v(level2)]
</div>
"""
return layerDiff(V,level1,level2, unit)
def vmul(V1,V2):
""" Multiply the components of 2 vectors
<div class=jython>
VMUL (V1, V2) = [ u1*u2, v1*v2 ]
</div>
"""
return mul(V1,V2)
def vquo(V1,V2):
""" Divide the components of 2 vectors
<div class=jython>
VQUO (V1, V2) = [ u1/u2, v1/v2 ]
</div>
"""
return quo(V1,V2)
def vsub(V1,V2):
""" subtract the components of 2 vectors
<div class=jython>
VSUB (V1, V2) = [ u1-u2, v1-v2 ]
</div>
"""
return sub(V1,V2)
def LPIndex(u, v, z, t, top, bottom, unit):
""" calculate the wind shear between discrete layers
<div class=jython>
LP = 7.268DUDZ + 0.718DTDN + 0.318DUDN - 2.52
</div>
"""
Z = windShear(u, v, z, top, bottom, unit)*7.268
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
temp = newUnit(getSliceAtLevel(t, top), "temperature", "celsius")
HT = sqrt(ddx(temp)*ddx(temp) + ddy(temp)*ddy(temp))*0.718
HU = (ddx(vwind) + ddy(uwind))*0.318
L = add(noUnit(Z), add(noUnit(HU), noUnit(HT)))
L = (L - 2.520)*(-0.59)
P= 1.0/(1.0 + GridMath.applyFunctionOverGridsExt(L,"exp"))
LP = setLevel(P ,top, unit)
return LP
def EllrodIndex(u, v, z, top, bottom, unit):
    """ Calculate the Ellrod turbulence index from the wind shear between discrete layers
<div class=jython>
EI = VWS X ( DEF + DIV)
</div>
"""
VWS = windShear(u, v, z, top, bottom, unit)*100.0
#
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
DIV = (ddx(uwind) + ddy(vwind))* (-1.0)
#
DSH = ddx(vwind) + ddy(uwind)
DST = ddx(uwind) - ddy(vwind)
DEF = sqrt(DSH * DSH + DST * DST)
EI = mul(noUnit(VWS), add(noUnit(DEF), noUnit(DIV)))
return setLevel(EI, top, unit)
```
|
{
"source": "Jessica-Woods/Compare-Laptops-Australia",
"score": 3
}
|
#### File: laptopscraper/spiders/scorptec.py
```python
import scrapy
import csv
import re
from functools import partial
class ScorptecSpider(scrapy.Spider):
name = 'scorptec'
download_delay = 3
current_offset = 0
def start_requests(self):
endpoints = [
{ 'url3': 'notebooks', 'subid': '613' },
{ 'url3': 'gaming-notebooks', 'subid': '1032' },
{ 'url3': 'ultrabook', 'subid': '999' }
]
for endpoint in endpoints:
url3 = endpoint['url3']
subid = endpoint['subid']
yield self.infinite_scroll_request(
offset = self.current_offset,
callback = partial(self.parse, url3=url3, subid=subid),
url3 = url3,
subid = subid
)
def parse(self, response, url3, subid):
# Parse the details of all returned products
rows = response.css('.col-md-12')
if not rows:
self.logger.info("No rows found, exiting")
return
for product in rows:
intro = product.css('.item_intro::text').get()
if intro:
price = product.css('.item_price_discounted::text').get() or product.css('.item_price::text').get()
if price and len(re.findall('\d+', price)) > 0:
data = {
'price': price,
'intro': self.clean_intro(intro),
'url': product.css('.desc a::attr(href)').get()
}
clean_data = self.add_missing_data(self.clean_data(data))
self.logger.info(clean_data)
yield clean_data
else:
self.logger.info("Skipping row, no price found")
else:
self.logger.info("Skipping row, no intro found")
# Handle the infinite scroll
self.current_offset += 1
yield self.infinite_scroll_request(offset = self.current_offset, callback = partial(self.parse, url3 = url3, subid = subid), url3 = url3, subid = subid)
def infinite_scroll_request(self, offset, callback, url3, subid):
infinite_scroll_url = 'https://www.scorptec.com.au/ajax/product_list'
formdata = {
'action': 'get_list',
'order_by': 'popularity|desc',
'display': 'list',
'offset': str(offset),
'fetch_type': 'scroll',
'url1': 'product',
'url2': 'notebooks',
'url3': url3,
'catid': '21',
'subid': subid
}
return scrapy.FormRequest(url=infinite_scroll_url, callback=callback, formdata=formdata)
# Some intros are malformed (full stop instead of comma for specs).
# We fix them so the rest of the parsing doesn't break
def clean_intro(self, intro):
intro = intro.replace('Dell Latitude 7490 Ultrabook.', 'Dell Latitude 7490 Ultrabook,')
return intro
# Some laptops are missing some specs, we can hardcode them as we can look up
# their data manually
def add_missing_data(self, product):
# Notebooks
if product['name'] == 'Lenovo IdeaPad V130 Iron Grey Notebook':
product['graphics_card'] = {
'brand': 'Intel',
'discrete': False,
'raw_name': 'Intel HD Graphics 620',
'model': 'HD',
'model_power': 0,
'model_number': '620',
'name': 'Intel HD 620'
}
if product['name'] == 'Acer Spin 5 2-in-1 Notebook':
product['weight_kgs'] = '1.6'
if product['name'] == 'Dell Latitude 3590 Notebook':
product['weight_kgs'] = '2.02'
if product['name'] == 'HP ProBook 645 G4 Notebook':
product['storage'] = [{
"hdd_gbs": "256",
"is_ssd": True
}]
# Gaming Laptops
if product['name'] == 'Acer Nitro Gaming Notebook':
product['weight_kgs'] = '2.3'
if product['name'] == 'Acer Predator Helios G3 Gaming Notebook':
product['weight_kgs'] = '2.56'
if product['name'] == 'Aorus 15-W9 Gaming Notebook':
product['weight_kgs'] = '2.4'
if product['name'] == 'Aorus 15-X9 Gaming Notebook':
product['weight_kgs'] = '2.4'
if product['name'] == 'ASUS TUF FX505GE Gaming Notebook':
product['weight_kgs'] = '2.2'
if product['name'] == 'ASUS ROG Zephyrus GX531GM Gaming Notebook':
product['weight_kgs'] = '2.1'
if product['name'] == 'MSI GS65 Stealth Black Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI GS75 8SE Stealth Gaming Notebook':
product['weight_kgs'] = '2.25'
if product['name'] == 'MSI GS75 8SF Stealth Gaming Notebook':
product['weight_kgs'] = '2.25'
if product['name'] == 'MSI GS65 Stealth 9SE Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI GE75 Raider Black Gaming Notebook':
product['weight_kgs'] = '2.64'
if product['name'] == 'MSI GE75 Raider Gaming Notebook':
product['weight_kgs'] = '2.61'
if product['name'] == 'MSI GE75 Raider 9SE Gaming Notebook':
product['weight_kgs'] = '2.64'
if product['name'] == 'MSI GE63 Raider Black Gaming Notebook':
product['weight_kgs'] = '2.6'
if product['name'] == 'MSI GT75 8SG Titan Black Gaming Notebook':
product['weight_kgs'] = '4.56'
if product['name'] == 'MSI GT75 8SF Black Gaming Notebook':
product['weight_kgs'] = '4.56'
if product['name'] == 'MSI P65 9SE Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI P65 9SF Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI P65 Creator 9SE Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI P75-9SF Gaming Notebook':
product['weight_kgs'] = '2.25'
# Ultrabooks
if product['name'] == 'Toshiba Portege X20W Ultrabook':
product['graphics_card'] = {
'brand': 'Intel',
'discrete': False,
'raw_name': 'Intel UHD Graphics 620',
'model': 'UHD',
'model_power': 0,
'model_number': '620',
'name': 'Intel UHD 620'
}
if product['name'] == 'Acer Swift 5 Ultrabook':
product['weight_kgs'] = '0.97'
if product['name'] == 'Lenovo ThinkPad X1 Yoga Gen 3 Ultrabook':
product['weight_kgs'] = '1.4'
if product['name'] == 'Dell Latitude 7490 Ultrabook':
product['weight_kgs'] = '1.4'
if product['name'] == 'MSI GE75 Raider 9SF Gaming Notebook':
product['weight_kgs'] = '2.64'
if product['name'] == 'MSI GL63 8SD Gaming Notebook':
product['weight_kgs'] = '2.3'
if product['name'] == 'MSI GL63 8SC Gaming Notebook':
product['weight_kgs'] = '2.3'
if product['name'] == 'MSI GS65 Stealth 9SF Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI GS65 Stealth 9SG Gaming Notebook':
product['weight_kgs'] = '1.9'
if product['name'] == 'MSI GS75 9SG Gaming Notebook':
product['weight_kgs'] = '2.28'
if product['name'] == 'MSI GS75 Stealth 9SE Gaming Notebook':
product['weight_kgs'] = '2.28'
if product['name'] == 'MSI GT75 Titan 9SF Gaming Notebook':
product['weight_kgs'] = '4.56'
if product['name'] == 'MSI GT75 Titan 9SG Gaming Notebook':
product['weight_kgs'] = '4.56'
return product
# Clean various fields and interpret the intro into more structured data
def clean_data(self, product):
self.logger.info(product)
# Price has newlines and other garbage in it, we just want the digits and
# the decimal place
price = re.findall('[\d.]+', product['price'])[0]
# The intro is a comma separated bunch of fields. We're going
# to try and infer the specs from it
#
# A lot of the data seems to have space issues so we also strip the prefixed/postfixed
# whitespace from everything
intro_csv = [field.strip() for field in list(csv.reader([product['intro']]))[0]]
self.logger.info(intro_csv)
# The first field seems to _always_ be laptop name
name = intro_csv[0]
self.logger.info(name)
# The first word of the name also seems to be the brand
brand = name.split(' ', 1)[0]
# CPU seems to be consistently in the second column. Unfortunately no GHz
cpu = intro_csv[1]
# Ram seems to follow CPU. Typically it is either in the format "16GB" or "8GB (1x 8GB) RAM". For our purposes
# we can just take the total
#
# We're also assuming all ram is in GBs
ram_gbs = re.findall('[\d.]+', intro_csv[2])[0]
# The hard drive string typically contains "256GB" "SSD" "1TB" and "HDD" in addition to other text
# that is difficult to normalize. Additionally a laptop may have multiple hard drives.
#
# In our case we look for the pairing of a size and "SSD" or "HDD" and use that to generate
# a hard drive array
storage = []
        # Parentheses break everything, let's get rid of all parenthesised text
#
# This won't work for nested parenthesis but they don't seem to occur
hdd_raw_no_parenthesis = re.sub(r'\(.*\)', '', intro_csv[3])
        hdd_raw_strings = hdd_raw_no_parenthesis.split() # e.g. ['512GB', 'M.2', 'PCIE', 'SSD', '+', '1TB', 'HDD']
self.logger.info(hdd_raw_strings)
hdd_gbs = None
for hdd_raw_string in hdd_raw_strings:
# If we haven't found a size string (i.e. 512GB or 1TB) look for it.
if hdd_gbs == None:
if hdd_raw_string.endswith("GB") or hdd_raw_string.endswith("TB"):
hdd_gbs = re.findall('[\d.]+', hdd_raw_string)[0]
if hdd_raw_string.endswith("TB"):
hdd_gbs = str(int(hdd_gbs) * 1024)
else:
# We have a previous size string, now we want 'SSD' or 'HDD'
if hdd_raw_string == 'SSD' or hdd_raw_string == 'HDD' or hdd_raw_string == 'SSHD':
is_ssd = hdd_raw_string == 'SSD'
storage.append({
'hdd_gbs': hdd_gbs,
'is_ssd': is_ssd
})
hdd_gbs = None
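        # Worked example of the loop above: "256GB SSD + 1TB HDD" splits into
        # ['256GB', 'SSD', '+', '1TB', 'HDD'] and yields
        # [{'hdd_gbs': '256', 'is_ssd': True}, {'hdd_gbs': '1024', 'is_ssd': False}]
        # (TB sizes are converted to GB by multiplying by 1024).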
        # Screen size seems to have a simple format: <x>inch (HD|FHD) (IPS). For our use
        # case we'll just take the size, though extending to quality markers in the future
# would be nice
#
# It's _usually_ in index 4 but once we found one in index 5, so we search for a
# number ending in inch.
screen_size_inches = None
for field in intro_csv:
if 'inch' in field.lower():
screen_size_inches = re.findall('[\d.]+', field)[0]
break
# All laptops seem to have a "graphics" section. We assume that "Intel" means
# onboard and anything else means discrete
#
# We also cut this up into discrete parts to try and make it easier to parse
#
# This isn't always in the same position so we search for a list of known brands.
graphics_card = {}
for field in intro_csv:
lfield = field.lower()
is_graphics_field = False
if lfield.startswith('geforce'):
is_graphics_field = True
graphics_card['brand'] = 'GeForce'
graphics_card['discrete'] = True
elif lfield.startswith('radeon'):
is_graphics_field = True
graphics_card['brand'] = 'Radeon'
graphics_card['discrete'] = True
elif lfield.startswith('intel'):
is_graphics_field = True
graphics_card['brand'] = 'Intel'
graphics_card['discrete'] = False
if is_graphics_field:
graphics_card['raw_name'] = field
# Set the graphics card model and power according to my
# completely unbiased opinions
if 'geforce' in lfield and 'rtx' in lfield:
graphics_card['model'] = 'RTX'
graphics_card['model_power'] = 4
elif 'geforce' in lfield and 'gtx' in lfield:
graphics_card['model'] = 'GTX'
graphics_card['model_power'] = 3
elif 'geforce' in lfield and 'mx' in lfield:
graphics_card['model'] = 'MX'
graphics_card['model_power'] = 2
elif 'radeon' in lfield and 'rx' in lfield:
graphics_card['model'] = 'RX'
graphics_card['model_power'] = 2
elif 'radeon' in lfield and 'vega' in lfield:
graphics_card['model'] = 'Vega'
graphics_card['model_power'] = 0
elif 'intel' in lfield and 'uhd' in lfield:
graphics_card['model'] = 'UHD'
graphics_card['model_power'] = 1
elif 'intel' in lfield and 'hd' in lfield:
graphics_card['model'] = 'HD'
graphics_card['model_power'] = 0
elif 'intel' in lfield:
graphics_card['model'] = 'HD'
graphics_card['model_power'] = 0
else:
self.logger.info('UNKNOWN GRAPHICS CARD MODEL: ' + lfield)
# Grab all the numbers out of the string. We're going to assume things
graphics_numbers = re.findall('[\d]+', lfield)
# Assume the first number in the string is the model number
if len(graphics_numbers) > 0:
graphics_card['model_number'] = re.findall('[\d]+', lfield)[0]
elif 'intel' in lfield:
# If we don't know the model number 99% of the time it's a 620
graphics_card['model_number'] = '620'
# Assume the second number is the amount of memory
if len(graphics_numbers) > 1:
graphics_card['memory_gbs'] = int(graphics_numbers[1])
# If our memory exceeds 32GB then we are definitely not a graphics
# card and this is a false positive
if graphics_card['memory_gbs'] > 32:
graphics_card = {}
continue
# Also, if the string contains the word "SSD". It's not a
# graphics card
if 'ssd' in lfield:
graphics_card = {}
continue
# Compute a "nice" name from the data we have. Intel has
# slightly different naming conventions so we also account
# for them here
if 'model' in graphics_card:
graphics_card['name'] = graphics_card['brand']
graphics_card['name'] += ' ' + graphics_card['model']
if graphics_card['brand'] == 'Intel':
graphics_card['name'] += ' '
if 'model_number' in graphics_card:
graphics_card['name'] += graphics_card['model_number']
if 'memory_gbs' in graphics_card and graphics_card['memory_gbs'] > 0:
graphics_card['name'] += ' (' + str(graphics_card['memory_gbs']) + 'GB)'
else:
graphics_card['name'] = graphics_card['raw_name']
break
# Weight doesn't always appear at the same index so we need
        # to scan for a string ending with "kg"
weight_kgs = None
for field in intro_csv:
if field.endswith('kg'):
weight_kgs = re.findall('[\d.]+', field)[0]
break
return {
'name': name,
'brand': brand,
'cpu': cpu,
'ram_gbs': ram_gbs,
'storage': storage,
'screen_size_inches': screen_size_inches,
'graphics_card': graphics_card,
'weight_kgs': weight_kgs,
'price_aud': price,
'intro': product['intro'],
'url': product['url']
}
```
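Besides `scrapy crawl scorptec`, the spider can be driven from a small script; a sketch assuming Scrapy >= 2.1 (for the `FEEDS` setting) and that the `laptopscraper` package is importable. The output file name is a placeholder.

```python
# Hypothetical runner for the spider defined above.
from scrapy.crawler import CrawlerProcess

from laptopscraper.spiders.scorptec import ScorptecSpider

process = CrawlerProcess(settings={
    "FEEDS": {"laptops.json": {"format": "json"}},
})
process.crawl(ScorptecSpider)
process.start()  # blocks until the crawl finishes
```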
|
{
"source": "Jessica-Xia/web-scraping-challenge",
"score": 3
}
|
#### File: Jessica-Xia/web-scraping-challenge/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import time
def init_browser():
executable_path = {"executable_path": "chromedriver.exe"}
return Browser("chrome", **executable_path, headless=False)
def scrape_news():
browser = init_browser()
url="https://mars.nasa.gov/news/"
browser.visit(url)
time.sleep(1)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
content = soup.find("ul", class_="item_list")
articles = content.find_all("li")
title_list=[]
text_list=[]
for article in articles:
news_title=article.find("div", class_="content_title").text
title_list.append(news_title)
news_p=article.find("div", class_="article_teaser_body").text
text_list.append(news_p)
latest_title=title_list[0]
latest_news=text_list[0]
browser.quit()
return latest_title, latest_news
def scrape_image():
browser = init_browser()
url="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
time.sleep(1)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
pictures=soup.find('ul', class_="articles").find_all("li")
complete_url=[]
for picture in pictures:
url=picture.find('div', class_='img').find('img')['src']
complete_url.append("https://www.jpl.nasa.gov"+url)
featured_image_url=complete_url[0]
browser.quit()
return featured_image_url
def scrape_weather():
browser = init_browser()
url="https://twitter.com/marswxreport?lang=en"
browser.visit(url)
time.sleep(1)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
tweets = soup.find('div', class_="stream").find_all("li", class_="js-stream-item stream-item stream-item")
record=[]
for tweet in tweets:
content = tweet.find('div', class_="js-tweet-text-container").find('p').text
if content[0:7]=='InSight':
record.append(content)
Latest_weather=record[0]
browser.quit()
return Latest_weather
def scrape_fact():
# The facts table can be read straight into pandas; no browser is needed here.
url="https://space-facts.com/mars/"
table = pd.read_html(url)[0]
fact_html=table.to_html(header=False, index=False)
return fact_html
def scrape_hemisphere():
browser = init_browser()
url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
time.sleep(1)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
items=soup.find_all('div', class_="item")
hemisphere_image_urls=[]
for item in items:
link=item.find("a", class_="itemLink product-item").find('img')['src']
img_url= "https://astrogeology.usgs.gov/" + link
img_title=item.find("div", class_="description").find("a", class_="itemLink product-item").find("h3").text
hemisphere_image_urls.append({
"title" : img_title,
"img_url": img_url
})
browser.quit()
return hemisphere_image_urls
def scrape_info():
data={}
data["news_title"], data["news_p"]=scrape_news()
data["featured_image_url"]=scrape_image()
data["current_weather"]=scrape_weather()
data["mars_facts"]= scrape_fact()
data["mars_hemisphere" ]=scrape_hemisphere()
return data
```
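For context, a minimal sketch of driving these scrapers from the bottom of the module (or after `from scrape_mars import scrape_info`); the `mars_data.json` filename is only illustrative:
```python
import json

if __name__ == "__main__":
    # Assumes chromedriver.exe is available as expected by init_browser() above.
    data = scrape_info()
    print(data["news_title"])
    with open("mars_data.json", "w") as f:
        json.dump(data, f, indent=2, default=str)
```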
|
{
"source": "jessicaxuwang/MRNet",
"score": 3
}
|
#### File: MRNet/src/dataloader.py
```python
import os
import sys
import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
import torchvision
import albumentations as A
from constants import *
def aug_img(im, transform):
"""Augment the image stack."""
im = np.transpose(im, [1, 2, 0])
im = transform(image=im)['image']
im = np.transpose(im, [2, 0, 1])
return im
def normalize(vol, rgb=True, transform=None):
pad = int((vol.shape[2] - INPUT_DIM)/2)
if pad != 0:
vol = vol[:,pad:-pad,pad:-pad]
if transform:
vol = aug_img(vol, transform)
vol = (vol - np.min(vol)) / (np.max(vol) - np.min(vol)) * MAX_PIXEL_VAL
# normalize
vol = (vol - MEAN) / STDDEV
# convert to RGB
if rgb:
vol = np.stack((vol,)*3, axis=1)
else:
vol = np.expand_dims(vol, 1)
return vol
class Dataset(data.Dataset):
def __init__(self, data_dir, meta, rgb=True, transform=None, cat='all'):
super().__init__()
self.meta = meta
self.data_dir = data_dir
if cat == 'all':
self.category = ['abnormal', 'acl', 'meniscus']
else:
self.category = [cat]
self.img_type = ['axial', 'coronal', 'sagittal']
self.rgb = rgb
self.transform = transform
def __getitem__(self, index):
row = self.meta.iloc[index]
data_item = {}
for im_type in self.img_type:
path = os.path.join(self.data_dir, row['sub_dir'],
im_type, row['Case'] + '.npy')
with open(path, 'rb') as f:
vol = np.load(f).astype(np.float32)
data_item[im_type] = normalize(vol, self.rgb, self.transform)
label = row[self.category].values.astype(np.float32)
return {'data': data_item, 'label': label}
def __len__(self):
return self.meta.shape[0]
def get_data_loader(data_dir, meta, shuffle=True, rgb=True, in_memory=False,
transform=None, cat='all'):
dataset = Dataset(data_dir, meta, rgb=rgb, transform=transform, cat=cat)
loader = data.DataLoader(dataset, batch_size=1, num_workers=4,
worker_init_fn=lambda x: np.random.seed(
int(torch.initial_seed() + x) % (2 ** 32 - 1)),
shuffle=shuffle)
return loader
```
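A minimal sketch of wiring the loader up. `INPUT_DIM`, `MAX_PIXEL_VAL`, `MEAN` and `STDDEV` come from the project's `constants` module, and the metadata columns (`sub_dir`, `Case`, plus one column per label) are inferred from how `__getitem__` reads them, so treat the frame and path below as illustrative:
```python
import albumentations as A
import pandas as pd

# Hypothetical metadata; the real project derives this from the MRNet CSV files.
meta = pd.DataFrame({
    'sub_dir': ['train'],
    'Case': ['0001'],
    'abnormal': [1], 'acl': [0], 'meniscus': [0],
})

# Per-slice augmentation applied inside aug_img().
transform = A.Compose([A.HorizontalFlip(p=0.5)])

loader = get_data_loader('/path/to/MRNet-v1.0', meta, shuffle=True,
                         rgb=True, transform=transform, cat='all')
batch = next(iter(loader))
print(batch['data']['axial'].shape, batch['label'])
```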
|
{
"source": "jessicaychen/OCT-Image-Segmentation-ML",
"score": 3
}
|
#### File: jessicaychen/OCT-Image-Segmentation-ML/Pre-process.py
```python
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os
from PIL import Image
#import numpy as np
#import cv2
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#save_dir = r'/Users/JessicaChen/Desktop/ML Data/Processed-input/'
save_dir = r'/Users/JessicaChen/Desktop/ML Data/Processed-mask/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
#path = "/Users/JessicaChen/Desktop/ML Data/Original cSLOs/"
path = "/Users/JessicaChen/Desktop/ML Data/Masked cSLOs/"
dirs = os.listdir(path)
# resize images to 256x256
def process():
for item in dirs:
if not item.startswith('.') and os.path.isfile(os.path.join(path, item)):
im = Image.open(path + item).convert("L") #grayscale
f = os.path.splitext(item)[0]
new_dir = str(save_dir) + str(f)
imResize = im.resize((256,256), Image.LANCZOS) # resize (ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter)
imResize.save(new_dir + '.jpg', 'JPEG', quality=90)
process()
# Check
#import matplotlib.pyplot as plt
#img = plt.imread('/Users/JessicaChen/Desktop/ML Data/Processed/cSLO00.jpg')
#plt.imshow(img,cmap = "gray")
#print(img.shape) # (256,256) - 1 channel/grayscaled
```
|
{
"source": "jessicayuen/envoy",
"score": 2
}
|
#### File: examples/front-proxy/service.py
```python
from flask import Flask
from flask import request
import os
import requests
import socket
import sys
app = Flask(__name__)
TRACE_HEADERS_TO_PROPAGATE = [
'X-Ot-Span-Context',
'X-Request-Id',
# Zipkin headers
'X-B3-TraceId',
'X-B3-SpanId',
'X-B3-ParentSpanId',
'X-B3-Sampled',
'X-B3-Flags',
# Jaeger header (for native client)
"uber-trace-id",
# SkyWalking headers.
"sw8"
]
@app.route('/service/<service_number>')
def hello(service_number):
return ('Hello from behind Envoy (service {})! hostname: {} resolved '
'hostname: {}\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(),
socket.gethostbyname(socket.gethostname())))
@app.route('/trace/<service_number>')
def trace(service_number):
headers = {}
# call service 2 from service 1
if int(os.environ['SERVICE_NAME']) == 1:
for header in TRACE_HEADERS_TO_PROPAGATE:
if header in request.headers:
headers[header] = request.headers[header]
requests.get("http://localhost:9000/trace/2", headers=headers)
return ('Hello from behind Envoy (service {})! hostname: {} resolved '
'hostname: {}\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(),
socket.gethostbyname(socket.gethostname())))
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080, debug=True)
```
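A quick sketch of exercising both endpoints from the host once the example's containers are up; the proxy address and port below are assumptions, so adjust them to whatever your docker-compose setup actually maps:
```python
import requests

BASE = "http://localhost:8080"  # hypothetical front-proxy address

print(requests.get(f"{BASE}/service/1").text)

# /trace/1 makes service 1 call service 2, forwarding the headers listed in
# TRACE_HEADERS_TO_PROPAGATE above so both spans join a single trace.
print(requests.get(f"{BASE}/trace/1").text)
```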
#### File: tools/code_format/paths.py
```python
import os
import os.path
import shutil
def get_buildifier():
return os.getenv("BUILDIFIER_BIN") or (os.path.expandvars("$GOPATH/bin/buildifier")
if os.getenv("GOPATH") else shutil.which("buildifier"))
def get_buildozer():
return os.getenv("BUILDOZER_BIN") or (os.path.expandvars("$GOPATH/bin/buildozer")
if os.getenv("GOPATH") else shutil.which("buildozer"))
```
#### File: tools/dependency/release_dates.py
```python
import os
import sys
import github
import exports
import utils
# Thrown on errors related to release date.
class ReleaseDateError(Exception):
pass
# Format a datetime object as UTC YYYY-MM-DD.
def format_utc_date(date):
# We only handle naive datetime objects right now, which is what PyGithub
# appears to be handing us.
assert (date.tzinfo is None)
return date.date().isoformat()
# Obtain latest release version and compare against metadata version, warn on
# mismatch.
def verify_and_print_latest_release(dep, repo, metadata_version, release_date):
try:
latest_release = repo.get_latest_release()
if latest_release.created_at > release_date and latest_release.tag_name != metadata_version:
print(f'*WARNING* {dep} has a newer release than {metadata_version}@<{release_date}>: '
f'{latest_release.tag_name}@<{latest_release.created_at}>')
except github.UnknownObjectException:
pass
# Print GitHub release date, throw ReleaseDateError on mismatch with metadata release date.
def verify_and_print_release_date(dep, github_release_date, metadata_release_date):
mismatch = ''
iso_release_date = format_utc_date(github_release_date)
print(f'{dep} has a GitHub release date {iso_release_date}')
if iso_release_date != metadata_release_date:
raise ReleaseDateError(f'Mismatch with metadata release date of {metadata_release_date}')
# Extract release date from GitHub API.
def get_release_date(repo, metadata_version, github_release):
if github_release.tagged:
tags = repo.get_tags()
for tag in tags:
if tag.name == github_release.version:
return tag.commit.commit.committer.date
return None
else:
assert (metadata_version == github_release.version)
commit = repo.get_commit(github_release.version)
return commit.commit.committer.date
# Verify release dates in metadata against GitHub API.
def verify_and_print_release_dates(repository_locations, github_instance):
for dep, metadata in sorted(repository_locations.items()):
release_date = None
# Obtain release information from GitHub API.
github_release = utils.get_github_release_from_urls(metadata['urls'])
if not github_release:
print(f'{dep} is not a GitHub repository')
continue
repo = github_instance.get_repo(f'{github_release.organization}/{github_release.project}')
release_date = get_release_date(repo, metadata['version'], github_release)
if release_date:
# Check whether there is a more recent version and warn if necessary.
verify_and_print_latest_release(dep, repo, github_release.version, release_date)
# Verify that the release date in metadata and GitHub correspond,
# otherwise throw ReleaseDateError.
verify_and_print_release_date(dep, release_date, metadata['release_date'])
else:
raise ReleaseDateError(
f'{dep} is a GitHub repository with no inferrable release date')
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: %s <path to repository_locations.bzl>' % sys.argv[0])
sys.exit(1)
access_token = os.getenv('GITHUB_TOKEN')
if not access_token:
print('Missing GITHUB_TOKEN')
sys.exit(1)
path = sys.argv[1]
spec_loader = exports.repository_locations_utils.load_repository_locations_spec
path_module = exports.load_module('repository_locations', path)
try:
verify_and_print_release_dates(spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC),
github.Github(access_token))
except ReleaseDateError as e:
print(f'An error occurred while processing {path}, please verify the correctness of the '
f'metadata: {e}')
sys.exit(1)
```
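A tiny usage sketch of the date helper above, with an illustrative value; `format_utc_date` only accepts naive datetimes, as its assertion enforces, and the function is assumed to be in scope (same module or imported):
```python
import datetime

d = datetime.datetime(2021, 5, 1, 12, 34, 56)  # naive, no tzinfo
print(format_utc_date(d))  # 2021-05-01
```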
|
{
"source": "jessicayung/hello-motions",
"score": 3
}
|
#### File: hello-motions/hello-motions-flask/add_csv_to_db.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import datetime
import numpy as np
import pandas as pd
motions_df = pd.read_csv("motions_with_category_labels_clean.csv")
motions_df.fillna('', inplace=True)
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///motions.db'
# Initialise the database
db = SQLAlchemy(app)
# In a Python shell: run "from app import db", then "db.create_all()"
class Motion(db.Model):
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date, nullable=False)
circuit = db.Column(db.String(20))
country = db.Column(db.String(50))
international = db.Column(db.Integer, nullable=False, default=0)
tournament = db.Column(db.String(50), nullable=False)
ca_1 = db.Column(db.String(50))
ca_2 = db.Column(db.String(50))
ca_3 = db.Column(db.String(50))
ca_4 = db.Column(db.String(50))
ca_5 = db.Column(db.String(50))
ca_6 = db.Column(db.String(50))
ca_7 = db.Column(db.String(50))
ca_8 = db.Column(db.String(50))
ca_9 = db.Column(db.String(50))
event_link = db.Column(db.String(100))
round_code = db.Column(db.String(10))
round = db.Column(db.String(10))
motion = db.Column(db.String(5000), nullable=False)
infoslide = db.Column(db.String(5000), default="")
topic_area_1 = db.Column(db.String(50))
topic_area_2 = db.Column(db.String(50))
topic_area_3 = db.Column(db.String(50))
topic_area_specific_1 = db.Column(db.String(50))
topic_area_automated = db.Column(db.String(50))
needs_updating = db.Column(db.Boolean, default=False)
def __repr__(self):
return f'<Motion {self.id}: {self.motion}>'
# creates the tables if they don't exist already (comment out after the first run if desired)
db.create_all()
# "Date,Circuit,City,Country,International,Tournament,CA_1,CA_2,CA_3,CA_4,CA_5,CA_6,CA_7,CA_8,CA_9,Event_Link,Round_Code,Round,Motion,Infoslide,Topic_Area_1,Topic_Area_2,Topic_Area_3,Topic_Area_Specific_1,Needs_updating,Topic_Area_Automated"
num_motions_added = 0
for i, row in motions_df.iterrows():
if row['Motion'] is np.nan or row['Motion'] == "":
continue
try:
new_motion = Motion(id=i,
date=datetime.datetime.strptime(row['Date'], "%Y-%m-%d"),
circuit=row['Circuit'],
country=row['Country'],
international=row['International'],
tournament=row['Tournament'],
ca_1=row['CA_1'],
ca_2=row['CA_2'],
ca_3=row['CA_3'],
ca_4=row['CA_4'],
ca_5=row['CA_5'],
ca_6=row['CA_6'],
ca_7=row['CA_7'],
ca_8=row['CA_8'],
ca_9=row['CA_9'],
event_link=row['Event_Link'],
round_code=row['Round_Code'],
round=row['Round'],
motion=row['Motion'],
infoslide=row['Infoslide'],
topic_area_1=row['Topic_Area_1'],
topic_area_2=row['Topic_Area_2'],
topic_area_3=row['Topic_Area_3'],
topic_area_specific_1=row['Topic_Area_Specific_1'],
topic_area_automated=row['Topic_Area_Automated'],
needs_updating=bool(row['Needs Updating?']))
db.session.add(new_motion)
num_motions_added += 1
except Exception as e:
print("Error adding motion", row, e)
db.session.commit()
print("committed motions")
```
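Once the rows are committed, they can be read back through the same model; a minimal sketch, assuming it runs in a shell where `app`, `db` and `Motion` from this module are imported:
```python
with app.app_context():
    print(Motion.query.count())
    # Five most recent motions by date.
    for m in Motion.query.order_by(Motion.date.desc()).limit(5):
        print(m)
```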
|
{
"source": "jessicayywu/model-optimization",
"score": 2
}
|
#### File: compression/algorithms/weight_clustering_test.py
```python
import os
import tempfile
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
from tensorflow_model_optimization.python.core.common.keras.compression.algorithms import weight_clustering
def _build_model():
i = tf.keras.layers.Input(shape=(28, 28), name='input')
x = tf.keras.layers.Reshape((28, 28, 1))(i)
x = tf.keras.layers.Conv2D(
20, 5, activation='relu', padding='valid', name='conv1')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Conv2D(
50, 5, activation='relu', padding='valid', name='conv2')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(500, activation='relu', name='fc1')(x)
output = tf.keras.layers.Dense(10, name='fc2')(x)
model = tf.keras.Model(inputs=[i], outputs=[output])
return model
def _get_dataset():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Use subset of 60000 examples to keep unit test speed fast.
x_train = x_train[:1000]
y_train = y_train[:1000]
return (x_train, y_train), (x_test, y_test)
def _train_model(model):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
(x_train, y_train), _ = _get_dataset()
model.fit(x_train, y_train, epochs=1)
def _save_as_saved_model(model):
saved_model_dir = tempfile.mkdtemp()
model.save(saved_model_dir)
return saved_model_dir
def _get_directory_size_in_bytes(directory):
total = 0
try:
for entry in os.scandir(directory):
if entry.is_file():
# if it's a file, use stat() function
total += entry.stat().st_size
elif entry.is_dir():
# if it's a directory, recursively call this function
total += _get_directory_size_in_bytes(entry.path)
except NotADirectoryError:
# if `directory` isn't a directory, get the file size then
return os.path.getsize(directory)
except PermissionError:
# if for whatever reason we can't open the folder, return 0
return 0
return total
class FunctionalTest(tf.test.TestCase):
def testWeightClustering_TrainingE2E(self):
number_of_clusters = 8
model = _build_model()
_train_model(model)
original_saved_model_dir = _save_as_saved_model(model)
params = weight_clustering.WeightClusteringParams(
number_of_clusters=number_of_clusters,
cluster_centroids_init=\
cluster_config.CentroidInitialization.DENSITY_BASED)
compressed_model = weight_clustering.optimize(model, params)
_train_model(compressed_model)
saved_model_dir = _save_as_saved_model(compressed_model)
_, (x_test, y_test) = _get_dataset()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compressed_model.compile(
optimizer='adam', loss=loss_fn, metrics=['accuracy'])
results = compressed_model.evaluate(x_test, y_test)
# Accuracy test.
self.assertGreater(results[1], 0.85) # 0.8708
original_size = _get_directory_size_in_bytes(original_saved_model_dir)
compressed_size = _get_directory_size_in_bytes(saved_model_dir)
# Compressed model size test.
# TODO(tfmot): gzip compression can reduce file size much better.
self.assertLess(compressed_size, original_size / 1.3)
def testWeightClustering_SingleLayer(self):
number_of_clusters = 8
i = tf.keras.layers.Input(shape=(2), name='input')
output = tf.keras.layers.Dense(3, name='fc1')(i)
model = tf.keras.Model(inputs=[i], outputs=[output])
dense_layer_weights = model.layers[1].get_weights()
params = weight_clustering.WeightClusteringParams(
number_of_clusters=number_of_clusters,
cluster_centroids_init=\
cluster_config.CentroidInitialization.DENSITY_BASED)
compressed_model = weight_clustering.optimize(model, params)
dense_layer_compressed_weights = compressed_model.layers[1].get_weights()
# clustering_centroids.
self.assertEqual(
dense_layer_compressed_weights[0].shape, (number_of_clusters,))
# pulling_indices.
self.assertEqual(
dense_layer_compressed_weights[1].shape,
dense_layer_weights[0].shape)
self.assertEqual(str(dense_layer_compressed_weights[1].dtype), 'int64')
self.assertAllInRange(
dense_layer_compressed_weights[1], 0, number_of_clusters - 1)
# bias
assert (dense_layer_weights[1] == dense_layer_compressed_weights[2]).all()
if __name__ == '__main__':
tf.test.main()
```
#### File: compression/internal/optimize.py
```python
from typing import List, Any, Mapping
import dataclasses
import tensorflow as tf
# Workaround to prevent MLIR from constant folding the
# compressed weights into the original weights. For instance,
# if we decompose `self.kernel` into `u` and `v`, we need to
# make sure that decompression occurs during inference, instead
# of during MLIR optimization which could multiply `u` and `v`
# given that they are constants.
#
# TODO(tfmot): make this more stable. This currently relies
# on the TensorFlow Lite MLIR converter to not constant
# fold through `tf.cond`, even though it already does
# for `tf.while`.
def _prevent_constant_folding(tensor, dummy_inputs):
tensor = tf.identity(tensor)
outputs = tf.cond(
tf.reduce_sum(dummy_inputs) > 0, lambda: tensor, lambda: tensor)
return outputs
class _TrainingWrapper(tf.keras.layers.Wrapper):
"""Represent modifications to training graph for weight compression."""
def __init__(self, layer, algorithm, compressible_weights: List[str]):
self.algorithm = algorithm
self.compressible_weights = compressible_weights
self.original_add_weight = layer.add_weight
setattr(layer, 'add_weight', self._skip_compressible_weights)
super(_TrainingWrapper, self).__init__(layer)
def _skip_compressible_weights(self, *args, **kwargs):
# Match for compressible weights based on `name` parameter.
#
# This depends on common practice where every layer's call
# to `self.add_weight` follows this form:
#
# self.`name` = self.add_weight(name=`name`)
#
# where the attribute name matches the variable name.
#
# TODO(tfmot): check if depending on this practice
# is safe for both builtin and custom Keras layers.
# Regardless, raise an exception if name is None, which
# means that the practice has not been followed.
name = None
if args:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name not in self.compressible_weights:
return self.original_add_weight(*args, **kwargs)
# If weight is compressible, substitute in a dummy tensor
# with the same shape as what would have been added.
# Returning an empty tensor would cause ** to fail.
shape = None
if args and len(args) > 1:
shape = args[1]
if 'shape' in kwargs:
shape = kwargs['shape']
return tf.zeros(shape)
def build(self, input_shape):
# Building nested layer via `super` must happen first
# so that the nested layer's variables
# are available to `init_training_weights_repr`.
super(_TrainingWrapper, self).build(input_shape)
# Add weights needed by algorithm during training.
self.training_weights = {}
for attr_name in self.compressible_weights:
compressible_weight = getattr(self.layer, attr_name)
# Note that as standard in `build` methods, the handling of pretrained
# weights actually occurs outside the wrapper. This only initializes
# weights with dummy values. Additionally, we don't have access to the
# actual values of the nested layer's weights since they are no longer
# variables, due to `_skip_compressible_weights` from `__init__`.
assert isinstance(compressible_weight, tf.Tensor)
weight_reprs = self.algorithm.init_training_weights_repr(
compressible_weight)
weights = []
for weight_repr in weight_reprs:
weight = self.add_weight(**dataclasses.asdict(weight_repr))
weights.append(weight)
self.training_weights[attr_name] = weights
def call(self, inputs):
for attr_name in self.compressible_weights:
# TODO(tfmot): move constant folding prevention to the inference graph
# only, since constant folding won't happen during training.
training_weight_tensors = []
for v in self.training_weights[attr_name]:
training_weight_tensors.append(
_prevent_constant_folding(v.read_value(), inputs))
weight_tensor = self.algorithm.training(*training_weight_tensors)
setattr(self.layer, attr_name, weight_tensor)
# This assumes that all changes to the forward pass happen "prior" to
# the nested layer's portion of the forward pass. This suffices since
# the scope of this API is to only optimize the weights.
return self.layer.call(inputs)
# TODO(tfmot): deduplicate code with _TrainingWrapper.
class _InferenceWrapper(tf.keras.layers.Wrapper):
"""Represent modifications to inference graph for weight compression."""
def __init__(self, layer, algorithm,
training_tensors: Mapping[str, List[tf.Tensor]]):
self.algorithm = algorithm
# training_tensors is a map from compressible attributes (e.g. 'kernel')
# to tensors (not variables to prevent model size increasing) with the
# same shape as the corresponding variables used during training.
self.training_tensors = training_tensors
self.original_add_weight = layer.add_weight
setattr(layer, 'add_weight', self._skip_compressible_weights)
super(_InferenceWrapper, self).__init__(layer)
def _skip_compressible_weights(self, *args, **kwargs):
# Match for compressible weights based on `name` parameter.
#
# This depends on common practice where every layer's call
# to `self.add_weight` follows this form:
#
# self.`name` = self.add_weight(name=`name`)
#
# where the attribute name matches the variable name.
#
# TODO(tfmot): check if depending on this practice
# is safe for both builtin and custom Keras layers.
# Regardless, raise an exception if name is None, which
# means that the practice has not been followed.
name = None
if args:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name not in self.training_tensors:
return self.original_add_weight(*args, **kwargs)
# If weight is compressible, substitute in a dummy tensor
# with the same shape as what would have been added.
# Returning an empty tensor would cause ** to fail.
shape = None
if args and len(args) > 1:
shape = args[1]
if 'shape' in kwargs:
shape = kwargs['shape']
return tf.zeros(shape)
def build(self, input_shape):
super(_InferenceWrapper, self).build(input_shape)
# Add weights needed by algorithm during inference.
self.compressed_weights = {}
for attr_name in self.training_tensors:
training_tensors = self.training_tensors[attr_name]
compressed_tensors = self.algorithm.compress(*training_tensors)
weights = []
for t in compressed_tensors:
weight = self.add_weight(name='TODO', shape=t.shape)
weights.append(weight)
self.compressed_weights[attr_name] = weights
def call(self, inputs, training=None):
for attr_name in self.training_tensors:
# TODO(tfmot): understand how read_value() is converted to
# inference in TensorFlow Lite.
compressed_weight_tensors = []
for v in self.compressed_weights[attr_name]:
compressed_weight_tensors.append(
_prevent_constant_folding(v.read_value(), inputs))
weight_tensor = self.algorithm.decompress(*compressed_weight_tensors)
setattr(self.layer, attr_name, weight_tensor)
# TODO(tfmot): handle training arg if needed given this is inference only.
return self.layer.call(inputs)
def _map_to_training_weights(
pretrained_weights: List[List[Any]], algorithm, layer,
compressible_weights: List[str]) -> List[List[Any]]:
"""Construct the training weight values from the layer's pretrained weights.
The weight values have the same structure as the output of
`tf.keras.layers.Layer.get_weights`.
Args:
pretrained_weights: layer's pretrained weights, retrieved via
layer.get_weights()
algorithm: weight compression algorithm
layer: layer
compressible_weights: weight attributes of layer that should be compressed
Returns:
Initial weight values for training.
"""
# Need to know for each layer that kernel is the first weight, etc.
# TODO(tfmot): consider implication on custom Keras layers. The
# user has to pass in the information that 'kernel' is the first
# variable, 'bias' is the second variable, and so on.
# TODO(tfmot): see if Keras can introduce changes to simplify this.
original_weights = []
training_weights = []
if isinstance(layer, tf.keras.layers.Conv2D) or \
isinstance(layer, tf.keras.layers.Dense):
weights = ['kernel', 'bias']
for i, weight in enumerate(weights):
pretrained_weight = pretrained_weights[i]
if weight in compressible_weights:
weight_reprs = algorithm.init_training_weights_repr(pretrained_weight)
for weight_repr in weight_reprs:
# Assumes initializer is tf.keras.initializers.Constant.
# TODO(tfmot): add check for this assumption.
# TODO(tfmot): the documentation for
# tf.keras.initializers.Constant(value)
# suggests that the `value` cannot be any arbitrary shape and
# only a single scalar value. It works in this implementation
# to make `value` any tensor - check this.
training_weights.append(weight_repr.initializer(
shape=None, dtype=weight_repr.dtype))
else:
original_weights.append(pretrained_weight)
return training_weights + original_weights
# TODO(tfmot): same TODOs as _map_to_training_weights.
def _map_to_inference_weights(training_weights, algorithm, training_tensors):
"""Construct the inference weight values from the weights after training.
The weight values have the same structure as the output of
`tf.keras.layers.Layer.get_weights`.
Args:
training_weights: layer's weights from training, retrieved via
layer.get_weights()
algorithm: weight compression algorithm
training_tensors: map from compressible weight attribute (e.g. 'kernel') to
relevant tensors.
Returns:
Initial weight values for inference.
Example:
training_weights = [kernel_var1, kernel_var2, bias]
training_tensors = {'kernel': [kernel_var1, kernel_var2]}
expected output: [compress([kernel_var1, kernel_var2]), bias]
"""
compressed_weights = []
weights = ['kernel', 'bias']
layer_weights_i = 0
for weight in weights:
if weight in training_tensors:
compressed = algorithm.compress(*training_tensors[weight])
for c in compressed:
compressed_weights.append(c.numpy())
layer_weights_i += len(training_tensors[weight])
else:
compressed_weights.append(training_weights[layer_weights_i])
layer_weights_i += 1
return compressed_weights
def create_layer_for_training(layer, algorithm):
"""Internal API to create layer for training with weight compression."""
# TODO(tfmot): move these checks to public API for
# visibility.
if not isinstance(algorithm, object):
raise ValueError('`_create_layer_for_training` requires `algorithm` '
'to be an instantiated object, as opposed '
'to the class itself.')
# Currently only supports a layer being built. The non-built
# case may work fine as is, but it needs to be tested, together
# with the followup API for exporting the model when the training
# and inference graphs differ.
if not layer.built:
raise ValueError('`_create_layer_for_training` requires `layer` to '
'be built.')
pretrained_weights = layer.get_weights()
input_shape = layer.input_shape
compressible_weights = algorithm.get_compressible_weights(layer)
# Clone layer for two reasons:
#
# 1) Avoid unnecessary variable creation which undoes the benefits of
# compression. For instance, if we factorize `kernel` into `a` and `b`,
# since `a` and `b` collectively take less space than `kernel`, we
# no longer want `kernel` to take up space as a variable.
#
# The design depends on replacing the layer's `add_weight`
# method to prevent variable creation, before `add_weight` is called
# in the layer's `build`. Since the layer is built already, we undo
# this by cloning the layer.
#
# 2) The unoptimized layer and the optimized layer are now independent
# of each other and training one will not affect the other.
#
# TODO(tfmot): consider if it's okay to avoid this complexity during training
# and only add it during inference, which is when model size really matters.
# TODO(tfmot): handle custom Keras layer case.
cloned_layer = layer.__class__.from_config(layer.get_config())
# TODO(tfmot): consider if this manner of handling build hinders
# support for subclassed models in trying to set the attributes
# that are layers while ensuring that the underlying trainable weights
# have been created already.
wrapped_layer = _TrainingWrapper(cloned_layer, algorithm,
compressible_weights)
if compressible_weights:
# Set pretrained weight values.
wrapped_layer.build(input_shape)
training_weights = _map_to_training_weights(pretrained_weights, algorithm,
layer, compressible_weights)
wrapped_layer.set_weights(training_weights)
return wrapped_layer
def create_layer_for_inference(layer: _TrainingWrapper, algorithm):
"""Internal API to create layer for inference with weight compression."""
# TODO(tfmot): move these checks to public API for
# visibility.
if not isinstance(algorithm, object):
raise ValueError('`_create_layer_for_inference` requires `algorithm` '
'to be an instantiated object, as opposed '
'to the class itself.')
if not layer.built:
raise ValueError(
'`_create_layer_for_inference` requires `layer` to be built.')
# Process layer.
nested_layer = layer.layer
input_shape = layer.input_shape
# Construct map from attribute (e.g. 'kernel') to tensor versions of
# variables used during training.
compressible_training_tensors = {}
for attr, weights in layer.training_weights.items():
compressible_training_tensors[attr] = [w.read_value() for w in weights]
# Process nested layer.
#
# TODO(tfmot): same TODOs as in _create_layer_for_training.
cloned_layer = nested_layer.__class__.from_config(nested_layer.get_config())
layer_for_inference = _InferenceWrapper(cloned_layer, algorithm,
compressible_training_tensors)
layer_for_inference.build(input_shape)
if layer.get_weights():
# Set weights of layer for inference according to what was trained.
inference_weights = _map_to_inference_weights(
layer.get_weights(), algorithm, compressible_training_tensors)
layer_for_inference.set_weights(inference_weights)
return layer_for_inference
```
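The wrappers above only rely on a duck-typed `algorithm` object; the method names below are taken from the calls made in this file, while the dataclass fields and the no-op behaviour are purely illustrative assumptions:
```python
from typing import Any
import dataclasses
import tensorflow as tf


@dataclasses.dataclass
class _WeightRepr:
  # Field names mirror tf.keras.layers.Layer.add_weight kwargs, since the
  # training wrapper passes dataclasses.asdict(weight_repr) into add_weight().
  name: Any
  shape: Any
  dtype: Any
  initializer: Any


class IdentityAlgorithm:
  """Illustrative no-op algorithm satisfying the calls made by the wrappers."""

  def get_compressible_weights(self, layer):
    return ['kernel'] if hasattr(layer, 'kernel') else []

  def init_training_weights_repr(self, pretrained_weight):
    # Constant() with a full tensor relies on the behaviour noted in
    # _map_to_training_weights above.
    return [
        _WeightRepr(
            name='w',
            shape=pretrained_weight.shape,
            dtype=tf.as_dtype(pretrained_weight.dtype),
            initializer=tf.keras.initializers.Constant(pretrained_weight))
    ]

  def training(self, w):
    return w

  def compress(self, w):
    return [w]

  def decompress(self, w):
    return w
```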
#### File: keras/compression/schedules.py
```python
import abc
from typing import Union, Optional
import tensorflow as tf
class Scheduler(metaclass=abc.ABCMeta):
"""Abstract Scheduler."""
@abc.abstractmethod
def __call__(self, step: Union[int, tf.Tensor]) -> tf.Tensor:
"""Scheduler function given tf.Tensor step number.
Args:
step: tf.Tensor with tf.int32 or tf.int64 representing the current step
number of training loops.
Returns:
Any tf.Tensor Scheduled value of given `step`
"""
raise NotImplementedError()
class PolynomialDecay(Scheduler):
"""Scheduling based on polynomial equation.
s(t) = start_value for t < begin_step
= end_value + [(start_value - end_value) * (1 - decay_term) ** exponent]
where decay_term = (t - begin_step) / decay_steps
for 0 <= 1 - decay_term <= 1
<-> 0 <= decay_term <= 1
<-> 0 <= (t - begin_step) / decay_steps <= 1
<-> 0 <= (t - begin_step) <= decay_steps
<-> begin_step <= t <= begin_step + decay_steps (=end_step)
= end_value for t > begin_step + decay_steps (=end_step)
"""
def __init__(self,
start_value: Union[int, float],
decay_steps: int,
end_value: Union[int, float],
begin_step: Optional[int] = 0,
exponent: Optional[float] = 1.0,
dtype: Optional[tf.dtypes.DType] = tf.float32,
name: Optional[str] = None):
"""Initialize PolynomialDecayScheduler.
Args:
start_value: the initial value of decaying. It is also the default value
of this scheduler for step <= begin_step.
decay_steps: A Python positive int value for duration of decaying.
end_value: the final value of decaying. It is also the default value of
this scheduler for step >= end_step = begin_step + decay_steps
begin_step: The step value that this scheduler starts decaying.
Defaults to 0, which means it decays right after training starts.
exponent: The exponent of the polynomial decaying.
Defaults to 1.0, a linear function.
dtype: `tf.dtypes.DType`, dtype of returned tensor.
Defaults to tf.float32.
name: A Python `str` for the name scope of this scheduler.
Returns:
A `tf.Tensor` of the scheduled output value calculated from the polynomial
equation as given above.
"""
self.name = name
self.start_value = start_value
self.begin_step = begin_step
self.end_value = end_value
self.decay_steps = decay_steps
self.end_step = self.begin_step + self.decay_steps
self.exponent = exponent
self.dtype = dtype
def __call__(self, step: Union[int, tf.Tensor]) -> tf.Tensor:
with tf.name_scope(self.name or "PolynomialDecay"):
val = tf.cond(tf.math.less(step, self.begin_step),
lambda: tf.cast(self.start_value, dtype=self.dtype),
lambda: self._after_begin_step(step), name="start")
return val
def _after_begin_step(self, step: Union[int, tf.Tensor]) -> tf.Tensor:
with tf.name_scope(self.name or "PolynomialDecay"):
val = tf.cond(tf.math.greater(step, self.end_step),
lambda: tf.cast(self.end_value, dtype=self.dtype),
lambda: self._during_decay(step), name="end")
return val
def _during_decay(self, step: Union[int, tf.Tensor]) -> tf.Tensor:
"""Return decayed scheduled value."""
with tf.name_scope(self.name or "PolynomialDecay"):
local_steps = tf.cast(step - self.begin_step, dtype=tf.float32)
decay_term = tf.math.divide(local_steps,
tf.cast(self.decay_steps, dtype=tf.float32))
total_delta = tf.cast(self.start_value - self.end_value, dtype=tf.float32)
target = tf.math.add(self.end_value, tf.cast(
tf.math.multiply(total_delta, tf.pow(1 - decay_term, self.exponent)),
dtype=self.dtype))
val = tf.stop_gradient(target)
return val
```
#### File: keras/compression/schedules_test.py
```python
import tensorflow as tf
from tensorflow_model_optimization.python.core.common.keras.compression import schedules
class SimpleScheduler(schedules.Scheduler):
def __call__(self, step: int) -> float:
return 0.1 if step >= 1000 else 0.6
class SimpleSchedulerTest(tf.test.TestCase):
def testSimpleScheduler(self):
scheduler = SimpleScheduler()
expected = [0.6, 0.6, 0.1, 0.1]
output = [scheduler(i) for i in [0, 100, 1000, 2000]]
self.assertAllEqual(output, expected)
class CubicPolynomialDecayTest(tf.test.TestCase):
def testBeforeDecaying(self):
init_value = 0.1
final_value = 1.0
begin_step = 10
decaying_step = 10
total_training_step = begin_step
scheduler = schedules.PolynomialDecay(init_value, decaying_step,
final_value, begin_step=begin_step,
exponent=3)
output = [scheduler(i) for i in range(total_training_step)]
expected = [init_value] * begin_step
self.assertAllClose(output, expected)
def testDecaying(self):
init_value = 0.1
final_value = 1.0
begin_step = 10
decaying_step = 10
exponent = 3
scheduler = schedules.PolynomialDecay(init_value, decaying_step,
final_value, begin_step=begin_step,
exponent=exponent)
expected = [final_value + (init_value - final_value) * \
(1-float(i)/decaying_step) ** exponent
for i in range(decaying_step)]
output = [scheduler(begin_step + i) for i in range(decaying_step)]
self.assertAllClose(output, expected)
def testBeyondEnd(self):
init_value = 0.1
final_value = 1.0
begin_step = 10
decaying_step = 10
total_steps = 30
beyond_end_steps = total_steps - decaying_step - begin_step
scheduler = schedules.PolynomialDecay(init_value, decaying_step,
final_value, begin_step=begin_step,
exponent=3)
expected = [final_value] * beyond_end_steps
output = [scheduler(begin_step + decaying_step + i)
for i in range(beyond_end_steps)]
self.assertAllClose(output, expected)
class LinearPolynomialDecayTest(tf.test.TestCase):
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
if __name__ == '__main__':
tf.test.main()
```
#### File: quantization/keras/quantize_scheme.py
```python
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class QuantizeScheme(object):
"""ABC interface which specifies transformer and quantization registry."""
@abc.abstractmethod
def get_layout_transformer(self):
"""Returns the layout transforms for this scheme.
Returns:
Returns the QuantizeLayoutTransform for this quantization scheme.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def get_quantize_registry(self):
"""Returns the quantization registry for this scheme.
Returns:
Returns the QuantizeRegistry for this quantization scheme.
"""
raise NotImplementedError('Must be implemented in subclasses.')
```
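A minimal sketch of a concrete scheme; the two objects handed in are stand-ins for real layout-transform and registry implementations rather than anything defined in this file:
```python
class ExampleQuantizeScheme(QuantizeScheme):
  """Illustrative scheme that simply hands back pre-built collaborators."""

  def __init__(self, layout_transform, quantize_registry):
    self._layout_transform = layout_transform
    self._quantize_registry = quantize_registry

  def get_layout_transformer(self):
    return self._layout_transform

  def get_quantize_registry(self):
    return self._quantize_registry
```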
|
{
"source": "jessicsous/Curso_Python",
"score": 4
}
|
#### File: Curso_Python/aula32/aula32.py
```python
def f(arg, arg2):
return arg * arg2
var = f(2,2)
print(var)
# same thing as before, using anonymous (lambda) functions
a = lambda x,y: x * y
print(a(2,2))
# example:
lista = [
['p1', 13],
['p2', 6],
['p3', 7],
['p4', 50],
['p5', 8],
]
# function used to sort the list
def f(item):
return item[1]
lista.sort(key=f)
print(lista)
# reversed
lista1 = [
['p1', 5],
['p2', 7],
['p3', 55],
]
def f1(item):
return item[1]
lista1.sort(key=f1, reverse=True)
print(lista1)
# this process can be replaced by lambda expressions
lista2 = [
['p1', 5],
['p2', 4],
['p3', 3],
]
lista2.sort(key=lambda item: item[1])
print(lista2)
# reversed
lista3 = [
['p1', 5],
['p2', 4],
['p3', 3],
]
lista3.sort(key=lambda item: item[1], reverse=True)
print(lista3)
# another way to sort, using 'sorted'
lista4 = [
['p1', 1],
['p2', 2],
['p3', 3],
]
print(sorted(lista4, key=lambda i: i[1]))
# reversed
lista5 = [
['p1', 3],
['p2', 2],
['p3', 1],
]
print(sorted(lista5, key=lambda i: i[1], reverse=True))
```
#### File: Curso_Python/aula38/aula38.py
```python
lista = [1,2,3,4,5,6]
lista1 = 123456
lista2 = 'string'
print(hasattr(lista, '__iter__')) # check whether the list is an iterable object
print(hasattr(lista1, '__iter__'))
print(hasattr(lista2, '__iter__'))
# how to turn a list into an iterator
lista3 = [10,20,30,40,50,60]
lista3 = iter(lista3)
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
# generators (they help avoid memory consumption)
import sys
lista4 = list(range(1000))
print(lista4)
print(sys.getsizeof(lista4)) # how many bytes of memory the list is using
import time # module used to watch what happens on screen
def gera(): # create a function
r = [] # create an empty list
for n in range(100): # iterate over range from 0 to 99
r.append(n) # append each value of the loop to the empty list
time.sleep(0.1) # sleep for 0.1 second
return r # return the value at the end
g = gera()
for v in g:
print(v)
# to return one value at a time
def gerad():
for n in range(100):
yield n
time.sleep(0.1)
g1 = gerad()
for v in g1:
print(v)
# manual version without the for loop
def gerador():
variavel = 'valor 1'
yield variavel
variavel = 'valor 2'
yield variavel
g2 = gerador()
print(next(g2))
print(next(g2))
# simpler way to create a generator
l1 = [x for x in range(100)] # stores every value in memory
print(type(l1))
l2 = (x for x in range(100)) # does not store the values in memory, it only yields values on demand; use next or a for loop to ask for them
print(type(l2))
print(sys.getsizeof(l1)) # size of the list
print(sys.getsizeof(l2)) # size of the generator / although they look identical, the sizes differ
```
#### File: Curso_Python/aula46/aula46.py
```python
def divide(n1, n2):
try:
return n1 / n2
except ZeroDivisionError as error:
print('Log: ', error)
raise
try:
print(divide(2,0))
except ZeroDivisionError as error:
print(error)
# custom error message:
def divide(n1, n2):
if n2 == 0:
raise ValueError('n2 não pode ser 0')
return n1 / n2
try:
print(divide(2,0))
except ValueError as error:
print(error)
```
#### File: Curso_Python/aula47/aula47.py
```python
def convert_numero(valor):
try:
valor = int(valor)
return valor
except ValueError as erro:
try:
valor = float(valor)
return valor
except ValueError:
pass
while True:
numero = convert_numero(input('Digite um número: '))
if numero is not None:
print(numero * 5)
else:
print('isso não é número.')
```
#### File: Curso_Python/aula52/aula52.py
```python
class Produto:
def __init__(self, nome, preco):
self.nome = nome
self.preco = preco
def desconto(self, percentual):
self.preco = self.preco - (self.preco * (percentual / 100))
#Getter
@property
def nome(self):
return self._nome
#Setter
@nome.setter
def nome(self, valor):
self._nome = valor.title()
# Getter: returns a value
@property
def preco(self):
return self._preco
# Setter: sets a value
@preco.setter
def preco(self, valor):
if isinstance(valor, str):
valor = float(valor.replace('R$', ''))
self._preco = valor
p1 = Produto('CAMISETA', 50)
p1.desconto(10)
print(p1.preco)
p2 = Produto('CANECA', 'R$15')
p2.desconto(10)
print(p2.preco)
```
#### File: Curso_Python/aula54/aula54.py
```python
class BaseDeDados:
def __init__(self):
self.__dados = {}
@property
def dados(self):
return self.__dados
def inserir_cliente(self, id, nome):
if 'clientes' not in self.__dados:
self.__dados['clientes'] = {id: nome}
else:
self.__dados['clientes'].update({id: nome})
def lista_clientes(self):
for id, nome in self.__dados['clientes'].items():
print(id, nome)
def apaga_cliente(self, id):
del self.__dados['clientes'][id]
bd = BaseDeDados()
bd.inserir_cliente(1, 'Otávio')
bd.__dados = 'outra coisa'
print(bd.__dados)
print(bd._BaseDeDados__dados)
#bd.lista_clientes()
```
#### File: Curso_Python/aula63/aula63.py
```python
from abc import ABC, abstractmethod
class A(ABC):
@abstractmethod
def fala(self, msg): pass
class B(A):
def fala(self, msg):
print(f'B está falando {msg}')
class C(A):
def fala(self, msg):
print(f'C está falando {msg}')
b = B()
c = C()
b.fala('um assunto')
c.fala('outro assunto')
```
#### File: Curso_Python/aula67/aula67.1.py
```python
from contextlib import contextmanager
@contextmanager
def abrir(arquivo, modo):
try:
print('abrindo arquivo')
arquivo = open(arquivo, modo)
yield arquivo
finally:
print('fechando arquivo')
arquivo.close()
with abrir('abc.txt', 'w') as arquivo:
arquivo.write('linha 1\n')
arquivo.write('linha 2\n')
arquivo.write('linha 3\n')
```
#### File: Curso_Python/calculadora1/mapeamento.py
```python
from dados import produtos, pessoas, lista
from functools import reduce
#nova_lista = map(lambda x: x * 2, lista) # multiplicando cada elemento da lista vezes 2
#nova_lista1 = [x for x in lista] # cópia da lista
#nova_lista2 = [x * 2 for x in lista] # outra forma
#print(nova_lista2)
#print(lista)
#print(list(nova_lista))
#print(nova_lista1)
# acrescentar 5% sob os preços do produtos
# cada linha da lista anterior vai chegar no argumento da função a seguir 'p', desse argumento será acessada a chave preco...
# e após acessdado o valor da chave será modificado
#def aumenta_preco(p):
#p['preco'] = round(p['preco'] * 1.05, 2)
#return p
#novos_produtos = map(aumenta_preco, produtos) # the map function applies a function to every element of the iterable
#for produto in novos_produtos:
#print(produto)
#print()
# get only the people's names from the previous dictionary
#nomes = map(lambda p: p['nome'], pessoas)
# for pessoa in nomes:
# print(pessoa)
# print()
# only the ages
# idades = map(lambda i: i['idade'], pessoas)
# for idade in idades:
#print(idade)
# map = apply a function to every element of the dictionary
# filter function: filter information
nova_lista = filter(lambda x: x > 5, lista)
nova_lista1 = [x for x in lista if x > 5]
print(nova_lista1)
print(list(nova_lista))
# filter products priced above R$ 10.00
lista_produto = filter(lambda p: p['preco'] > 10, produtos)
for produto in lista_produto:
print(produto)
print()
# for more complex operations you can define a function, just like with map
def filtra(produto):
if produto['preco'] > 10:
return True
lista_produto1 = filter(filtra, produtos)
for produto in lista_produto1:
print(produto)
# check who is of legal age
def filtra(pessoas):
if pessoas['idade'] >= 18:
return True
else:
return False
lista_pessoas = filter(filtra, pessoas)
for pessoa in lista_pessoas:
print(pessoa)
# reduce = a compulsive accumulator
# the way we learned before
acumula = 0
for item in lista:
acumula += item
print(acumula)
# using the reduce function
soma_lista = reduce(lambda ac, i: i + ac, lista, 0)
print(soma_lista)
soma_precos = reduce(lambda ac, p: p['preco'] + ac, produtos, 0)
print(soma_precos)
soma_idades = reduce(lambda ac, p: ac + p['idade'], pessoas, 0)
print(soma_idades / len(pessoas)) # average age
```
#### File: Curso_Python/exercicio/exercicio11.py
```python
def f():
variavel = 'valor'
return variavel
def f2(arg):
print(arg)
var2 = f()
f2(var2)
# another way
def f():
return 'valor'
def f2(arg):
return arg()
var = f2(f)
print(var)
```
#### File: Curso_Python/exercicio/exercicio9.py
```python
def f(primeiro, segundo):
vari1 = primeiro * (segundo/100)
vari2 = primeiro
return vari1 + vari2
var = f(50, 50)
print(var)
# another way
def aumento_percentual(valor, percentual):
return valor + (valor * percentual / 100)
ap = aumento_percentual(50, 50)
print(ap)
```
|
{
"source": "JessiDG/secondDrafting",
"score": 4
}
|
#### File: JessiDG/secondDrafting/second_drafting.py
```python
class SecondDrafting:
def __init__(self, text):
self._text = text
def check_repetition(self, text):
word_list = []
common_word_list = ['and', 'the', 'or', 'is', 'of', 'was', 'if', 'her', 'hers', 'she',
'he', 'his', 'him', 'they', 'them', 'their']
updated_sentence = ""
for word in text.split():
if word in word_list and word not in common_word_list:
new_word = "**"+str(word)+"**"
updated_sentence += new_word + " "
else:
word_list.append(word)
updated_sentence += word + " "
return updated_sentence
def check_repetitive_punctuation(self, text, punctuation):
updated_sentence = ""
for i in text:
if i == punctuation:
new_punctuation = '**' + str(punctuation) + "**"
updated_sentence += new_punctuation
else:
updated_sentence += i
return updated_sentence
def check_adverbs(self, text):
updated_sentence = ""
for word in text.split():
# print()
# print(word[(len(word) - 2):(len(word))])
if word[(len(word) - 2):(len(word))] == 'ly':
new_word = '**'+word+'**'
updated_sentence += new_word +' '
else:
updated_sentence += word + ' '
return updated_sentence
def check_gerunds(self, text):
updated_sentence = ""
for word in text.split():
if word[(len(word) - 3):(len(word))] == 'ing':
new_word = '**' + word + '**'
updated_sentence += new_word + ' '
else:
updated_sentence += word + ' '
return updated_sentence
def check_infinitives(self, text):
updated_sentence = ""
for word in text.split():
if word == 'to':
new_word = '**' + word + '**'
updated_sentence += new_word + ' '
else:
updated_sentence += word + ' '
return updated_sentence
def check_everything(self, text, punct0=';', punct1='—', punct2='–'):
text_no_repetition = self.check_repetition(text)
text_repetitive_punctuation0 = self.check_repetitive_punctuation(text_no_repetition, punct0)
text_repetitive_punctuation1 = self.check_repetitive_punctuation(text_repetitive_punctuation0, punct1)
text_repetitive_punctuation2 = self.check_repetitive_punctuation(text_repetitive_punctuation1, punct2)
text_adverbs = self.check_adverbs(text_repetitive_punctuation2)
text_gerunds = self.check_gerunds(text_adverbs)
text_infinitives = self.check_infinitives(text_gerunds)
return text_infinitives
def chat_bot(self):
usr = input('what text would you like to check?\n')
return self.check_everything(usr)
def __str__(self):
return self._text
# class byParagraph:
if __name__ == "__main__":
string0 = "and and ; ; — —. happily singing singer to dance"
sd = SecondDrafting(string0)
# print(sd)
# print(sd.check_repetition(string0))
# print(sd.check_repetitive_punctuation(string0, ';'))
# print(sd.check_repetitive_punctuation(string0, '—'))
#print(sd.check_adverbs(string0))
# print(sd.check_gerunds(string0))
# print(sd.check_infinitives(string0))
# print(sd.check_everything(string0))
# string1 = "The en dash is used to represent a span or range of numbers, dates, or time. There should be no space between the en dash and the adjacent material. Depending on the context, the en dash is read as “to” or “through.”"
# print(sd.check_everything(string1))
# string2 = "The other young man nodded. Brian wondered if the utter silence of the three hour drive from the Idaho Falls Greyhound station was part of the Basic Military Training now or whether this Airman was just the silent type. Brian had gone through BMT five years ago, about six months after Hiroshima and Nagasaki. His instructors had been so shell-shocked, he had no idea if they’d covered protocol for situations like this."
# print(sd.check_everything(string2))
# string3 = "Brian held himself stiffly in the pre-war era Ford’s backseat as they bumped along the ill-paved county road. Every jerking pothole yanked the barely-scabbed over belt marks on his back. His face was stony where the Junior Airman driver could see, but his hand was digging new bruises over the old ones on his knees, fingers tucked between the perfect creases of his dress uniform. His first day at work and he wouldn't be able to sit back in any chair he was offered. Not that the world’s first nuclear power plant’s new Security Director will have a lot of sitting to do. About five minutes left, Staff Sergeant Flinn. Thank you, Airman. The other young man nodded. Brian wondered if the utter silence of the three " \
# "hour drive from the Idaho Falls Greyhound station was part of the Basic Military Training now or whether this Airman was just the silent type. Brian had gone through BMT five years ago, about six months after Hiroshima and Nagasaki. His instructors had been so shell-shocked, he had no idea if they’d covered protocol for situations like this. Brian kept his eyes on the impossibly-flat, horizon-daring high desert of central Idaho. There was scrub here, a bit greener than what he'd grown-up with on the blue-red mesas of southeastern New Mexico. The mountains here looked like weapons, holding none of the softening of arroyos of his home. Just saw-toothed peaks and man-eating snow drifts. He could see their destination on the horizon; there wasn\'t anything else to look at in the ' \
# 'middle distance. The Experimental Breeder Reactor (EBR-1) was big cinderblock cube in the middle of the desert. Site chosen because if we all blow ourselves up tomorrow, it\'ll be a thousand years of poisoned water for the cows rather than a real metropolitan area."
# print(sd.check_everything(string3))
print(sd.chat_bot())
```
|
{
"source": "jessie0306/MyCinema",
"score": 2
}
|
#### File: MyCinema/mycinema/board_views.py
```python
from django.shortcuts import render
from mycinema.models import Voc, MymovieRe
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from datetime import datetime
from django.http.response import HttpResponseRedirect
def B_MainFunc(request):
if "id" in request.session:
id = request.session['id']
return render(request, "board_main2.html",{"id":id})
else:
return render(request, 'login.html')
# clicking "Voice of the Customer" (VOC) leads here
def VocFunc(request):# when VOC is opened, show the inquiry form first
member_id = request.session['id']
return render(request, "voc_insert.html" , {"member_id": member_id})
def Voc_insertokFunc(request):# save the submitted post to the db
if request.method == "POST":
try:
# get the group number
gbun = 1 # group number
datas = Voc.objects.all() # read the records
print(datas)
if datas.count() != 0: # if there are records
gbun = Voc.objects.latest('post_id').post_id +1 # read the latest post_id and add 1
Voc(
member_id = request.POST.get("member_id"),
voc_pw = request.POST.get("voc_pw"),
voc_type = request.POST.get("voc_type"),
title = request.POST.get("title"),
cont = request.POST.get("cont"),
bip = request.META['REMOTE_ADDR'],
bdate = datetime.now(),
readcnt = 0,
gnum = gbun,
onum = 0,
nested = 0 # the trailing comma after the last field is optional
).save()
except Exception as e:
print('추가 오류 : ', e)
return HttpResponseRedirect('/Voclist') # show the list after inserting
def Voc_ListFunc(request):# where the voc list is displayed
member_id = request.session['id']
datas = Voc.objects.all().order_by("-gnum","onum") # ordering that accounts for replies
# pagination
paginator = Paginator(datas, 5) # show 5 rows per page
try:
page = request.GET.get("page")# read the requested page
except:
page = 1 # default to page 1 if none was given
try:
data = paginator.page(page)# fetch the requested page
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)# stay on the last page
return render(request, "voc_list.html", {"data": data, "member_id":member_id})
def Voc_ContentFunc(request):
member_id = request.session['id']
data = Voc.objects.get(post_id = request.GET.get('post_id'))
data.readcnt = int(data.readcnt) +1 # increase the view count
data.save() # persist the change
page = request.GET.get("page") # read the page
return render(request, "voc_content.html", { "data_one":data ,"page": page, "member_id": member_id}) # hand it over
def Voc_UpdateFunc(request): # show the record to be edited
try:
member_id = request.session['id']
data = Voc.objects.get(post_id = request.GET.get("post_id")) # read post_id and fetch the matching record
except Exception as e:
print("Voc_UpdateFunc err :",e)
return render(request, "voc_update.html", { 'data_one': data ,"member_id": member_id })
def Voc_UpdateokFunc(request): # update
upRec = Voc.objects.get(post_id = request.POST.get("post_id")) # fetch the record to update
if upRec.voc_pw == request.POST.get("up_voc_pw"): # update only if the entered up_voc_pw matches the stored password
upRec.member_id = request.POST.get("member_id")
upRec.voc_type = request.POST.get("up_voc_type")
upRec.title = request.POST.get("up_title")
upRec.cont= request.POST.get("up_cont")
upRec.save()
else:
return render(request, "voc_error.html")
return HttpResponseRedirect('/Voclist' ) # show the list after updating
def Voc_DeleteFunc(request): # confirmation page asking whether to delete
try:
member_id = request.session['id']
data = Voc.objects.get(post_id= request.GET.get("post_id")) # fetch the record with the matching id
except Exception as e:
print("Voc_DeleteFunc er: ", e)
return render(request, "voc_delete.html", { "data_one": data ,"member_id": member_id })
def Voc_DeleteokFunc(request): # delete
member_id = request.session['id']
delRec = Voc.objects.get(post_id = request.POST.get("post_id"))
if delRec.voc_pw == request.POST.get("del_voc_pw"):
delRec.delete()
return HttpResponseRedirect("/Voclist") # 삭제후 목록보기
else:
return render(request, "voc_error.html", { "member_id": member_id })
def Voc_contentpwckFunc(request): # page asking for the password before showing the content
try:
member_id = request.session['id']
page = request.GET.get("page") # 페이지 읽고
data = Voc.objects.get(post_id= request.GET.get("post_id")) # 아이디가 일치하는 놈 데이터에 넣어
except Exception as e:
print("Voc_contentpwckFunc er: ", e)
return render(request, "voc_content_pwcheck.html", { "data_one": data ,"page": page ,"member_id": member_id })
def Voc_contentpwchokFunc(request): # if the password matches, serve the content
member_id = request.session['id']
data = Voc.objects.get(post_id = request.POST.get("post_id"))
if data.voc_pw == request.POST.get("voc_pw"):
data.readcnt = int(data.readcnt) +1 # increase the view count
data.save()
page = request.GET.get("page")
return render(request, "voc_content.html", { "data_one":data ,"page": page, "member_id": member_id }) # 넘겨줌
else:
return render(request, "voc_error.html", { "member_id": member_id })
def Voc_Search2Func(request): # search
if request.method == "GET":
s_type = request.GET.get("s_type")
s_value= request.GET.get("s_value")
#print(s_type, ' ',s_value ) # title ssss
if s_type == "title":
datas = Voc.objects.filter(title__contains =s_value).order_by("-post_id")
elif s_type == "member_id":
datas = Voc.objects.filter(member_id=s_value).order_by("-post_id")
elif s_type == "voc_type":
datas = Voc.objects.filter(voc_type =s_value).order_by("-post_id")
paginator = Paginator(datas, 5) # show 5 rows per page
page = request.GET.get("page")
try:
member_id = request.session['id']
data = paginator.page(page)
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)
return render(request, "voc_searchlist.html", {'data':data, "s_type":s_type,"s_value": s_value, "member_id": member_id})
else:
return HttpResponseRedirect("/Voclist")
def Voc_SearchFunc(request):  # search (POST)
if request.method == "POST":
s_type = request.POST.get("s_type")
s_value= request.POST.get("s_value")
#print(s_type, ' ',s_value ) # title ssss
if s_type == "title":
datas = Voc.objects.filter(title__contains =s_value).order_by("-post_id")
elif s_type == "member_id":
datas = Voc.objects.filter(member_id=s_value).order_by("-post_id")
elif s_type == "voc_type":
datas = Voc.objects.filter(voc_type =s_value).order_by("-post_id")
        paginator = Paginator(datas, 5)  # show 5 rows per page
page = request.GET.get("page")
try:
member_id = request.session['id']
data = paginator.page(page)
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)
return render(request, "voc_searchlist.html", {'data':data, "s_type":s_type,"s_value": s_value, "member_id": member_id})
else:
return HttpResponseRedirect("/Voclist")
def Voc_replyFunc(request):  # reply form
try:
        data = Voc.objects.get(post_id = request.GET.get("post_id"))  # read the original post being replied to
member_id = request.session['id']
except Exception as e:
print("ReplyFunc err : ", e)
return render(request, "voc_reply.html", {'data_one':data , "member_id":member_id})
def Voc_replyokFunc(request):
if request.method == "POST":
try:
            regnum = int(request.POST.get("gnum"))  # group number of the parent post
            reonum = int(request.POST.get("onum"))  # order of the parent within its group
tempRec = Voc.objects.get(post_id = request.POST.get("post_id"))
old_gnum = tempRec.gnum
old_onum = tempRec.onum
            if old_onum >= reonum and old_gnum == regnum:  # same group and the stored onum is not smaller, so bump onum
                old_onum = old_onum +1  # place the reply right after its parent
            # save the reply
Voc(
member_id = request.POST.get("member_id"),
voc_pw = request.POST.get("voc_pw"),
voc_type = request.POST.get("voc_type"),
title = request.POST.get("title"),
cont = request.POST.get("cont"),
bip = request.META['REMOTE_ADDR'],
bdate = datetime.now(),
readcnt = 0,
gnum = regnum,
onum = old_onum,
                nested = int(request.POST.get("nested")) +1  # cast to int and add 1 (one level deeper)
).save()
except Exception as e:
print("ReplyokFunc err : ", e)
return HttpResponseRedirect("/Voclist")
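# Illustrative note (not part of the original view): replies are threaded with three columns --
# gnum groups a post together with its replies, onum orders rows inside the group, and nested is the
# reply depth. Replying to a post stored as (gnum=7, onum=0, nested=0) therefore saves the reply as
# (gnum=7, onum=1, nested=1), so it sorts right after its parent when the list is ordered by group and
# then onum (see MMRFunc below for that ordering).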
from mycinema.models import Voc, MymovieRe
# My movie recommendations board
def MMRFunc(request):  # show the list when the "my movie recommendations" menu is opened
    # case where there is no search term
member_id = request.session['id']
datas = MymovieRe.objects.all().order_by("-gnum","onum")
    paginator = Paginator(datas, 5)  # show 5 rows per page
try:
        page = request.GET.get("page")  # read the requested page number
    except:
        page = 1  # default to page 1 when no page is given
try:
        data = paginator.page(page)  # fetch the requested page
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
        data = paginator.page(paginator.num_pages)  # fall back to the last page
return render(request, "mmr_list.html", {"data": data , "member_id": member_id})
def MMR_Search2Func(request):  # search (GET)
if request.method == "GET":
s_type = request.GET.get("s_type")
s_value= request.GET.get("s_value")
#print(s_type, ' ',s_value ) # title ssss
if s_type == "title":
datas = MymovieRe.objects.filter(title__contains =s_value).order_by("-post_id")
elif s_type == "member_id":
datas = MymovieRe.objects.filter(member_id=s_value).order_by("-post_id")
elif s_type == "like_genre":
datas = MymovieRe.objects.filter(like_genre =s_value).order_by("-post_id")
        paginator = Paginator(datas, 5)  # show 5 rows per page
page = request.GET.get("page")
try:
member_id = request.session['id']
data = paginator.page(page)
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)
return render(request, "mmr_searchlist.html", {'data':data, "s_type":s_type,"s_value": s_value , "member_id": member_id})
else:
return HttpResponseRedirect("/MMR")
def MMR_SearchFunc(request):  # search (POST)
if request.method == "POST":
s_type = request.POST.get("s_type")
s_value= request.POST.get("s_value")
#print(s_type, ' ',s_value ) # title ssss
if s_type == "title":
datas = MymovieRe.objects.filter(title__contains =s_value).order_by("-post_id")
elif s_type == "member_id":
datas = MymovieRe.objects.filter(member_id=s_value).order_by("-post_id")
elif s_type == "like_genre":
datas = MymovieRe.objects.filter(like_genre =s_value).order_by("-post_id")
        paginator = Paginator(datas, 5)  # show 5 rows per page
page = request.GET.get("page")
try:
member_id = request.session['id']
data = paginator.page(page)
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)
return render(request, "mmr_searchlist.html", {'data':data, "s_type":s_type,"s_value": s_value, "member_id": member_id})
else:
return HttpResponseRedirect("/MMR")
def MMR_insertFunc(request):  # go to the create-post page
member_id = request.session['id']
return render(request, "mmr_insert.html", {"member_id": member_id})
# MymovieRe
def MMR_insertokFunc(request):  # save the post to the DB
if request.method == "POST":
try:
            # get the group number
            gbun = 1  # group number
            datas = MymovieRe.objects.all()  # read the existing rows
print(datas)
            if datas.count() != 0:  # if there are rows already
                gbun = MymovieRe.objects.latest('post_id').post_id +1  # take the latest post_id and add 1
print(request.POST.get("member_id"))
MymovieRe(
member_id = request.POST.get("member_id"),
re_pw = request.POST.get("re_pw"),
like_genre = request.POST.get("like_genre"),
title = request.POST.get("title"),
cont = request.POST.get("cont"),
bip = request.META['REMOTE_ADDR'],
bdate = datetime.now(),
readcnt = 0,
gnum = gbun,
onum = 0,
                nested = 0  # the trailing comma is optional here
            ).save()  # save the new record directly
except Exception as e:
            print('insert error : ', e)
    return HttpResponseRedirect('/MMR')  # back to the list after inserting
# MymovieRe
def MMR_contentFunc(request):
member_id = request.session['id']
data = MymovieRe.objects.get(post_id = request.GET.get('post_id'))
    data.readcnt = int(data.readcnt) +1  # increase the view count
    data.save()  # persist the updated count
    page = request.GET.get("page")  # read the current page number
    return render(request, "mmr_content.html", { "data_one":data ,"page": page, "member_id": member_id})  # pass everything to the template
def MMR_DeleteFunc(request):  # show the record to be deleted (confirmation page)
try:
member_id = request.session['id']
        data = MymovieRe.objects.get(post_id= request.GET.get("post_id"))  # fetch the record with the matching id
except Exception as e:
print("MMR_DeleteFunc er: ", e)
return render(request, "mmr_delete.html", { "data_one": data, "member_id": member_id })
# MymovieRe
def MMR_DeleteokFunc(request):  # perform the deletion
member_id = request.session['id']
delRec = MymovieRe.objects.get(post_id = request.POST.get("post_id"))
if delRec.re_pw == request.POST.get("del_re_pw"):
delRec.delete()
return HttpResponseRedirect("/MMR") # 삭제후 목록보기
else:
return render(request, "voc_error.html", { "member_id": member_id })
def MMR_UpdateFunc(request):  # show the record to be edited
try:
member_id = request.session['id']
data = MymovieRe.objects.get(post_id = request.GET.get("post_id"))
except Exception as e:
print("MMR_UpdateFunc err :",e)
return render(request, "mmr_update.html", { 'data_one': data, "member_id": member_id})
def MMR_UpdateokFunc(request):  # apply the edit
    upRec = MymovieRe.objects.get(post_id = request.POST.get("post_id"))  # fetch the record to update
    if upRec.re_pw == request.POST.get("up_re_pw"):  # update only if the submitted up_re_pw matches the stored password
upRec.member_id = request.POST.get("member_id")
upRec.like_genre = request.POST.get("up_like_genre")
upRec.title = request.POST.get("up_title")
upRec.cont= request.POST.get("up_cont")
upRec.save()
else:
return render(request, "mmr_error.html")
    return HttpResponseRedirect('/MMR' )  # back to the list after updating
def MMR_replyFunc(request):  # reply form
try:
        data = MymovieRe.objects.get(post_id = request.GET.get("post_id"))  # read the original post being replied to
member_id = request.session['id']
except Exception as e:
print("ReplyFunc err : ", e)
return render(request, "mmr_reply.html", {'data_one':data,"member_id":member_id })
def MMR_replyokFunc(request):
if request.method == "POST":
try:
            regnum = int(request.POST.get("gnum"))  # group number of the parent post
            reonum = int(request.POST.get("onum"))  # order of the parent within its group
tempRec = MymovieRe.objects.get(post_id = request.POST.get("post_id"))
old_gnum = tempRec.gnum
old_onum = tempRec.onum
            if old_onum >= reonum and old_gnum == regnum:  # same group and the stored onum is not smaller, so bump onum
                old_onum = old_onum +1  # place the reply right after its parent
            # save the reply
MymovieRe(
member_id = request.POST.get("member_id"),
re_pw = request.POST.get("re_pw"),
like_genre = request.POST.get("like_genre"),
title = request.POST.get("title"),
cont = request.POST.get("cont"),
bip = request.META['REMOTE_ADDR'],
bdate = datetime.now(),
readcnt = 0,
gnum = regnum,
onum = old_onum,
                nested = int(request.POST.get("nested")) +1  # cast to int and add 1 (one level deeper)
).save()
except Exception as e:
print("ReplyokFunc err : ", e)
return HttpResponseRedirect("/MMR")
```
|
{
"source": "jessie0624/KB_QA",
"score": 3
}
|
#### File: KB_QA/subModels/CRFModel.py
```python
from typing import List, Optional
import torch
import torch.nn as nn
'''
CRF
Starting from an example: the part-of-speech (POS) tagging problem.
What is POS tagging?
It is very simple: annotate every word in a sentence with its part of speech. For example, the sentence "Bob drank coffee at Starbucks" is tagged as "Bob (noun) drank (verb) coffee (noun) at (preposition) Starbucks (noun)".
Below we use a conditional random field (CRF) to solve this problem.
Taking the sentence above, there are 5 words. We treat (noun, verb, noun, preposition, noun) as one tag sequence, called l. There are many candidate tag sequences; for example, l could also be (noun, verb, verb, preposition, noun). Among all these candidates we want to pick the most plausible one as the tagging of the sentence.
How do we judge whether a tag sequence is plausible?
Of the two sequences shown above, the second is clearly less plausible than the first, because it tags both the second and the third word as verbs, and a verb directly following a verb usually does not make sense in a sentence.
If we give every tag sequence a score, where a higher score means a more plausible sequence, we can at least say: any sequence that tags a verb right after another verb should get a negative score!
"A verb followed by another verb" is exactly a feature function. We can define a set of feature functions and use it to score a tag sequence, then pick the most plausible sequence accordingly. In other words, every feature function can score a tag sequence, and combining the scores of all feature functions for the same sequence gives that sequence's final score.
Defining the feature functions of a CRF
Formally, a feature function in a CRF is a function that takes four arguments:
1. the sentence s (the sentence whose words we want to tag)
2. i, the position of the i-th word in sentence s
3. l_i, the tag assigned to the i-th word by the sequence being scored
4. l_{i-1}, the tag assigned to the (i-1)-th word by the sequence being scored
Its output is 0 or 1: 0 means the sequence being scored does not satisfy this feature, 1 means it does.
Note: here the feature functions only depend on the current word's tag and the previous word's tag. A CRF built this way is called a linear-chain CRF, a simple special case of CRFs. For simplicity, only linear-chain CRFs are considered here.
From feature functions to probabilities
After defining a set of feature functions, we assign each feature function f_j a weight λ_j.
Now, given a sentence s and a tag sequence l, we can score l with the feature function set defined above,
where i indexes the i-th word of sentence s and j indexes the j-th feature function:
score(l|s) = \sum_{j=1}^m \sum_{i=1}^n λ_j f_j(s, i, l_i, l_{i-1})
There are two sums: the outer sum runs over the feature functions f_j, and the inner sum runs over every word position in the sentence.
Exponentiating and normalising this score gives the probability p(l|s) of the tag sequence l, as follows.
The probability that sentence s has tag sequence l:
p(l|s) = exp[score(l|s)] / \sum_{l'} exp[score(l'|s)]
A few example feature functions
We already gave one example of a feature function; here are a few more concrete ones to build intuition.
f1(s, i, l_i, l_{i-1}) = 1
when l_i is "adverb" and the i-th word ends in "ly"; otherwise f1 = 0.
Naturally, the weight λ1 of f1 should be positive, and the larger λ1 is, the more we prefer tag sequences that tag words ending in "ly" as adverbs.
f2(s, i, l_i, l_{i-1}) = 1
if i = 1, l_i = verb, and the sentence s ends with "?"; otherwise f2 = 0.
Likewise, λ2 should be positive, and the larger λ2 is, the more we prefer tag sequences that tag the first word of a question as a verb.
f3(s, i, l_i, l_{i-1}) = 1
when l_{i-1} is a preposition and l_i is a noun; otherwise f3 = 0. λ3 should also be positive, and the larger λ3 is, the more we believe a preposition should be followed by a noun.
f4(s, i, l_i, l_{i-1}) = 1
if both l_i and l_{i-1} are prepositions; otherwise f4 = 0.
Here λ4 should be negative, and the larger its absolute value, the less we accept tag sequences in which a preposition is followed by another preposition.
That is all it takes to build a conditional random field. To summarise:
To build a CRF, we first define a set of feature functions, each taking the whole sentence s, the current position i, and the tags at positions i and i-1 as input.
Then we assign every feature function a weight, and for every tag sequence l we take the weighted sum over all feature functions; if needed, this sum can be turned into a probability.
CRF compared with logistic regression
In fact, a conditional random field is the sequence version of logistic regression: logistic regression is a log-linear model for classification, while a CRF is a log-linear model for sequence labelling.
CRF compared with HMM
POS tagging can also be solved with an HMM. The HMM takes a generative approach: given the sentence s to be tagged, it models the probability of generating the tag sequence l, as follows:
p(l, s) = p(l_1) \prod_i p(l_i|l_{i-1}) p(w_i|l_i)
where:
p(l_i|l_{i-1}) is the transition probability, e.g. if l_{i-1} is a preposition and l_i is a noun, it is the probability that a noun follows a preposition.
p(w_i|l_i) is the emission probability, e.g. if l_i is "noun" and w_i is the word "ball", it is the probability of observing the word "ball" given the state "noun".
So how do HMM and CRF compare?
The answer: a CRF is far more powerful than an HMM. It can solve every problem an HMM can solve, and many problems an HMM cannot.
Indeed, taking the log of the HMM model above gives:
log p(l, s) = log p(l_1) + \sum_i log p(l_i|l_{i-1}) + \sum_i log p(w_i|l_i)
A CRF can define far more numerous and far richer feature functions. The HMM is inherently local:
in an HMM, the current word depends only on the current tag, and the current tag depends only on the previous tag.
This locality restricts the HMM to the corresponding types of feature functions, as seen above.
A CRF, however, can look at the whole sentence s and define much more global feature functions.
'''
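# A minimal worked example of the scoring formula above (illustrative only; the feature functions and
# weights here are made up and are not used by the CRF class below):
#   sentence s = "Bob drank coffee", candidate tags l = (noun, verb, noun)
#   f1 = 1 when l_{i-1} is "verb" and l_i is "noun"   with weight λ1 = 0.8
#   f2 = 1 when l_{i-1} is "verb" and l_i is "verb"   with weight λ2 = -1.0
#   score(l|s) = 0.8*1 + (-1.0)*0 = 0.8, while the rival tagging (noun, verb, verb) scores
#   0.8*0 + (-1.0)*1 = -1.0, so after exponentiating and normalising, p(l|s) favours the first tagging:
#   exp(0.8) / (exp(0.8) + exp(-1.0)) ≈ 0.86 when these are the only two candidates.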
class CRF(nn.Module):
def __init__(self, num_tags: int=2, batch_first:bool=True) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags:{num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
        ## score of transitioning from START to every other tag (END excluded)
self.start_transitions = nn.Parameter(torch.empty(num_tags))
        ## score of transitioning from every tag (START excluded) to END
self.end_transitions = nn.Parameter(torch.empty(num_tags))
        '''
        From next_score = broadcast_score + self.transitions + broadcast_emissions in _compute_normalizer
        it may look as if transitions[i][j] were the score of moving from tag j to tag i.
        Correction: transitions[i][j] is the score of moving from tag i to tag j.
        '''
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self):
init_range = 0.1
nn.init.uniform_(self.start_transitions, -init_range, init_range)
nn.init.uniform_(self.end_transitions, -init_range, init_range)
nn.init.uniform_(self.transitions, -init_range, init_range)
def __repr__(self):
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(self, emissions: torch.Tensor,
tags: torch.Tensor = None,
mask: Optional[torch.ByteTensor] = None,
reduction: str='mean') -> torch.Tensor:
self.__validate(emissions, tags=tags, mask=mask)
reduction = reduction.lower()
if reduction not in ('none', 'sum','mean', 'token_mean'):
raise ValueError(f'invalid reduction {reduction}')
if mask is None:
            mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
# emissions: seq_len, batch_size, tag_num
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
#shape: (batch_size, )
        # numerator of the log-likelihood: score of the given tag sequence
        numerator = self.__compute_score(emissions=emissions,tags=tags, mask=mask)
        # denominator: log of the partition function over all tag sequences
denominator = self.__compute_normalizer(emissions=emissions, mask=mask)
llh = denominator - numerator
if reduction == 'none':
return llh
elif reduction == 'sum':
return llh.sum()
elif reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.float().sum()
def decode(self, emissions:torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
self.__validate(emissions=emissions, mask=mask)
if mask is None:
            mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
        return self.__viterbi_decode(emissions, mask)
def __validate(self, emissions:torch.Tensor,
tags:Optional[torch.LongTensor] = None,
mask:Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
            raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
                f'expected last dimension of emissions is {self.num_tags}, got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match,'
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}'
)
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:,0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def __compute_score(self, emissions: torch.Tensor,
tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# batch second
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.float()
        ## self.start_transitions: score of transitioning from START to each tag (END excluded)
score = self.start_transitions[tags[0]]
#emssions.shape (seq_len, batch_size, tag_nums)
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
score += self.transitions[tags[i-1], tags[i]] * mask[i]
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
        ## find the tag of the last word of each sample.
# shape: batch_size,
seq_ends = mask.long().sum(dim=0) - 1
        ## the tag of the last word of each sample
last_tags = tags[seq_ends, torch.arange(batch_size)]
        # shape: (batch_size,)  add the END-transition score of each sample's last tag to the running score
score += self.end_transitions[last_tags]
return score
def __compute_normalizer(self, emissions:torch.Tensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emission(seq_len, batch_size, num_tags)
# mask: (seq_len, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# shape: batch_size, num_tag
        # self.start_transitions: score from START to each tag (END excluded)
# start_transitions.shape tag_nums, emission[0].shape (batch_size, tag_size)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
#shape: batch_size, num_tag, 1
broadcast_score = score.unsqueeze(dim=2)
#shape: (batch_size,1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
next_score = broadcast_score + self.transitions + broadcast_emissions
next_score = torch.logsumexp(next_score, dim=1)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# shape (batch_size, num_tags)
score += self.end_transitions
return torch.logsumexp(score, dim=1)
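    # Illustrative note (not part of the original class): __compute_normalizer above is the forward
    # algorithm in log space. At each position the (batch, num_tags, num_tags) grid of
    # previous score + transition score + emission score is collapsed with logsumexp over the previous
    # tag, so the final logsumexp yields log(sum over all tag sequences of exp(score)), i.e. the log
    # partition function that normalises p(tags | emissions).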
def __viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length , batch_size = mask.shape
        # self.start_transitions: score of transitioning from START to every other tag (END excluded)
score = self.start_transitions + emissions[0]
history = []
for i in range(1, seq_length):
broadcast_score = score.unsqueeze(2)
broadcast_emissions = emissions[i].unsqueeze(1)
next_score = broadcast_score + self.transitions + broadcast_emissions
next_score, indices = next_score.max(dim=1)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
history.append(indices)
score += self.end_transitions
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
best_tags.reverse()
best_tags_list.append(best_tags)
return best_tags_list
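# A minimal usage sketch (assumption: this block is not part of the original module). It shows the
# expected shapes with batch_first=True: emissions (batch, seq_len, num_tags), tags and mask
# (batch, seq_len). A bool mask is used because recent PyTorch versions expect bool conditions in
# torch.where.
if __name__ == "__main__":
    torch.manual_seed(0)
    num_tags, batch_size, seq_len = 5, 2, 3
    crf = CRF(num_tags=num_tags, batch_first=True)
    emissions = torch.randn(batch_size, seq_len, num_tags)
    tags = torch.randint(0, num_tags, (batch_size, seq_len))
    mask = torch.ones(batch_size, seq_len, dtype=torch.bool)
    loss = crf(emissions, tags=tags, mask=mask, reduction='mean')  # negative log-likelihood to minimise
    best_paths = crf.decode(emissions, mask=mask)                  # Viterbi-decoded tag ids per sample
    print(loss.item(), best_paths)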
```
|
{
"source": "jessie-233/python-exercise",
"score": 4
}
|
#### File: py-lesson/HW1/Q2.py
```python
def calculate(time):
all = float(principal) * (1 + float(interest)/100 )
all *= (1 + float(interest)/100) ** int(time)
return all
# In[9]:
principal = input('Enter the principal amount (yuan): ')
interest = input('Enter the interest rate (%): ')
time = int(input('Enter the number of automatic renewals: '))
result = calculate(time)
print('After %d automatic renewals the maturity amount is: %.2f yuan' % (time, result))
# In[ ]:
```
#### File: py-lesson/HW1/Q3.py
```python
def calcul(x):
result = 1
if x ==0:
return result
elif x>0:
while x>=1:
result *= x
x -=1
return result
# In[22]:
a = int(input('Enter the first integer: '))
b = int(input('Enter the second integer: '))
if a < 0 or b < 0:
    print('error!')
else:
    c = calcul(a) + calcul(b)
    print('The sum of the two factorials is: %d' % c)
# In[ ]:
```
#### File: py-lesson/HW3/Q3.py
```python
def fact(x):
return x * fact(x-1) if x > 1 else 1
def combi(n, k):
if(k > n):
print('input error!')
else:
print(int(fact(n) / fact(k) / fact(n - k)))
# In[10]:
combi(3,2)
# In[12]:
combi(6,3)
# In[ ]:
```
#### File: py-lesson/HW4/Q1.py
```python
def is_word_palindrome(file: str):
with open(file, 'r') as f:
        k = 0  # line counter
        for line in f.readlines():
            k += 1
            s = list(line.lower())  # lowercase the line and turn it into a list of characters
            txt = []
            flag = True
            for item in s:  # keep only the alphabetic characters
                if item.isalpha():
                    txt.append(item)
            for i in range(int(len(txt)/2)):  # check whether the letters read the same backwards
                if txt[i] != txt[len(txt)-1-i]:
                    flag = False
                    break
            if flag:
                print('Line %d is a palindrome' % k)
            else:
                print('Line %d is not a palindrome' % k)
# In[82]:
is_word_palindrome('Q1.txt')
```
|
{
"source": "jessiebullock/nics-firearms",
"score": 3
}
|
#### File: nics-firearms/scripts/parse-pdf.py
```python
import pandas as pd
import datetime
import pdfplumber
from pdfplumber.utils import within_bbox, collate_chars
from operator import itemgetter
import sys, os
COLUMNS = [
"month",
"state",
"permit",
"permit_recheck",
"handgun",
"long_gun",
"other",
"multiple",
"admin",
"prepawn_handgun",
"prepawn_long_gun",
"prepawn_other",
"redemption_handgun",
"redemption_long_gun",
"redemption_other",
"returned_handgun",
"returned_long_gun",
"returned_other",
"rentals_handgun",
"rentals_long_gun",
"private_sale_handgun",
"private_sale_long_gun",
"private_sale_other",
"return_to_seller_handgun",
"return_to_seller_long_gun",
"return_to_seller_other",
"totals"
]
def parse_month(month_str):
d = datetime.datetime.strptime(month_str, "%B - %Y")
return d.strftime("%Y-%m")
def validate_data(checks):
try:
assert(len(checks) > 0)
except:
raise Exception("No data found.")
## Test vertical totals
# [2:] because first two columns are month and state name
for c in COLUMNS[2:]:
v_total = checks[c].iloc[-1]
v_colsum = checks[c].sum()
try:
assert(v_colsum == (v_total * 2))
except:
raise Exception("Vertical totals don't match on {0}.".format(c))
## Test horizontal totals
h_colsums = checks.fillna(0).sum(axis=1)
h_totals = checks["totals"].fillna(0)
zipped = zip(checks["state"], h_colsums, h_totals)
for state, h_colsum, h_total in zipped:
try:
assert(h_colsum == (h_total * 2))
except:
raise Exception("Horizontal totals don't match on {0}.".format(state))
def parse_value(x):
if pd.isnull(x) or x == "": return None
return int(x.replace(",", ""))
def parse_page(page):
month_crop = page.within_bbox((0, 36, page.width, 58))
month_text = month_crop.extract_text(x_tolerance=2)
month = parse_month(month_text)
sys.stderr.write("\r" + month)
table_crop = page.crop((0, 73, page.width, 500))
edge_xs = list(set(map(itemgetter("x0"), table_crop.edges)))
leftmost_char = min(map(itemgetter("x0"), table_crop.chars))
_table = table_crop.extract_table({
"horizontal_strategy": "text",
"vertical_strategy": "explicit",
"explicit_vertical_lines": [ leftmost_char ] + edge_xs,
"intersection_tolerance": 5,
"text_y_tolerance": 2,
"text_x_tolerance": 2,
})
table = pd.DataFrame([ [ month ] + row for row in _table ])
table.columns = COLUMNS
table[table.columns[2:]] = table[table.columns[2:]].applymap(parse_value)
table.loc[(table["state"] == "llinois"), "state"] = "Illinois"
table = table.loc[lambda df: df["state"].fillna("").str.strip() != ""]
try: validate_data(table)
except: raise Exception("Invalid data for " + month)
return table
def parse_pdf(file_obj):
pdf = pdfplumber.load(file_obj)
# Note: As of Nov. 2019 file, first page is documentation
checks_gen = map(parse_page, pdf.pages[1:])
checks = pd.concat(checks_gen).reset_index(drop=True)
return checks[checks["state"] != "Totals"]
if __name__ == "__main__":
buf = getattr(sys.stdin, 'buffer', sys.stdin)
checks = parse_pdf(buf)
checks.to_csv(sys.stdout, index=False, float_format="%.0f")
sys.stderr.write("\r\n")
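# Usage sketch (file names are hypothetical, not part of the original script): the script reads a NICS
# report PDF from stdin and writes one CSV row per state and month to stdout, e.g.
#   python scripts/parse-pdf.py < nics-firearm-checks.pdf > nics-firearm-checks.csv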
```
|
{
"source": "jessiececilya/hatespeech_2019_dataflow_apachebeam",
"score": 3
}
|
#### File: jessiececilya/hatespeech_2019_dataflow_apachebeam/apachebeam_dataflow_pipeline.py
```python
from __future__ import absolute_import
import argparse
import logging
import re
import sys
import six
import random
import os
import time
import logging
import traceback
import json
from detoxify import Detoxify
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.io.gcp.internal.clients import bigquery
from google.cloud import language
from google.cloud import storage
class DetoxDoFn(beam.DoFn):
def __init__(self):
beam.DoFn.__init__(self)
def process(self,element):
"""Returns an iterator over the words of this element.
"""
"""Detoxifies the provided text."""
from detoxify import Detoxify
import logging
import json
try:
input_text=element['text']
results = {}
results = Detoxify('original').predict(input_text)
results['comment_id']=str(element['comment_id'])
detox_results = {'comment_id': str(element['comment_id']) , 'toxicity': float(results['toxicity']), 'severe_toxicity': float(results['severe_toxicity']), 'obscene': float(results['obscene']), 'threat': float(results['threat']), 'insult': float(results['insult']), 'identity_hate': float(results['identity_hate'])}
except Exception as e:
logging.exception("error in process")
return [detox_results]
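    # Illustrative note (not part of the original DoFn): Detoxify('original').predict(text) returns a
    # dict of per-label float scores; the labels consumed above (toxicity, severe_toxicity, obscene,
    # threat, insult, identity_hate) mirror the BigQuery SCHEMA defined in run() below, with comment_id
    # carried along as the join key.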
def run(argv=None, save_main_session=False):
PROJECT_ID='hatespeech-2019'
SCHEMA='comment_id:STRING, toxicity:FLOAT, severe_toxicity:FLOAT, obscene:FLOAT, threat:FLOAT, insult:FLOAT, identity_hate:FLOAT'
pipeline_options = PipelineOptions(
flags=argv,
runner='DataflowRunner',
project='hatespeech-2019',
job_name='dataflowtrial',
requirements_file='requirements.txt',
temp_location='gs://detoxify/',
region='us-west1')
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p = beam.Pipeline(options=pipeline_options)
# Read the big query table into a PCollection.
query_string = 'SELECT comment_id, text, all_hs FROM `hatespeech-2019.Final_Dataset.Channel_Videos_Comments_Merged` where all_hs=1 LIMIT 20'
detox_data = (
p
| 'QueryTableStdSQL' >> beam.io.Read(beam.io.BigQuerySource(
query=query_string,
use_standard_sql=True))
| 'detoxify' >> beam.ParDo(DetoxDoFn()))
write_data = detox_data | 'writetobigquery'>> beam.io.WriteToBigQuery(table='Toxicity_results',dataset='Final_Dataset',
project='hatespeech-2019',schema= SCHEMA,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
# apache-beam
# https://download.pytorch.org/whl/cpu/torch-1.8.1%2Bcpu-cp37-cp37m-linux_x86_64.whl
# detoxify==0.2.2
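# Usage sketch (an assumption, not part of the original file): running `python apachebeam_dataflow_pipeline.py`
# from an environment authenticated against the hatespeech-2019 GCP project submits the job to Dataflow;
# the three commented lines above are presumably the packages expected in the requirements.txt shipped to
# the workers via the requirements_file pipeline option.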
```
|
{
"source": "jessiedeot/pyoblib",
"score": 2
}
|
#### File: oblib/tests/test_taxonomy.py
```python
import unittest
import datetime
from six import string_types
from oblib import taxonomy
tax = taxonomy.Taxonomy()
class TestTaxonomy(unittest.TestCase):
def test_unit(self):
self.assertIsInstance(taxonomy.Unit(), taxonomy.Unit)
def test_element(self):
self.assertIsInstance(taxonomy.ConceptDetails(), taxonomy.ConceptDetails)
def test_relationship(self):
self.assertIsInstance(taxonomy.Relationship(), taxonomy.Relationship)
def test_taxonomy(self):
self.assertIsInstance(tax, taxonomy.Taxonomy)
self.assertIsInstance(tax.semantic, taxonomy.TaxonomySemantic)
self.assertIsInstance(tax.types, taxonomy.TaxonomyTypes)
self.assertIsInstance(tax.units, taxonomy.TaxonomyUnits)
self.assertIsInstance(tax.numeric_types, taxonomy.TaxonomyNumericTypes)
self.assertIsInstance(tax.generic_roles, taxonomy.TaxonomyGenericRoles)
self.assertIsInstance(tax.ref_parts, taxonomy.TaxonomyRefParts)
self.assertIsInstance(tax.documentation, taxonomy.TaxonomyDocumentation)
class TestTaxonomyNumericTypes(unittest.TestCase):
def test_get_all_numeric_types(self):
self.assertEqual(len(tax.numeric_types.get_all_numeric_types()), 13)
def test_validate_numeric_types(self):
self.assertTrue(tax.numeric_types.is_numeric_type("num-us:insolationItemType"))
self.assertTrue(tax.numeric_types.is_numeric_type("num-us:speedItemType"))
self.assertTrue(tax.numeric_types.is_numeric_type("num-us:luminousIntensityItemType"))
self.assertFalse(tax.numeric_types.is_numeric_type("solar:luminousIntensityItemType"))
self.assertFalse(tax.numeric_types.is_numeric_type("luminousIntensityItemType"))
self.assertFalse(tax.numeric_types.is_numeric_type("num-us:inslationItemType"))
self.assertFalse(tax.numeric_types.is_numeric_type("num-us:speedItemTye"))
self.assertFalse(tax.numeric_types.is_numeric_type("num-us:luminousIntensityIteType"))
class TestTaxonomyRefParts(unittest.TestCase):
def test_get_all_ref_parts(self):
self.assertEqual(len(tax.ref_parts.get_all_ref_parts()), 6)
def test_is_ref_part(self):
self.assertTrue(tax.ref_parts.is_ref_part("Publisher"))
self.assertTrue(tax.ref_parts.is_ref_part("Sample"))
self.assertTrue(tax.ref_parts.is_ref_part("Confidentiality"))
self.assertFalse(tax.ref_parts.is_ref_part("Publishe"))
self.assertFalse(tax.ref_parts.is_ref_part("Sampl"))
self.assertFalse(tax.ref_parts.is_ref_part("Confidentialit"))
class TestTaxonomyGenericRoles(unittest.TestCase):
def test_get_all_generic_roles(self):
self.assertEqual(len(tax.generic_roles.get_all_generic_roles()), 5)
def test_is_generic_role(self):
self.assertTrue(tax.generic_roles.is_generic_role("Generic UML aggregation arc"))
self.assertTrue(tax.generic_roles.is_generic_role("Generic UML inheritance arc"))
self.assertTrue(tax.generic_roles.is_generic_role("Generic UML property arc"))
self.assertFalse(tax.generic_roles.is_generic_role("Genric UML aggregation arc"))
self.assertFalse(tax.generic_roles.is_generic_role("Genric UML inheritance arc"))
self.assertFalse(tax.generic_roles.is_generic_role("Genric UML property arc"))
class TextTaxonomyDocumentation(unittest.TestCase):
def test_get_all_concepts_documentation(self):
self.assertEqual(tax.documentation.get_all_concepts_documentation()["solar:EntitySizeACPower"],
"Size of the entity in megawatts AC.")
self.assertEqual(tax.documentation.get_all_concepts_documentation()["solar:FundDescriptionAnalyst"],
"Name of analyst covering the fund.")
self.assertEqual(tax.documentation.get_all_concepts_documentation()["solar:IncentivesPerformanceBasedIncentiveEscalator"],
"Annual escalation of the performance based incentive value (percent)")
def test_get_concept_documentation(self):
self.assertEqual(tax.documentation.get_concept_documentation("solar:EntitySizeACPower"),
"Size of the entity in megawatts AC.")
self.assertEqual(tax.documentation.get_concept_documentation("solar:FundDescriptionAnalyst"),
"Name of analyst covering the fund.")
self.assertEqual(tax.documentation.get_concept_documentation("solar:IncentivesPerformanceBasedIncentiveEscalator"),
"Annual escalation of the performance based incentive value (percent)")
self.assertIsNone(tax.documentation.get_concept_documentation("solar:NotCorect"))
class TestTaxonomyTypes(unittest.TestCase):
def test_get_all_types(self):
self.assertEqual(67, len(tax.types.get_all_types()))
def test_is_type(self):
self.assertTrue(tax.types.is_type("solar-types:systemAvailabilityModeItemType"))
self.assertTrue(tax.types.is_type("solar-types:deviceItemType"))
self.assertTrue(tax.types.is_type("solar-types:insuranceItemType"))
self.assertFalse(tax.types.is_type("solar:insuranceItemType"))
self.assertFalse(tax.types.is_type("insuranceItemType"))
self.assertFalse(tax.types.is_type("solar-types:systemAvailabilityMoeItemType"))
self.assertFalse(tax.types.is_type("solar-types:deviceIteType"))
self.assertFalse(tax.types.is_type("solar-types:inuranceItemType"))
def test_get_type_enum(self):
self.assertEqual(len(tax.types.get_type_enum("solar-types:projectAssetTypeItemType")), 3)
self.assertEqual(len(tax.types.get_type_enum("solar-types:feeStatusItemType")), 5)
self.assertEqual(len(tax.types.get_type_enum("solar-types:financialTransactionItemType")), 26)
self.assertIsNone(tax.types.get_type_enum("solar-types:fdsfdsadsf"))
class TestTaxonomyUnits(unittest.TestCase):
def test_is_unit(self):
self.assertTrue(tax.units.is_unit("acre"))
self.assertTrue(tax.units.is_unit("acre", attr=None))
self.assertTrue(tax.units.is_unit("acre", "unit_id"))
self.assertFalse(tax.units.is_unit("acre", "unit_name"))
self.assertFalse(tax.units.is_unit("acre", "id"))
with self.assertRaises(ValueError):
found = tax.units.is_unit("acre", "unit_nickname")
with self.assertRaises(ValueError):
found = tax.units.is_unit("acre", attr=14)
self.assertTrue(tax.units.is_unit("oz"))
self.assertTrue(tax.units.is_unit("rad"))
self.assertFalse(tax.units.is_unit("acrre"))
self.assertFalse(tax.units.is_unit("ozz"))
self.assertFalse(tax.units.is_unit("rrad"))
self.assertTrue(tax.units.is_unit("Acre"))
self.assertTrue(tax.units.is_unit("Acre", "unit_name"))
self.assertTrue(tax.units.is_unit("u00001", "id"))
self.assertTrue(tax.units.is_unit("Ounce"))
self.assertTrue(tax.units.is_unit("Radian"))
self.assertFalse(tax.units.is_unit("acrre"))
self.assertFalse(tax.units.is_unit("ozz"))
self.assertFalse(tax.units.is_unit("rrad"))
self.assertTrue(tax.units.is_unit("u00004"))
self.assertFalse(tax.units.is_unit("x0004"))
def test_get_unit(self):
unit = tax.units.get_unit("VAh")
# Test data types
# TODO: checks for strings are commented out for Python 2.7 which fails
# due to unicode issues, need a proper test for both 2.7 and 3.x.
# self.assertIsInstance(unit.id, str)
# self.assertIsInstance(unit.unit_id, str)
# self.assertIsInstance(unit.unit_name, str)
# self.assertIsInstance(unit.ns_unit, str)
# self.assertIsInstance(unit.item_type, str)
self.assertIsInstance(unit.item_type_date, datetime.date)
# self.assertIsInstance(unit.symbol, str)
# self.assertIsInstance(unit.definition, str)
self.assertIsInstance(unit.base_standard, taxonomy.BaseStandard)
self.assertIsInstance(unit.status, taxonomy.UnitStatus)
self.assertIsInstance(unit.version_date, datetime.date)
# Test values
self.assertEqual(unit.id, "u00291")
self.assertEqual(unit.unit_id, "VAh")
self.assertEqual(unit.unit_name, "Volt-ampere-hours")
self.assertEqual(unit.ns_unit, "http://www.xbrl.org/2009/utr")
self.assertEqual(unit.item_type, "energyItemType")
self.assertEqual(unit.item_type_date, datetime.date(2009, 12, 16))
self.assertEqual(unit.symbol, "VAh")
self.assertEqual(unit.definition, "Volt-ampere (VA) hours of energy.")
self.assertEqual(unit.base_standard, taxonomy.BaseStandard.customary)
self.assertEqual(unit.status, taxonomy.UnitStatus.cr)
self.assertEqual(unit.version_date, datetime.date(2017, 7, 12))
unit2 = tax.units.get_unit("u00291")
self.assertEqual(unit, unit2)
unit3 = tax.units.get_unit("Volt-ampere-hours")
self.assertEqual(unit, unit3)
def test_get_all_units(self):
units = tax.units.get_all_units()
self.assertIsInstance(units, dict)
self.assertEqual(len(units.keys()), 296)
class TestTaxonomySemantic(unittest.TestCase):
def test_concept_details(self):
# Data type checks
ci = tax.semantic.get_concept_details("solar:ACDisconnectSwitchMember")
self.assertIsNotNone(ci)
self.assertIsInstance(ci.abstract, bool)
self.assertIsInstance(ci.id, string_types)
# 'six.string_types' is equivalent to "str or unicode" in python2, "str" in python3
self.assertIsInstance(ci.name, string_types)
self.assertIsInstance(ci.nillable, bool)
self.assertIsInstance(ci.period_independent, bool)
self.assertIsInstance(ci.substitution_group, taxonomy.SubstitutionGroup)
self.assertIsInstance(ci.type_name, string_types)
self.assertIsInstance(ci.period_type, taxonomy.PeriodType)
ci = tax.semantic.get_concept_details("solar:MonthlyPeriodAxis")
self.assertIsNone(ci.typed_domain_ref)
ci = tax.semantic.get_concept_details("solar:PVSystemIdentifierAxis")
self.assertIsInstance(ci.typed_domain_ref, string_types)
# Values checks
ci = tax.semantic.get_concept_details("solar:ACDisconnectSwitchMember")
self.assertIsNotNone(ci)
self.assertTrue(ci.abstract)
self.assertEqual(ci.id, "solar:ACDisconnectSwitchMember")
self.assertEqual(ci.name, "ACDisconnectSwitchMember")
self.assertTrue(ci.nillable)
self.assertFalse(ci.period_independent)
self.assertEqual(ci.substitution_group, taxonomy.SubstitutionGroup.item)
self.assertEqual(ci.type_name, "nonnum:domainItemType")
self.assertEqual(ci.period_type, taxonomy.PeriodType.duration)
ci = tax.semantic.get_concept_details("solar:AdvisorInvoicesCounterparties")
self.assertIsNotNone(ci)
self.assertFalse(ci.abstract)
self.assertEqual(ci.id, "solar:AdvisorInvoicesCounterparties")
self.assertEqual(ci.name, "AdvisorInvoicesCounterparties")
self.assertTrue(ci.nillable)
self.assertFalse(ci.period_independent)
self.assertEqual(ci.substitution_group, taxonomy.SubstitutionGroup.item)
self.assertEqual(ci.type_name, "xbrli:stringItemType")
self.assertEqual(ci.period_type, taxonomy.PeriodType.duration)
ci = tax.semantic.get_concept_details("dei:LegalEntityIdentifier")
self.assertIsNotNone(ci)
self.assertFalse(ci.abstract)
self.assertEqual(ci.id, "dei:LegalEntityIdentifier")
self.assertEqual(ci.name, "LegalEntityIdentifier")
self.assertTrue(ci.nillable)
self.assertFalse(ci.period_independent)
self.assertEqual(ci.substitution_group, taxonomy.SubstitutionGroup.item)
self.assertEqual(ci.type_name, "dei:legalEntityIdentifierItemType")
self.assertEqual(ci.period_type, taxonomy.PeriodType.duration)
ci = tax.semantic.get_concept_details("solar:PVSystemIdentifierAxis")
self.assertEqual(ci.typed_domain_ref, "#solar_PVSystemIdentifierDomain")
with self.assertRaises(KeyError):
_ = tax.semantic.get_concept_details("solar:iamnotaconcept")
def test_get_entrypoint_concepts(self):
concepts = tax.semantic.get_entrypoint_concepts("MonthlyOperatingReport")
self.assertEqual(85, len(concepts))
concepts = tax.semantic.get_entrypoint_concepts("MonthlyOperatingReort")
self.assertEqual(concepts, [])
concepts, details = tax.semantic.get_entrypoint_concepts("CutSheet",
details=True)
self.assertEqual(296, len(concepts))
self.assertEqual(296, len(details))
concepts, details = tax.semantic.get_entrypoint_concepts("Utility", True)
self.assertEqual(len(concepts), 8)
for ci in concepts:
self.assertEqual(details[ci], tax.semantic.get_concept_details(ci))
# TODO: SystemInstallation is currently loaded under System.
# self.assertEqual(len(tax.semantic.concepts_ep("SystemInstallationCost")), 10)
# =============================================================================
# def test_get_entrypoint_concepts_details(self):
# self.assertEqual(len(tax.semantic.get_entrypoint_concepts_details("MonthlyOperatingReport")), 84)
# self.assertEqual(tax.semantic.get_entrypoint_concepts_details("MonthlyOperatingReort"), None)
#
# # TODO: 302 is expected but 297 returned, seeking info on why this is from XBRL.
# # self.assertEqual(len(tax.semantic.concepts_info_ep("CutSheet")), 302)
# self.assertEqual(len(tax.semantic.get_entrypoint_concepts_details("CutSheet")), 297)
#
# self.assertEqual(len(tax.semantic.get_entrypoint_concepts_details("Utility")), 8)
#
# for ci in tax.semantic.get_entrypoint_concepts_details("Utility"):
# self.assertEqual(ci, tax.semantic.get_concept_details(ci.id))
#
# =============================================================================
def test_get_all_concepts(self):
self.assertIsNotNone(tax.semantic.get_all_concepts())
self.assertIsInstance(tax.semantic.get_all_concepts(), list)
self.assertIsNotNone(tax.semantic.get_all_concepts(details=True))
self.assertIsNotNone(tax.semantic.get_all_concepts(details=True), dict)
def test_get_all_type_names(self):
self.assertEqual(92, len(tax.semantic.get_all_type_names()))
def test_get_all_entrypoints(self):
# 159 named entry points plus 1 for the "All" entry point:
self.assertEqual(len(tax.semantic.get_all_entrypoints()), 160)
def test_get_entrypoint_relationships(self):
self.assertIsNone(tax.semantic.get_entrypoint_relationships("Arggh"))
self.assertEqual(len(tax.semantic.get_entrypoint_relationships("Utility")), 7)
self.assertEqual(85, len(tax.semantic.get_entrypoint_relationships("MonthlyOperatingReport")))
self.assertEqual(300, len(tax.semantic.get_entrypoint_relationships("CutSheet")))
def test_is_concept(self):
self.assertTrue(tax.semantic.is_concept("solar:EnvironmentalImpactReportExpirationDate"))
self.assertFalse(tax.semantic.is_concept("solar:EnvironmentalImpactReportExirationDate"))
self.assertTrue(tax.semantic.is_concept("solar:AdvisorInvoicesCounterparties"))
self.assertTrue(tax.semantic.is_concept("dei:LegalEntityIdentifier"))
def test_is_entrypoint(self):
self.assertTrue(tax.semantic.is_entrypoint("AssetManager"))
self.assertFalse(tax.semantic.is_entrypoint("AssetMnager"))
self.assertTrue(tax.semantic.is_entrypoint("MonthlyOperatingReport"))
self.assertFalse(tax.semantic.is_entrypoint("MonthlyOperatingRepot"))
def test_unrequired_concepts_removed(self):
"""
In order to save memory concepts that are not required should be removed from memory after the taxonomy
        is loaded. This primarily occurs in the us-gaap and dei namespaces since they are not always used
by the solar namespace. Thus these tests prove that certain concepts are gone.
"""
self.assertFalse("dei:EntityReportingCurrencyISOCode" in tax.semantic._concepts_details)
self.assertFalse("dei:BusinessContactMember" in tax.semantic._concepts_details)
self.assertFalse("us-gaap:TimeSharingTransactionsAllowanceForUncollectibleAccountsOnReceivablesSoldWithRecourse" in tax.semantic._concepts_details)
self.assertFalse("us-gaap:TreasuryStockValueAcquiredCostMethod" in tax.semantic._concepts_details)
```
|
{
"source": "jessieDu314/Project_Log",
"score": 3
}
|
#### File: Project_Log/Estimating_Stock_VaR/Data Preprocess.py
```python
import pandas as pd
import numpy as np
import re
# %%%% functions
## Fill missing values
def fillmissing(x,col,index,benchmark):
for i in range(index,len(x)):
# find missing value
if x.loc[i,col] == benchmark:
# if first is missing, fill using the value next to it
if i == index:
x.loc[i,col] = x.loc[i+1,col]
# if the last one is missing, fill using the value preceeds it
elif i == len(x)-1:
x.loc[i,col] = x.loc[i-1,col]
# otherwise, fill using the average of the two not null values above and after
else:
j = i-1
k = i+1
while x.loc[j,col] == benchmark:
j -= 1
while x.loc[k,col] == benchmark:
k += 1
x.loc[i,col] = np.mean([x.loc[j,col],x.loc[k,col]])
return x
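# Illustrative note (not part of the original script): with benchmark 0, a column [3, 0, 0, 7] becomes
# [3, 5.0, 6.0, 7] -- the first gap takes mean(3, 7) = 5, and the second gap then averages the freshly
# filled 5.0 with 7; a missing first or last entry simply copies its single neighbour.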
## Data Preprocess
def preprocess(x,name,Date,column,index,benchmark,q):
# select the valid starting day
x = x[x['Date'] > Date].copy()
x = x.reset_index().copy()
x = x.drop('index',axis = 1).copy()
# fill na with benchmark we chose
x[column] = x[column].fillna(benchmark).copy()
# fill missing values
x = fillmissing(x,column,index,benchmark).copy()
# calculate daily return
x['lag_'+column] = x[column].shift(1)
x = x.iloc[1:,:].copy().reset_index()
x = x.drop('index',axis = 1).copy()
x['log_ret'] = np.log(x[column])-np.log(x['lag_'+column])
retm = np.mean(x['log_ret'])
x['retv'] = np.square(x['log_ret']-retm)*100
# estimate volatility
x[name+'_20day_vol'] = np.sqrt(x['retv'].rolling(window=20,win_type="boxcar").mean())/10
# estimate quantiles of the distribution of log-returns
x[name+'_quant_ret'] = np.nan
for r in range(len(x)-20):
R_quant = np.quantile(x['log_ret'][r:r+20],q)
x.loc[r+19,name+'_quant_ret'] = R_quant
return x
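# Illustrative note (not part of the original script): because retv is the squared deviation of the daily
# log-return times 100, the *100 and /10 cancel and the <name>_20day_vol column is the 20-day rolling
# root-mean-square deviation of log-returns from their full-sample mean; with q = 0.025 each
# <name>_quant_ret column is the 2.5% empirical quantile of the trailing 20 log-returns, i.e. a rolling
# historical-VaR-style threshold.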
# %%%% Main Dataset: csi300
csi = pd.read_csv('/Users/msstark/Desktop/project/Shanghai Shenzhen CSI 300 Historical Data.csv')
# setting date format
csi['Date'] = csi['Date'].apply(lambda x: re.sub(r',',r'',x))
csi['Day'] = csi['Date'].apply(lambda x: x.split(' ')[1]).astype(int)
csi['Month'] = csi['Date'].apply(lambda x: x.split(' ')[0])
csi['Month'].unique()
csi['Month'] = csi['Month'].map({'Jan':1,'Feb':2,'Mar':3,'Apr':4,'May':5,'Jun':6,
'Jul':7,'Aug':8,'Sep':9,'Oct':10,'Nov':11,'Dec':12})
csi['Year'] = csi['Date'].apply(lambda x: x.split(' ')[2]).astype(int)
csi['Date'] = csi['Year'].astype(str) +'-'+csi['Month'].astype(str)+'-'+csi['Day'].astype(str)
csi['Date'] = pd.to_datetime(csi['Date'], format='%Y-%m-%d')
csi = csi.rename(columns = {'Price':'Close'}).copy()
# convert object type to float
col = ['Close','Open','High','Low']
for c in col:
csi[c] = csi[c].apply(lambda x: re.sub(r',',r'',x)).astype('float')
csi['log_dsprd'] = np.log(csi['High'] - csi['Low'])
csi.columns
# apply preprocess function
csi = preprocess(csi,'csi','2005-01-03','Close',0,0,0.025).copy()
# %%%% spot exchange rate
xr = pd.read_csv('/Users/msstark/Desktop/project/DEXCHUS.csv')
# setting date format
xr['DATE'] = pd.to_datetime(xr['DATE'], format='%Y-%m-%d')
xr = xr.rename(columns = {'DATE':'Date','DEXCHUS':'exR'}).copy()
# we find there's '.' inside our dataset
# replace '.' with '0', which is also the benchmark we chose to fill the missing values
xr['exR'] = xr[['exR']].apply(lambda x: x.replace('.','0'))
# convert object type to float
xr['exR'] = xr['exR'].astype(float)
# apply preprocess function
xr = preprocess(xr,'exR','2005-01-03','exR',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(xr[['Date','exR_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% hsi
hsi = pd.read_csv('^HSI.csv')
# setting date format
hsi['Date'] = pd.to_datetime(hsi['Date'], format='%Y-%m-%d')
# apply preprocess function
hsi = preprocess(hsi,'hsi','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(hsi[['Date','hsi_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% sse
sse = pd.read_csv('SSE Composite Index.csv')
# setting date format
sse['Date'] = pd.to_datetime(sse['Date'], format='%Y-%m-%d')
# apply preprocess function
sse = preprocess(sse,'sse','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(sse[['Date','sse_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% commodities
# corn
corn = pd.read_csv('corn-prices-historical-chart-data.csv')
corn = corn.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
corn['Date'] = pd.to_datetime(corn['Date'], format='%Y-%m-%d')
# apply preprocess function
corn = preprocess(corn,'corn','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(corn[['Date','corn_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# soybean
soybean = pd.read_csv('soybean-prices-historical-chart-data.csv')
soybean = soybean.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
soybean['Date'] = pd.to_datetime(soybean['Date'], format='%Y-%m-%d')
# apply preprocess function
soybean = preprocess(soybean,'soybean','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(soybean[['Date','soybean_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% heating oil
heat = pd.read_csv('New_York_Harbor_No._2_Heating_Oil_Spot_Price_FOB.csv')
heat = heat.rename(columns = {'Day':'Date','New York Harbor No. 2 Heating Oil Spot Price FOB Dollars per Gallon':'Close'})
# setting date format
heat['Date'] = heat['Date'].apply(lambda x: re.sub(r'\/',r'-',x))
heat['Date'] = pd.to_datetime(heat['Date'], format='%m-%d-%Y')
heat = heat.sort_values(by=['Date'],ascending=True).reset_index().copy()
heat = heat.drop('index',axis = 1).copy()
# apply preprocess function
heat = preprocess(heat,'heat','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(heat[['Date','heat_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% 10-year bond yield rate
bond = pd.read_csv('China 10-Year Bond Yield Historical Data.csv')
bond = bond.rename(columns = {'Price':'Close'})
# setting date format
bond['Date'] = bond['Date'].apply(lambda x: re.sub(r',',r'',x))
bond['Day'] = bond['Date'].apply(lambda x: x.split(' ')[1]).astype(int)
bond['Month'] = bond['Date'].apply(lambda x: x.split(' ')[0])
bond['Month'] = bond['Month'].map({'Jan':1,'Feb':2,'Mar':3,'Apr':4,'May':5,'Jun':6,
'Jul':7,'Aug':8,'Sep':9,'Oct':10,'Nov':11,'Dec':12})
bond['Year'] = bond['Date'].apply(lambda x: x.split(' ')[2]).astype(int)
bond['Date'] = bond['Year'].astype(str) +'-'+bond['Month'].astype(str)+'-'+bond['Day'].astype(str)
bond['Date'] = pd.to_datetime(bond['Date'], format='%Y-%m-%d')
bond = bond.sort_values(by=['Date'],ascending=True).reset_index().copy()
bond = bond.drop('index',axis = 1).copy()
# apply preprocess function
bond = preprocess(bond,'bond','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(bond[['Date','bond_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% metal
# platinum
platinum = pd.read_csv('platinum-prices-historical-chart-data.csv')
platinum = platinum.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
platinum['Date'] = pd.to_datetime(platinum['Date'], format='%Y-%m-%d')
# apply preprocess function
platinum = preprocess(platinum,'platinum','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(platinum[['Date','platinum_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# palladium
palladium = pd.read_csv('palladium-prices-historical-chart-data.csv')
palladium = palladium.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
palladium['Date'] = pd.to_datetime(palladium['Date'], format='%Y-%m-%d')
# apply preprocess function
palladium = preprocess(palladium,'palladium','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(palladium[['Date','palladium_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% Final Clean
csi.columns
# select useful features
data_col = ['Date', 'Day', 'Month', 'log_dsprd', 'log_ret', 'retv', 'csi_20day_vol',
'csi_quant_ret', 'exR_quant_ret', 'hsi_quant_ret','sse_quant_ret', 'corn_quant_ret',
'soybean_quant_ret','heat_quant_ret', 'bond_quant_ret', 'platinum_quant_ret',
'palladium_quant_ret']
new_data = csi[data_col].copy()
# rename columns
data_col = list(map(lambda x: x.replace('quant_ret','rstar'), data_col))
new_data.columns = data_col
# save merged raw values
new_data.to_csv('csi_updated.csv')
# %%%% Fill missing values
# setting filling benchmark, leave the first 20 rows out because they are caused by rolling calculation
new_data.iloc[19:] = new_data.iloc[19:].fillna(np.inf).copy()
new_data.isna().sum()
# fill missing valus for each column
col_fill = ['exR_rstar','hsi_rstar', 'sse_rstar', 'corn_rstar','soybean_rstar',
'heat_rstar', 'bond_rstar','platinum_rstar','palladium_rstar']
for c in col_fill:
y = fillmissing(new_data,c,19,np.inf).copy()
# save the full dataset
new_data = y.copy()
new_data.to_csv('csi_updated_full.csv')
```
|
{
"source": "jessielaf/django-scan-models",
"score": 2
}
|
#### File: django-scan-models/scan_models/settings.py
```python
from django.conf import settings
SETTING_NAME = "SCAN_MODELS"
DEFAULT_SETTINGS = {"mapping": {}, "validator": "scan_models.validators.VeeValidate", "camelize": False, "verbosity": 2}
def get_setting(name: str):
"""
Gets the setting for scan models
Args:
name: Name of the setting
Returns: Value fo the setting
"""
setting = getattr(settings, SETTING_NAME) if hasattr(settings, SETTING_NAME) else {}
return setting[name] if name in setting else DEFAULT_SETTINGS[name]
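# A minimal usage sketch (the override values below are hypothetical; only the keys listed in
# DEFAULT_SETTINGS are known to exist):
#
#   # in the Django project's settings.py
#   SCAN_MODELS = {"camelize": True, "verbosity": 1}
#
#   # anywhere in application code
#   get_setting("camelize")   # -> True, taken from SCAN_MODELS
#   get_setting("validator")  # -> "scan_models.validators.VeeValidate", falling back to DEFAULT_SETTINGS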
```
|
{
"source": "jessielaf/jcms-pip",
"score": 2
}
|
#### File: jcms/components/url_parser.py
```python
from jcms.components.util import app_has_attr
from jcms.config import AppInfo
from jcms.generators.url_generator import UrlGenerator
class UrlParser:
"""
This class parses all the generators to actual paths that can be used by django
"""
@staticmethod
def get_urls():
"""
Gets the generators for each app and converts them to paths
:return List of paths added via jcms
:rtype list
"""
urls = []
app_info = AppInfo()
for app_name, app_data in app_info.JCMS_APPS.items():
if app_has_attr(app_name, app_data, 'urlpatterns', list):
for urlpattern in app_data.urlpatterns:
if isinstance(urlpattern, UrlGenerator):
urls += urlpattern.get_urls()
return urls
```
#### File: jcms/components/util.py
```python
import warnings
from .no_config import NoConfig
def warn(warning: str):
"""
A central function to warn the user
:param warning: The warning message for the user
:type warning: str
"""
warnings.warn(warning, Warning)
def app_has_attr(app_name, app_data, variable_name: str, variable_type) -> bool:
"""
Checks if a jcms app has a attribute. If not it warns the user with a nice message
:param app_name: Name of the jcms app
:param app_data: data of the specified app
:param variable_name: Name of the variable that is searched
:param variable_type: Type of the variable that is searched
:return: Bool
"""
has_attribute = hasattr(app_data, variable_name) and (isinstance(getattr(app_data, variable_name), variable_type)
or getattr(app_data, variable_name) == NoConfig)
if not has_attribute:
warn('In app ' + app_name + ': no ' + variable_name + ' in jcms.py found, '
+ variable_name + ' is not instance of ' + variable_type.__name__ + ' or is NoConfig')
return has_attribute
```
#### File: jcms/generators/cms_generator.py
```python
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView
from django.views.generic.edit import DeleteView
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.db.models import Q
from django.urls import path
from abc import ABCMeta
from .url_generator import UrlGenerator
class CMSGenerator(UrlGenerator):
"""
Creates a cms crud for the model
"""
__metaclass__ = ABCMeta
def __init__(self, model, create_edit_list, list_fields=[], icon=''):
self.model = model
self.create_edit_list = create_edit_list
self.list_fields = list_fields and list_fields or create_edit_list
self.icon = icon
self.model_name = model.__name__.lower()
def get_urls(self):
"""
Gets all url objects for to create the urls
:return: List[path]
"""
return [
path(self.model_name + '/', self.list_view(), name=self.model_name + 'List'),
path(self.model_name + '/create/', self.create_view(), name=self.model_name + 'Create'),
path(self.model_name + '/<pk>/', self.detail_view(), name=self.model_name + 'Detail'),
path(self.model_name + '/<pk>/edit/', self.edit_view(), name=self.model_name + 'Edit'),
path(self.model_name + '/<pk>/delete/', self.delete_view(), name=self.model_name + 'Delete'),
]
def base_view_class(self):
"""
Creates a class that has the basic features for each view
:return: BaseViewClass
"""
class BaseViewClass(LoginRequiredMixin, PermissionRequiredMixin):
model = self.model
permission_required = 'jcms.create_' + self.model_name
return BaseViewClass
def create_edit_class(self):
"""
Creates a class for the create and edit view
:return: CreateEditClass
"""
class CreateEditClass(self.base_view_class()):
fields = self.create_edit_list
template_name = 'jcms-admin/crud/edit_or_create.html'
success_url = reverse_lazy('jcms:' + self.model_name + 'List')
return CreateEditClass
def list_view(self):
"""
This creates the view for the list. With permission create_<model_name>
:return: ObjectList
"""
main = self
class ObjectList(self.base_view_class(), ListView):
fields = self.list_fields
template_name = 'jcms-admin/crud/list.html'
def get_queryset(self):
query_set = main.get_search_queryset(self)
if query_set:
return query_set
return super(ObjectList, self).get_queryset()
return ObjectList.as_view()
def detail_view(self):
"""
@todo implement
Detail view. With permission create_<model_name>
:return: ObjectDetail
"""
class ObjectDetail(self.base_view_class(), DetailView):
template_name = 'jcms-admin/crud/detail.html'
return ObjectDetail.as_view()
def create_view(self):
"""
Creates the view for the creation of the model
:return: ObjectCreate
"""
class ObjectCreate(self.create_edit_class(), SuccessMessageMixin, CreateView):
success_message = 'Successfully created ' + self.model_name
return ObjectCreate.as_view()
def edit_view(self):
"""
Creates the view for editing. With permission change_<model_name>
:return: ObjectEdit
"""
class ObjectEdit(self.create_edit_class(), SuccessMessageMixin, UpdateView):
permission_required = 'jcms.change_' + self.model_name
success_message = 'Successfully edited ' + self.model_name
return ObjectEdit.as_view()
def delete_view(self):
"""
Creates the delete view. With permission delete_<model_name>
:return: ObjectDelete
"""
class ObjectDelete(self.base_view_class(), DeleteView):
permission_required = 'jcms.delete_' + self.model_name
success_url = reverse_lazy('jcms:' + self.model_name + 'List')
success_message = 'Successfully deleted ' + self.model_name
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(ObjectDelete, self).delete(request, *args, **kwargs)
return ObjectDelete.as_view()
@staticmethod
def get_search_queryset(generic_list):
"""
Gets the search query for the object
:return: Queryset
"""
search_term = generic_list.request.GET.get('search')
if search_term:
queries = [Q(**{f + '__icontains': search_term}) for f in generic_list.fields]
qs = Q()
for query in queries:
qs = qs | query
return generic_list.model.objects.filter(qs)
return None
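    # Illustrative note (not part of the original class): for fields ['title', 'body'] (example names)
    # and search term "foo", the loop above builds Q(title__icontains='foo') | Q(body__icontains='foo'),
    # i.e. a single case-insensitive OR filter across every listed field.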
```
#### File: jcms/models/generic_menu_item.py
```python
from typing import List
from django.template.defaultfilters import slugify
from jcms.models.single_menu_item import SingleMenuItem
class GenericMenuItem:
"""
Generic menu item that can be seen in the left bar in the cms
"""
def __init__(self, title: str, single_menu_items: List[SingleMenuItem], slug: str = False):
"""
:param slug: The slug the single menu items will have in front of them
:type slug: str
:param title: Display name for the MenuItem
:type title: str
:param single_menu_items: SingleMenuItems that are shown as children
:type single_menu_items: List[SingleMenuItem]
"""
if slug:
self.slug = slug
else:
self.slug = slugify(title)
self.title = title
self.single_menu_items = single_menu_items
```
|
{
"source": "Jessieluu/WIRL_national_education_radio",
"score": 2
}
|
#### File: controllers/admin/access.py
```python
import json
import pysolr
import re
from io import StringIO
from flask import redirect, url_for, render_template, session, flash
from flask.ext.login import current_user, login_required, logout_user, login_user
from NationalEducationRadio.service import get_blueprint
from NationalEducationRadio.service import db
from NationalEducationRadio.models.db.User import User, AccessLevel
from NationalEducationRadio.models.db.Category import Category
from NationalEducationRadio.models.db.Channel import Channel
from NationalEducationRadio.models.db.Audio import Audio
from NationalEducationRadio.models.db.Record import Record
from NationalEducationRadio.models.form.ChannelForm import ChannelForm
from NationalEducationRadio.models.form.LoginForm import LoginForm
from NationalEducationRadio.models.form.AudioForm import AudioForm
from NationalEducationRadio.models.form.KeywordForm import KeywordForm
from NationalEducationRadio.models.form.CaptionForm import CaptionForm
from NationalEducationRadio.models.units.tools import password_encryption, required_to_flash, audio_upload, parse_question_csv, get_solr_data, article_filter
from NationalEducationRadio.models.units.login import backstage_required
from NationalEducationRadio.models.units.keywords import get_keyword
admin = get_blueprint('admin')
@admin.route('/', methods=['GET', 'POST'])
def index():
"""
    Log in to the admin backend
    :return: the admin login page
"""
def login_redirect():
return redirect(url_for('admin.channel'))
if current_user.is_anonymous is not True:
return login_redirect()
form = LoginForm()
if form.validate_on_submit():
admin_user = User.query.filter_by(account=form.account.data, password=password_encryption(form.password.data)).first()
print(admin_user)
if admin_user is not None:
session['level'] = admin_user.level
login_user(admin_user)
return login_redirect()
else:
flash('帳號或密碼錯誤')
required_to_flash(form)
return render_template('admin/index.html', form=form)
@admin.route('/logout')
@login_required
@backstage_required
def logout():
"""
    Log out of the admin backend
    :return: redirect to the admin login page
"""
logout_user()
return redirect(url_for('admin.index'))
@admin.route('/channel')
@login_required
@backstage_required
def channel():
chals = Channel.query.all()
return render_template('admin/channel.html', chals=chals, user=current_user)
@admin.route('/channel/insert', methods=['GET', 'POST'])
@login_required
@backstage_required
def channel_insert():
form = ChannelForm()
if form.validate_on_submit():
new_channel = Channel(channel_name=form.channel_name.data, category=form.channel_category.data, channel_memo=form.channel_memo.data)
return redirect(url_for('admin.channel'))
return render_template('admin/channel_form.html', form=form, behavior="新增")
@admin.route('/channel/update/<id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def channel_update(id):
form = ChannelForm()
if form.validate_on_submit():
edit_channel = Channel.query.filter_by(channel_id=form.channel_id.data).first()
edit_channel.channel_name = form.channel_name.data
edit_channel.category = form.channel_category.data
edit_channel.channel_memo = form.channel_memo.data
return redirect(url_for('admin.channel'))
channel = Channel.query.filter_by(channel_id=id).first()
form.channel_id.data = channel.channel_id
form.channel_name.data = channel.channel_name
form.channel_category.data = channel.category
form.channel_memo.data = channel.channel_memo
return render_template('admin/channel_form.html', channel=channel, form=form, behavior="編輯")
# *****
@admin.route('/channel/delete/<id>', methods=['GET','POST'])
@login_required
@backstage_required
def channel_delete(id):
form = ChannelForm()
if form.validate_on_submit():
del_audio = Audio.query.filter_by(audio_channel=id).all()
for d in range(len(del_audio)):
del_record = Record.query.filter_by(audio_id=del_audio[d].audio_id).all()
for r in range(len(del_record)):
db.session.delete(del_record[r])
db.session.delete(del_audio[d])
del_channel = Channel.query.filter_by(channel_id=form.channel_id.data).first()
db.session.delete(del_channel)
return redirect(url_for('admin.channel'))
channel = Channel.query.filter_by(channel_id=id).first()
form.channel_id.data = channel.channel_id
form.channel_name.data = channel.channel_name
form.channel_category.data = channel.category
form.channel_memo.data = channel.channel_memo
return render_template('admin/channel_form.html', channel=channel, form=form, readonly=1, behavior="刪除")
@admin.route('/audio/<channel_id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def audio(channel_id):
channel = Channel.query.filter_by(channel_id=channel_id).first()
audios = Audio.query.filter_by(channel=channel).all()
return render_template('admin/audio.html', audios=audios, user=current_user, channel=channel)
@admin.route('/audio/add/<channel_id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def audio_add(channel_id):
form = AudioForm()
channel = Channel.query.filter_by(channel_id=channel_id).first()
if form.validate_on_submit():
new_audio = Audio(
audio_name=form.audio_name.data,
channel=channel,
audio_file=audio_upload(form.audio_file.data),
audio_question=parse_question_csv(form.audio_question.data))
return redirect(url_for('admin.audio', channel_id = channel.channel_id))
form.audio_channel.data = channel.channel_id
return render_template('admin/audio_form.html', form=form, behavior="新增", isdelete=0, channel=channel)
@admin.route('/audio/delete/<id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def audio_delete(id):
form = AudioForm()
form.uncheckFileUpload()
if form.validate_on_submit():
del_audio = Audio.query.filter_by(audio_id=form.audio_id.data).first()
if Record.query.filter_by(audio_id=form.audio_id.data).all() is not None:
del_record = Record.query.filter_by(audio_id=form.audio_id.data).all()
for d in range(len(del_record)):
db.session.delete(del_record[d])
if Audio.query.filter_by(audio_id=form.audio_id.data).first() is not None:
del_audio = Audio.query.filter_by(audio_id=form.audio_id.data).first()
db.session.delete(del_audio)
return redirect(url_for('admin.audio', channel_id = del_audio.channel.channel_id))
audio = Audio.query.filter_by(audio_id=id).first()
form.audio_id.data = audio.audio_id
form.audio_name.data = audio.audio_name
form.audio_channel.data = audio.channel.channel_id
return render_template('admin/audio_form.html', form=form, readonly=1, behavior="刪除", isdelete=1, channel=audio.channel)
@admin.route('/audio/edit/<id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def audio_edit(id):
form = AudioForm()
form.uncheckFileUpload()
audio = Audio.query.filter_by(audio_id=id).first()
if form.validate_on_submit():
audio.audio_name = form.audio_name.data
print(form.audio_file.data.filename)
if form.audio_file.data.filename:
audio.audio_file = audio_upload(form.audio_file.data)
if form.audio_question.data.filename:
audio.audio_question = parse_question_csv(form.audio_question.data)
return redirect(url_for('admin.audio', channel_id = audio.channel.channel_id))
form.audio_id.data = audio.audio_id
form.audio_name.data = audio.audio_name
form.audio_channel.data = audio.channel.channel_id
return render_template('admin/audio_form.html', form=form, readonly=0, behavior="編輯", isdelete=0, channel=audio.channel)
@admin.route('/audio/view/<id>', methods=['GET'])
@login_required
@backstage_required
def audio_view(id):
audio = Audio.query.filter_by(audio_id=id).first()
records = Record.query.filter_by(audio=audio).all()
questions = json.load(StringIO(audio.audio_question))
for question in questions:
question['correct'] = 0
question['wrong'] = 0
for record in records:
recordData = json.load(StringIO(record.record_data))
for data in recordData:
for question in questions:
if data['id'] == question['id']:
if data['user_answer'] == data['answer'][0]:
question['correct'] = question['correct'] + 1
else:
question['wrong'] = question['wrong'] + 1
break
return render_template('admin/audio_view.html', questions=questions)
#****
@admin.route('/audio/keyword_view/<id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def keyword_view(id):
audio = Audio.query.filter_by(audio_id=id).first()
success, dontuse, summary = get_solr_data(audio.audio_id)
keywords = audio.keyword
if keywords is None:
keywords = "關鍵字尚未建置"
form = KeywordForm()
if form.validate_on_submit():
solr = pysolr.Solr('http://127.0.0.1/solr/EBCStation', timeout=10)
if "<eps>" in form.keyword_content.data:
solrContent = article_filter(form.keyword_content.data)
else:
solrContent = form.keyword_content.data
top10, summary = get_keyword(form.keyword_id.data, solrContent)
solr.delete(q='audio_id:'+id)
solr.add([
{
"audio_id": form.keyword_id.data,
"content": solrContent,
"summary" : summary
}])
return redirect(url_for('admin.audio', channel_id = audio.channel.channel_id))
form.keyword_id.data = audio.audio_id
return render_template('admin/keyword_view.html', audio=audio, form=form, success=success, keywords=keywords, summary=summary)
# need to add
@admin.route('/audio/caption_view/<id>', methods=['GET', 'POST'])
@login_required
@backstage_required
def caption_view(id):
audio = Audio.query.filter_by(audio_id=id).first()
form = CaptionForm()
caption = ""
if form.validate_on_submit():
solr = pysolr.Solr('http://127.0.0.1/solr/EBCStationCaption', timeout=10)
caption = form.caption_content.data
solr.delete(q='audio_id:'+id)
solr.add([
{
"audio_id": form.caption_id.data,
"caption": form.caption_content.data,
}])
return redirect(url_for('admin.audio', channel_id=audio.channel.channel_id))
form.caption_id.data = audio.audio_id
return render_template('admin/caption_view.html', audio=audio, form=form, caption=caption)
```
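The keyword and caption views above both follow the same Solr update pattern: delete any existing document for the audio id, then add a fresh one. A minimal sketch of that pattern, with placeholder field values (the core URL comes from the original code):
```python
# Delete-then-add reindexing pattern used by keyword_view/caption_view above.
# The document values below are placeholders.
import pysolr

solr = pysolr.Solr('http://127.0.0.1/solr/EBCStation', timeout=10)
audio_id = '42'                              # hypothetical audio id
solr.delete(q='audio_id:' + audio_id)        # drop the stale document, if any
solr.add([{
    'audio_id': audio_id,
    'content': 'full transcript text ...',
    'summary': 'auto-generated summary ...',
}])
```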
#### File: models/db/PlayLog.py
```python
from NationalEducationRadio.service import db
class PlayLog(db.Model):
"""PlayLog ORM
    A record of one audio playback session.
    Attributes
        play_log_id: serial number
        audio: the audio item played
        user: the user who played it
        star_time: time the user entered the audio
        end_time: time the user left the audio
"""
__tablename__ = "play_logs"
play_log_id = db.Column(db.Integer, primary_key=True)
# audio = db.Column(db.Integer, db.ForeignKey("audio.audio_id"))
# user = db.Column(db.Integer, db.ForeignKey("users.id"))
audio = db.Column(db.Integer)
user = db.Column(db.Integer)
star_time = db.Column(db.Text(collation='utf8_unicode_ci'))
end_time = db.Column(db.Text(collation='utf8_unicode_ci'))
def __repr__(self):
return self.play_log_id
```
#### File: models/units/login.py
```python
from flask import redirect, url_for, session
from functools import wraps
from NationalEducationRadio.models.db.User import AccessLevel
def permission_required(permissions):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
"""
            Use a bitwise AND to check the permission flags
"""
if int(session['level']) & permissions > 0:
return f(*args, **kwargs)
elif session.get('level') & AccessLevel.USER > 0:
return redirect(url_for('admin.page'))
else:
return redirect(url_for('radio.login'))
return decorated_function
return decorator
def admin_required(f):
"""
    System administrator permission
    :param f: the next function
    :return: continue to the wrapped function, or redirect
"""
return permission_required(AccessLevel.ADMIN_USER)(f)
def user_required(f):
"""
    User permission
    :param f: the next function
    :return: continue to the wrapped function, or redirect
"""
return permission_required(AccessLevel.USER)(f)
def backstage_required(f):
"""
    Backstage (admin panel) permission
    :param f: the next function
    :return: continue to the wrapped function, or redirect
"""
return permission_required(AccessLevel.ADMIN_USER)(f)
```
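The decorators above grant access when the bitwise AND of the session's level and the required flags is non-zero. A toy illustration with assumed flag values (the real ones live in `AccessLevel`):
```python
# Assumed flag values for illustration; AccessLevel defines the real ones.
ADMIN_USER = 0b10
USER = 0b01

def has_permission(session_level, required):
    return (session_level & required) > 0

print(has_permission(ADMIN_USER | USER, ADMIN_USER))  # True  - admin flag present
print(has_permission(USER, ADMIN_USER))               # False - plain user
```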
|
{
"source": "Jessieluu/word2vec_QADataset",
"score": 3
}
|
#### File: word2vec_QADataset/main/method2.py
```python
from gensim.models import word2vec
from gensim import models
from pprint import pprint
from scipy import spatial
import numpy as np
import time
import os
import json
import csv
import random
def readData(path):
t = time.time()
with open(path, 'r') as reader:
data = json.loads(reader.read())
#print("It took %.2f sec to read data" % (time.time() - t))
return data
# ==================
# method 2
# ==================
def generateAnswer(data):
C_con = np.zeros(250, dtype = float)
QA_con = np.zeros((250, 250), dtype = float)
#ca = data['correct_answer']
anslist = ['1', '2', '3', '4']
C_list = data['corpus']
QA_list = []
for j in range (0, 4):
        QA_list.append(list(data['question']))  # copy, so each candidate starts from its own question words
for word in C_list:
try:
vector = model[word]
except KeyError as e:
continue
for i in range(250):
C_con[i] += vector[i]
for i in range(250):
C_con[i] /= 250
for j in range(0, len(data['answer'])):
QA_list[j].extend(data['answer'][j])
for word in QA_list[j]:
try:
vector = model[word]
except KeyError as e:
continue
for i in range(250):
QA_con[j][i] += vector[i]
for i in range(250):
QA_con[j][i] /= 250
ini = 0
high_cq = 0
i = 0
ans = 0
for qa in QA_con:
cos = 1 - spatial.distance.cosine(C_con, qa)
if cos > ini:
ini = cos
high = qa
ans = i
i += 1
#tag = (anslist[ans] == ca )
#print("The predict answer is %s." %(anslist[ans]))
#print("The correct answer is %s." %ca)
return anslist[ans]
def main():
t = time.time()
pathData = './Result/'
# clear result
f = open('method2_result.txt', 'w')
f.close()
totalData = 1500
wrongid = 0
ansList = []
#====== read data in for loop ======
for i in range(totalData):
#print("Start reading data in" + pathData + str(i) + '.json')
jsonData = readData(pathData + str(i) + '.json')
#print("Start generate output of" + pathData + str(i) + '.json')
#=== check format is correct or not ===
randomNum = True
if len(jsonData['answer']) != 4:
randomNum = False
wrongid += 1
if randomNum == False:
ansTag = str(random.randint(1,4))
elif randomNum == True:
ansTag = generateAnswer(jsonData)
ansList.append(ansTag)
#====== output data =======
# with open("method2_result.txt", 'a+') as file:
# file.write(ansTag)
# file.write("\n")
outputList = []
outputMerge = []
readFileName = 'output.csv'
outputFileName = 'outputMerge.csv'
#read question number from csv file
with open(readFileName, newline= '') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
outputList = list(spamreader)
for i in range(0, len(ansList), 1):
data = str(outputList[i+1][0])+ str(ansList[i])
outputMerge.append(data)
with open(outputFileName, 'w', newline='') as csvfile:
csvfile.write('ID,Answer')
csvfile.write('\n')
for i in outputMerge:
csvfile.write(i)
csvfile.write('\n')
#print("Output done!")
print("=========Finished========")
print("Total wrong corpus format numbers are %d" % wrongid)
print("It took %.2f sec to process" % (time.time() - t))
# print(ansList)
pathModel = './word2vec/wiki/wiki_zh_tw(skip300)/word2vec.model'
model = models.Word2Vec.load(pathModel)
print("Success load model!")
if __name__ == "__main__":
main()
```
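The selection rule in `generateAnswer` boils down to: build one averaged vector for the corpus and one per question-plus-answer candidate, then pick the candidate whose vector has the highest cosine similarity to the corpus vector. A toy sketch with made-up 3-dimensional vectors:
```python
# Toy version of the cosine-similarity answer selection; vectors are made up.
import numpy as np
from scipy import spatial

corpus_vec = np.array([0.9, 0.1, 0.0])
candidate_vecs = [
    np.array([0.8, 0.2, 0.1]),   # candidate "1"
    np.array([0.0, 1.0, 0.0]),   # candidate "2"
]
similarities = [1 - spatial.distance.cosine(corpus_vec, v) for v in candidate_vecs]
best_answer = str(np.argmax(similarities) + 1)   # answers are labelled "1".."4"
print(best_answer)                               # -> "1"
```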
|
{
"source": "Jessie-McColm/Covid-Data-Dashboard",
"score": 4
}
|
#### File: Covid-Data-Dashboard/project/app.py
```python
import json
def read_config_file():
'''Reads the json config file and returns the data in it as a
dictionary
Returns:
config_data (dictionary): A dictionary containing the data in the
config file
'''
with open('config_file.json', 'r', encoding="UTF-8") as config_file:
config_data=json.load(config_file)
return config_data
read_config_file()
def split_time(time_input):
'''
    Splits an input time into hours and minutes, then converts the total into seconds
Parameters:
time_input (string): The time to be split into hours and minutes and
converted into seconds. It should have the format HH:MM where H means
hours and M means minutes
Returns:
        total (int): The total number of seconds that the time_input has been
converted into
'''
    parts = time_input.split(":")
    hours = int(parts[0])
    minutes = int(parts[1])
total=(hours*60*60)+(minutes*60)
return total
```
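For example, `split_time("12:35")` is 12 hours plus 35 minutes expressed in seconds, i.e. 12*3600 + 35*60 = 45300 (the same value asserted by the test further down):
```python
from app import split_time

print(split_time("12:35"))   # 45300
print(split_time("00:01"))   # 60
```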
#### File: Covid-Data-Dashboard/project/covid_data_handler.py
```python
import logging
import sched
import time
import datetime
import json
from uk_covid19 import Cov19API
from app import *
logging.basicConfig(filename='sys.log',level=logging.DEBUG)
API_schedule_covid=sched.scheduler(timefunc=time.time, delayfunc=time.sleep)#this needs to be global
config_file=read_config_file()
API_KEY= config_file["API key:"]
LOCATION=config_file["Location:"]
LOCATION_TYPE=config_file["Location type:"]
NATION=config_file["Nation:"]
def parse_csv_data(csv_filename):
'''
Reads the data from a cvs file given and adds each line to a list
Parameters:
csv_filename (string): The filename of the csv file to be read
Returns:
csv_file_list (list): A list of all the lines in the csv file
'''
with open(csv_filename, "r", encoding="UTF-8") as csv_file:
csv_file_list=[]
for line in csv_file:
csv_file_list.append(line)
return csv_file_list
def process_covid_csv_data(covid_csv_data):
'''
Processes the data given to extract some useful data points from a csv file that has been
read into a list
Parameters:
covid_csv_data (list): A list of lines read from a csv file where each line
contains covid data for a different date
Returns:
last7days_cases (int): The total number of cases over the past week
current_hospital_cases (int): The current number of people in hospital with covid
total_deaths (int): The total number of people who have ever died from covid in the
given area
'''
stripped_cvs=[]
for line in covid_csv_data:
line=line.replace("\n","")
stripped_cvs.append(line)
covid_csv_data=stripped_cvs
future_keys=covid_csv_data[0].split(",")
cvs_dict={}
for section in covid_csv_data:
if section!= covid_csv_data[0]:
section=section.split(",")
field_date=section[3]
temp_dict={}
for loop in range(0,len(section)):
if future_keys[loop]!="date":
temp_dict[future_keys[loop]]=section[loop]
cvs_dict[field_date]=temp_dict
for entry in cvs_dict:
current_hospital_cases=cvs_dict[entry]["hospitalCases"]
if current_hospital_cases!="":
current_hospital_cases=int(current_hospital_cases)
break
last7days_cases=0
count=0
for entry in cvs_dict:
daily_new_cases=cvs_dict[entry]["newCasesBySpecimenDate"]
if daily_new_cases!="":
count+=1
if count>1 and count<=8:
last7days_cases+=int(daily_new_cases)
for entry in cvs_dict:
total_deaths=cvs_dict[entry]["cumDailyNsoDeathsByDeathDate"]
if total_deaths not in ('','None', None):
total_deaths=int(total_deaths)
break
return last7days_cases, current_hospital_cases, total_deaths
def process_covid_API_data(data_dict):
'''
Processes the data given to extract some useful data points from a large dictionary
Parameters:
data_dict (list): A list of dictionaries where each dictionary contains the covid data
        for a specific date
Returns:
last7days_cases (int): The total number of cases over the past week
current_hospital_cases (int): The current number of people in hospital with covid
total_deaths (int): The total number of people who have ever died from covid in the
given area
'''
current_hospital_cases=0
for entry in data_dict:
current_hospital_cases=entry["hospitalCases"]
if current_hospital_cases not in ('','None', None):
current_hospital_cases=int(current_hospital_cases)
break
last7days_cases=0
count=0
for entry in data_dict:
daily_new_cases=entry["newCasesBySpecimenDate"]
if daily_new_cases not in ('','None',None):
count+=1
if count>1 and count<=8:
last7days_cases+=int(daily_new_cases)
total_deaths=0
for entry in data_dict:
total_deaths=entry["cumDailyNsoDeathsByDeathDate"]
if total_deaths not in ('','None', None):
total_deaths=int(total_deaths)
break
return last7days_cases, current_hospital_cases, total_deaths
def covid_API_request(location="Exeter",location_type="ltla"):
'''
    Makes a request to the COVID API to retrieve data about the location, hospital cases,
    total deaths, and new cases, then saves this data to a file called
    covid_data.json or national_covid_data.json depending on whether the location_type
    is a nation or not.
Parameters:
location (string): The name of the location that the retrieved data should be from
location_type (string): The type of the area that the data should be from.
E.g. nation or ltla
Returns:
        data (dict): the full API response; its "data" entry is a list of
        dictionaries, one per date
'''
area_type="areaType="+location_type
area_name="areaName="+location
data_area = [area_type,area_name]
data_fields = {
"date": "date",
"areaName": "areaName",
"areaCode": "areaCode",
"hospitalCases":"hospitalCases",
"cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
"newCasesBySpecimenDate": "newCasesBySpecimenDate",
}
api = Cov19API(filters=data_area, structure=data_fields)
data = api.get_json()
logging.info('covid update done')
if location_type=="nation":
with open('national_covid_data.json', 'w', encoding="UTF-8") as covid_file:
json.dump(data["data"], covid_file, indent=6)
return data
else:
with open('covid_data.json', 'w', encoding="UTF-8") as covid_file:
json.dump(data["data"], covid_file, indent=6)
covid_API_request(NATION,"nation")
return data
def schedule_covid_updates(update_interval=1,update_name=""):
'''
    Schedules an update to covid API data using an event added to a scheduler that is
in the global namespace
Parameters:
update_interval (int): A decimal integer that specifies the time in seconds
between now and when the update should be scheduled for
update_name (string): The name of the update
Returns:
A dictionary consisting of:
event: A scheduled event
update_name (string): The name of the update
'''
if not isinstance(update_interval, int):
seconds=split_time(update_interval)
now_seconds=split_time( datetime.datetime.now().time().strftime("%H:%M"))
update_interval=seconds-now_seconds
if update_interval < 0:
update_interval=update_interval+(24*60*60)
event = API_schedule_covid.enter(update_interval, 1, covid_API_request,(LOCATION,LOCATION_TYPE))
logging.info('covid update scheduled')
return {"event":event,"title":update_name,"type":"covid"}
def repeat_scheduled_covid_update(update_interval,update_name,scheduled_event):
'''
    Schedules a repeating update to covid API data using events added to a scheduler that is in the
global namespace
Parameters:
update_interval (int): A decimal integer that specifies the time in seconds
between now and when the update should be scheduled for
update_name (string): The name of the update
Returns:
A dictionary consisting of:
event: A scheduled event
update_name (string): The name of the update'''
if not isinstance(update_interval, int):
seconds=split_time(update_interval)
future_time=update_interval
now_seconds=split_time( datetime.datetime.now().time().strftime("%H:%M"))
update_interval=seconds-now_seconds
if update_interval < 0:
update_interval=update_interval+(24*60*60)
else:
future_time=update_interval
event=API_schedule_covid.enter(update_interval,1, scheduled_event, (future_time,update_name))
event2=API_schedule_covid.enter(update_interval,1, repeat_scheduled_covid_update, (future_time,update_name,scheduled_event))
logging.info('repeat covid update scheduled')
return {"title":update_name,"events":[event,event2],"type":"covid","content":update_interval}
```
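Scheduling only enters an event on the module-level `sched` scheduler; nothing fires until the scheduler itself is driven, presumably by the caller. A minimal sketch of queueing and running an update (the config file must exist for the module to import):
```python
# Minimal sketch: queue a covid update 10 seconds from now, then drive the
# scheduler. run(blocking=False) fires anything already due and returns.
from covid_data_handler import schedule_covid_updates, API_schedule_covid

update = schedule_covid_updates(update_interval=10, update_name="morning refresh")
print(update["title"], update["type"])   # morning refresh covid
API_schedule_covid.run(blocking=False)
```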
#### File: project/Tests/test_app.py
```python
from app import split_time
def test_split_time():
assert split_time("12:35")==45300
```
|
{
"source": "jessie-murray/numbers-parser",
"score": 3
}
|
#### File: src/numbers_parser/document.py
```python
from functools import lru_cache
from typing import Union, Generator
from numbers_parser.containers import ItemsList
from numbers_parser.model import NumbersModel
from numbers_parser.cell import (
Cell,
MergedCell,
xl_cell_to_rowcol,
xl_range,
)
class Document:
def __init__(self, filename):
self._model = NumbersModel(filename)
def sheets(self):
if not hasattr(self, "_sheets"):
refs = self._model.sheet_ids()
self._sheets = ItemsList(self._model, refs, Sheet)
return self._sheets
class Sheet:
def __init__(self, model, sheet_id):
self._sheet_id = sheet_id
self._model = model
def tables(self):
if not hasattr(self, "_tables"):
refs = self._model.table_ids(self._sheet_id)
self._tables = ItemsList(self._model, refs, Table)
return self._tables
@property
def name(self):
return self._model.sheet_name(self._sheet_id)
class Table:
def __init__(self, model, table_id):
super(Table, self).__init__()
self._model = model
self._table_id = table_id
@property
def name(self):
return self._model.table_name(self._table_id)
@lru_cache(maxsize=None)
def rows(self, values_only: bool = False) -> list:
"""
Return all rows of cells for the Table.
Args:
values_only: if True, return cell values instead of Cell objects
Returns:
rows: list of rows; each row is a list of Cell objects
"""
row_cells = []
if values_only:
for row_num in range(self.num_rows):
row_cells.append(
[
self.cell(row_num, col_num).value
for col_num in range(self.num_cols)
]
)
else:
for row_num in range(self.num_rows):
row_cells.append(
[self.cell(row_num, col_num) for col_num in range(self.num_cols)]
)
return row_cells
@property
@lru_cache(maxsize=None)
def merge_ranges(self) -> list:
merge_cells = self._model.merge_cell_ranges(self._table_id)
ranges = [xl_range(*r["rect"]) for r in merge_cells.values()]
return sorted(set(list(ranges)))
@property
def num_rows(self) -> int:
"""Number of rows in the table"""
return self._model.number_of_rows(self._table_id)
@property
def num_cols(self) -> int:
"""Number of columns in the table"""
return self._model.number_of_columns(self._table_id)
def cell(self, *args) -> Union[Cell, MergedCell]:
if type(args[0]) == str:
(row_num, col_num) = xl_cell_to_rowcol(args[0])
elif len(args) != 2:
raise IndexError("invalid cell reference " + str(args))
else:
(row_num, col_num) = args
if row_num >= self.num_rows or row_num < 0:
raise IndexError(f"row {row_num} out of range")
if col_num >= self.num_cols or col_num < 0:
raise IndexError(f"coumn {col_num} out of range")
return Cell.factory(self._model, self._table_id, row_num, col_num)
def iter_rows(
self,
min_row: int = None,
max_row: int = None,
min_col: int = None,
max_col: int = None,
values_only: bool = False,
) -> Generator[tuple, None, None]:
"""
Produces cells from a table, by row. Specify the iteration range using
the indexes of the rows and columns.
Args:
min_row: smallest row index (zero indexed)
            max_row: largest row index (zero indexed)
            min_col: smallest column index (zero indexed)
            max_col: largest column index (zero indexed)
values_only: return cell values rather than Cell objects
Returns:
generator: tuple of cells
Raises:
IndexError: row or column values are out of range for the table
"""
min_row = min_row or 0
max_row = max_row or self.num_rows - 1
min_col = min_col or 0
max_col = max_col or self.num_cols - 1
if min_row < 0:
raise IndexError(f"row {min_row} out of range")
if max_row > self.num_rows:
raise IndexError(f"row {max_row} out of range")
if min_col < 0:
raise IndexError(f"column {min_col} out of range")
if max_col > self.num_cols:
raise IndexError(f"column {max_col} out of range")
rows = self.rows()
for row_num in range(min_row, max_row + 1):
if values_only:
yield tuple(cell.value for cell in rows[row_num][min_col : max_col + 1])
else:
yield tuple(rows[row_num][min_col : max_col + 1])
def iter_cols(
self,
min_col: int = None,
max_col: int = None,
min_row: int = None,
max_row: int = None,
values_only: bool = False,
) -> Generator[tuple, None, None]:
"""
Produces cells from a table, by column. Specify the iteration range using
the indexes of the rows and columns.
Args:
            min_col: smallest column index (zero indexed)
            max_col: largest column index (zero indexed)
            min_row: smallest row index (zero indexed)
            max_row: largest row index (zero indexed)
values_only: return cell values rather than Cell objects
Returns:
generator: tuple of cells
Raises:
IndexError: row or column values are out of range for the table
"""
min_row = min_row or 0
max_row = max_row or self.num_rows - 1
min_col = min_col or 0
max_col = max_col or self.num_cols - 1
if min_row < 0:
raise IndexError(f"row {min_row} out of range")
if max_row > self.num_rows:
raise IndexError(f"row {max_row} out of range")
if min_col < 0:
raise IndexError(f"column {min_col} out of range")
if max_col > self.num_cols:
raise IndexError(f"column {max_col} out of range")
rows = self.rows()
for col_num in range(min_col, max_col + 1):
if values_only:
yield tuple(row[col_num].value for row in rows[min_row : max_row + 1])
else:
yield tuple(row[col_num] for row in rows[min_row : max_row + 1])
```
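A short usage sketch of the classes above; the file and sheet/table names are borrowed from tests/test_folder.py further down:
```python
from numbers_parser import Document

doc = Document("tests/data/test-5.numbers")
sheets = doc.sheets()
table = sheets["ZZZ_Sheet_1"].tables()["XXX_Table_1"]
print(table.num_rows, table.num_cols)
for row in table.iter_rows(values_only=True):
    print(row)
print(table.cell("B2").value)   # same cell as table.cell(1, 1)
```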
#### File: src/numbers_parser/iwafile.py
```python
import struct
import snappy
from functools import partial
from numbers_parser.mapping import ID_NAME_MAP
from numbers_parser.exceptions import NotImplementedError
from google.protobuf.internal.decoder import _DecodeVarint32
from google.protobuf.json_format import MessageToDict
from numbers_parser.generated.TSPArchiveMessages_pb2 import ArchiveInfo
class IWAFile(object):
def __init__(self, chunks, filename=None):
self.chunks = chunks
self.filename = filename
@classmethod
def from_buffer(cls, data, filename=None):
try:
chunks = []
while data:
chunk, data = IWACompressedChunk.from_buffer(data, filename)
chunks.append(chunk)
return cls(chunks, filename)
except Exception as e: # pragma: no cover
if filename:
raise ValueError("Failed to deserialize " + filename) from e
else:
raise
def to_dict(self):
try:
return {"chunks": [chunk.to_dict() for chunk in self.chunks]}
except Exception as e: # pragma: no cover
if self.filename:
raise ValueError("Failed to serialize " + self.filename) from e
else:
raise
class IWACompressedChunk(object):
def __init__(self, archives):
self.archives = archives
def __eq__(self, other):
return self.archives == other.archives # pragma: no cover
@classmethod
def _decompress_all(cls, data):
while data:
header = data[:4]
first_byte = header[0]
if not isinstance(first_byte, int):
first_byte = ord(first_byte)
if first_byte != 0x00:
raise ValueError( # pragma: no cover
"IWA chunk does not start with 0x00! (found %x)" % first_byte
)
unpacked = struct.unpack_from("<I", bytes(header[1:]) + b"\x00")
length = unpacked[0]
chunk = data[4 : 4 + length]
data = data[4 + length :]
try:
yield snappy.uncompress(chunk)
except Exception: # pragma: no cover
# Try to see if this data isn't compressed in the first place.
# If this data is still compressed, parsing it as Protobuf
# will almost definitely fail anyways.
yield chunk
@classmethod
def from_buffer(cls, data, filename=None):
data = b"".join(cls._decompress_all(data))
archives = []
while data:
archive, data = IWAArchiveSegment.from_buffer(data, filename)
archives.append(archive)
return cls(archives), None
def to_dict(self):
return {"archives": [archive.to_dict() for archive in self.archives]}
class ProtobufPatch(object):
def __init__(self, data):
self.data = data
def __eq__(self, other):
return self.data == other.data # pragma: no cover
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.data) # pragma: no cover
def to_dict(self):
return message_to_dict(self.data)
@classmethod
def FromString(cls, message_info, proto_klass, data):
        # Recent Numbers files do not appear to store data this way
assert len(message_info.diff_field_path.path) != 1
return cls(proto_klass.FromString(data))
class IWAArchiveSegment(object):
def __init__(self, header, objects):
self.header = header
self.objects = objects
def __eq__(self, other):
return (
self.header == other.header and self.objects == other.objects
) # pragma: no cover
def __repr__(self):
return "<%s identifier=%s objects=%s>" % ( # pragma: no cover
self.__class__.__name__,
self.header.identifier,
repr(self.objects).replace("\n", " ").replace(" ", " "),
)
@classmethod
def from_buffer(cls, buf, filename=None):
archive_info, payload = get_archive_info_and_remainder(buf)
if not repr(archive_info):
raise ValueError(
"Segment doesn't seem to start with an ArchiveInfo!"
) # pragma: no cover
payloads = []
n = 0
for message_info in archive_info.message_infos:
try:
if message_info.type == 0 and archive_info.should_merge and payloads:
base_message = archive_info.message_infos[
message_info.base_message_index
]
klass = partial(
ProtobufPatch.FromString,
message_info,
ID_NAME_MAP[base_message.type],
)
else:
klass = ID_NAME_MAP[message_info.type]
except KeyError: # pragma: no cover
raise NotImplementedError(
"Don't know how to parse Protobuf message type "
+ str(message_info.type)
)
try:
message_payload = payload[n : n + message_info.length]
if hasattr(klass, "FromString"):
output = klass.FromString(message_payload)
else:
output = klass(message_payload)
except Exception as e: # pragma: no cover
raise ValueError(
"Failed to deserialize %s payload of length %d: %s"
% (klass, message_info.length, e)
)
payloads.append(output)
n += message_info.length
return cls(archive_info, payloads), payload[n:]
def to_dict(self):
return {
"header": header_to_dict(self.header),
"objects": [message_to_dict(message) for message in self.objects],
}
def message_to_dict(message):
if hasattr(message, "to_dict"):
return message.to_dict()
output = MessageToDict(message)
output["_pbtype"] = type(message).DESCRIPTOR.full_name
return output
def header_to_dict(message):
output = message_to_dict(message)
for message_info in output["messageInfos"]:
del message_info["length"]
return output
def get_archive_info_and_remainder(buf):
msg_len, new_pos = _DecodeVarint32(buf, 0)
n = new_pos
msg_buf = buf[n : n + msg_len]
n += msg_len
return ArchiveInfo.FromString(msg_buf), buf[n:]
```
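`get_archive_info_and_remainder` relies on each segment starting with a varint length prefix for the ArchiveInfo message. A pure-Python sketch of that base-128 varint decoding (equivalent in spirit to `_DecodeVarint32`, shown here only to illustrate the framing):
```python
def decode_varint(buf, pos=0):
    """Decode a little-endian base-128 varint; return (value, new_position)."""
    result = shift = 0
    while True:
        byte = buf[pos]
        result |= (byte & 0x7F) << shift
        pos += 1
        if not byte & 0x80:          # high bit clear -> last byte of the varint
            return result, pos
        shift += 7

# 0xAC 0x02 encodes 300: the message body would start at offset 2.
length, offset = decode_varint(b"\xac\x02" + b"payload...")
print(length, offset)   # 300 2
```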
#### File: numbers-parser/tests/test_folder.py
```python
import pytest
from numbers_parser import Document
XXX_TABLE_1_REF = [
[None, "XXX_COL_1", "XXX_COL_2", "XXX_COL_3", "XXX_COL_4", "XXX_COL_5"],
["XXX_ROW_1", "XXX_1_1", "XXX_1_2", "XXX_1_3", "XXX_1_4", "XXX_1_5"],
["XXX_ROW_2", "XXX_2_1", "XXX_2_2", None, "XXX_2_4", "XXX_2_5"],
["XXX_ROW_3", "XXX_3_1", None, "XXX_3_3", "XXX_3_4", "XXX_3_5"],
]
def test_read_folder():
doc = Document("tests/data/test-5.numbers")
sheets = doc.sheets()
tables = sheets["ZZZ_Sheet_1"].tables()
data = tables["XXX_Table_1"].rows(values_only=True)
assert data == XXX_TABLE_1_REF
```
|
{
"source": "jessiepullaro414/Toastifai",
"score": 3
}
|
#### File: jessiepullaro414/Toastifai/main.py
```python
import RPi.GPIO as GPIO
import time
import serial
from twilio.rest import TwilioRestClient
from clarifai import rest
from clarifai.rest import ClarifaiApp
import cv2
from PIL import Image, ImageEnhance
ser = serial.Serial('/dev/ttyACM4',9600)
GPIO.setmode(GPIO.BCM)
GPIO.setup(2,GPIO.OUT)
GPIO.output(2,GPIO.HIGH)
#while True :
# val = int(ser.readline())
# print(val)
# if val == 1 :
# break
file = "current_image.png"
camera_port = 0
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)
app = ClarifaiApp("UHgwtsynZgd19kC1f46FoNulBsdglENG5oF9Dwwc", "VBP5o33oTh6Whpt7nZyg-gAa1GV9ZoKDZktJ4vBe")#pulls the api keys from keys.py
model = app.models.get("toast")
toast_perfect = 0.3
toast_done = False
toast_counter = 0
# predict with samples
def get_image():
# read is the easiest way to get a full image out of a VideoCapture object.
retval, im = camera.read()
return im
while (toast_done != True or toast_counter == 3):
camera_capture = get_image()
cv2.imwrite(file, camera_capture)
#PIL stuff
img = Image.open(file)
converter = ImageEnhance.Color(img)
img2 = converter.enhance(2.0)
img2.save(file)
model_json = model.predict_by_filename(file)
    print(model_json[u'outputs'][0][u'data'][u'concepts'][0][u'value'])
if (toast_counter < 3 and float(model_json[u'outputs'][0][u'data'][u'concepts'][0][u'value']) > .48):
toast_counter = toast_counter + 1
print ("toast done is true")
print (toast_counter)
if (toast_counter >= 3):
toast_done = True
print ("Toast RIP")
break
else:
print ("toast done is false")
continue
print (toast_perfect)
toast_perfect = model_json[u'outputs'][0][u'data'][u'concepts'][0][u'value']
print ("\n")
client = TwilioRestClient(account='AC24b3c0ad866586038e4f5ca818ae30ed',token='3bab3d6d5bef27855cfb5bada3c39346')
client.messages.create(to="+14077583284",from_="+14079019282", body="TOAST")
GPIO.setmode(GPIO.BCM)
GPIO.setup(2,GPIO.OUT)
GPIO.output(2,GPIO.LOW)
time.sleep(1)
GPIO.output(2,GPIO.HIGH)
GPIO.cleanup()
```
#### File: web/handlers/basic.py
```python
import tornado.web
from bs4 import BeautifulSoup
from yattag import Doc
class Handler(tornado.web.RequestHandler):
def cr(self):
self.yat = Doc().tagtext()
def getgen(self):
return "Hello, world!"
def get(self):
soup = BeautifulSoup(self.getgen(), "html.parser")
self.write(soup.prettify())
```
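A sketch of serving the handler above from a Tornado application; the import path, route and port are illustrative assumptions:
```python
# Hypothetical wiring; adjust the import path to wherever Handler lives.
import tornado.ioloop
import tornado.web
from handlers.basic import Handler   # assumed module path

def make_app():
    return tornado.web.Application([(r"/", Handler)])

if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()
```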
|
{
"source": "JessieRamaux/Food-Volume-Estimation",
"score": 2
}
|
#### File: Food Detection/src/model.py
```python
import argparse
import functools
import itertools
import os
import six
import tensorflow as tf
import dataset_utils
import utils
from inception_resnet_v2 import inception_resnet_v2, inception_resnet_v2_arg_scope
tf.logging.set_verbosity(tf.logging.INFO)
# =============== CONFIGURATION ===============
IMAGE_SIZE = 299
IMAGES_PER_GPU = 8
GPU_COUNT = 2
BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT
LEARNING_RATE = 0.005
DECAY = 0.9
VALIDATION_STEPS = 50
STEPS_PER_EPOCH = 101000 / BATCH_SIZE
VARIABLE_STRATEGY = 'GPU'
WEIGHT_DECAY = 2e-4
MOMENTUM = 0.9  # assumed value: referenced by the MomentumOptimizer below but not defined in the original source
def tower_fn(is_training, feature, label, num_classes):
"""Build computation tower
Args:
is_training: true if is training graph.
feature: a Tensor.
label: a Tensor.
Returns:
A tuple with the loss for the tower, the gradients and parameters, and
predictions.
"""
with tf.contrib.framework.arg_scope(inception_resnet_v2_arg_scope()):
logits, endpoints = inception_resnet_v2(feature,
num_classes=num_classes,
is_training=is_training)
tower_pred = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits)
}
if label:
tower_loss = tf.losses.sparse_softmax_cross_entropy(
logits=logits,
labels=label)
aux_tower_loss = 0.4 * \
tf.losses.sparse_softmax_cross_entropy(
logits=endpoints['AuxLogits'], labels=label)
tower_loss = tf.reduce_mean(tower_loss + aux_tower_loss)
model_params = tf.trainable_variables()
tower_loss += WEIGHT_DECAY * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params])
tower_grad = tf.gradients(tower_loss, model_params)
return tower_loss, zip(tower_grad, model_params), tower_pred
return None, None, tower_pred
def get_model_fn(num_classes):
"""
Returns a model function given the number of classes.
"""
def model_fn(features, labels, mode):
"""Inception_Resnet_V2 model body.
Support single host, one or more GPU training. Parameter distribution can
be either one of the following scheme.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
Returns:
A EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = None
if not data_format:
if GPU_COUNT == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if GPU_COUNT == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = GPU_COUNT
device_type = 'gpu'
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
if VARIABLE_STRATEGY == 'CPU':
device_setter = utils.local_device_setter(
worker_device=worker_device)
elif VARIABLE_STRATEGY == 'GPU':
device_setter = utils.local_device_setter(
ps_device_type='gpu',
worker_device=worker_device,
ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
GPU_COUNT, tf.contrib.training.byte_size_load_fn))
with tf.variable_scope('', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds = tower_fn(is_training, tower_features[i],
tower_labels and tower_labels[i], num_classes)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
if mode == 'train' or mode == 'eval':
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_ing'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(
tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if VARIABLE_STRATEGY == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
loss = tf.reduce_mean(tower_losses, name='loss')
examples_sec_hook = utils.ExamplesPerSecondHook(
BATCH_SIZE, every_n_steps=10)
global_step = tf.train.get_global_step()
learning_rate = tf.constant(LEARNING_RATE)
tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
initializer_hook = utils.IteratorInitializerHook()
train_hooks = [initializer_hook, logging_hook, examples_sec_hook]
optimizer = tf.train.MomentumOptimizer(
learning_rate=LEARNING_RATE, momentum=MOMENTUM)
# Create single grouped train op
train_op = [
optimizer.apply_gradients(gradvars, global_step=global_step)
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities']
for p in tower_preds], axis=0)
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
'accuracy':
tf.metrics.accuracy(stacked_labels, predictions['classes'])
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics)
else:
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities']
for p in tower_preds], axis=0),
'features': tf.concat([feature for feature in features], axis=0)
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions)
return model_fn
def input_fn(dataset_dir, split_name, is_training):
"""Create input graph for model.
Args:
        split_name: one of 'train', 'validation' or 'eval'.
Returns:
two lists of tensors for features and labels, each of GPU_COUNT length.
"""
with tf.device('/cpu:0'):
tfrecord_file_pattern = '%s_%s_*.tfrecord' % (
os.path.basename(dataset_dir), "%s")
file_pattern_for_counting = '%s' % (os.path.basename(dataset_dir))
dataset = dataset_utils.get_split(split_name, dataset_dir,
tfrecord_file_pattern, file_pattern_for_counting)
image_batch, _, label_batch = dataset_utils.load_batch(
dataset, BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, is_training)
if GPU_COUNT <= 1:
# No GPU available or only 1 GPU.
return [image_batch], [label_batch]
# Note that passing num=batch_size is safe here, even though
# dataset.batch(batch_size) can, in some cases, return fewer than batch_size
# examples. This is because it does so only when repeating for a limited
# number of epochs, but our dataset repeats forever.
image_batch = tf.unstack(image_batch, num=BATCH_SIZE, axis=0)
label_batch = tf.unstack(label_batch, num=BATCH_SIZE, axis=0)
feature_shards = [[] for i in range(GPU_COUNT)]
label_shards = [[] for i in range(GPU_COUNT)]
for i in range(BATCH_SIZE):
idx = i % GPU_COUNT
feature_shards[idx].append(image_batch[i])
label_shards[idx].append(label_batch[i])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
label_shards = [tf.parallel_stack(x) for x in label_shards]
return feature_shards, label_shards
def get_experiment_fn(dataset_dir):
"""
Returns the experiment function given a dataset_dir
"""
# Get the number of classes from the label file
_, num_classes = dataset_utils.read_label_file(dataset_dir)
def experiment_fn(run_config, hparams):
"""
This is a method passed to tf.contrib.learn.learn_runner that will
return an instance of an Experiment.
"""
train_input_fn = functools.partial(
input_fn,
dataset_dir=dataset_dir,
split_name='train',
is_training=True)
eval_input_fn = functools.partial(
input_fn,
dataset_dir=dataset_dir,
split_name='validation',
is_training=False)
classifier = tf.estimator.Estimator(
model_fn=get_model_fn(num_classes),
config=run_config)
return tf.contrib.learn.Experiment(
classifier,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None, # Train forever
eval_steps=VALIDATION_STEPS)
return experiment_fn
def train(model_dir, dataset_dir):
"""
Begins training the entire architecture.
"""
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
intra_op_parallelism_threads=0, # Autocompute how many threads to run
gpu_options=tf.GPUOptions(force_gpu_compatible=True))
config = tf.contrib.learn.RunConfig(
session_config=sess_config, model_dir=model_dir)
tf.contrib.learn.learn_runner.run(
get_experiment_fn(dataset_dir),
run_config=config,
hparams=tf.contrib.training.HParams())
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Train a model against a dataset.')
PARSER.add_argument('--model', dest='model',
required=True,
help='The name of the model\'s folder.')
PARSER.add_argument('--dataset', dest='dataset',
required=True,
help='The folder corresponding to this model\'s dataset.')
if not os.path.exists(PARSER.parse_args().model):
raise Exception("Path %s doesn't exist." % PARSER.parse_args().model)
if not os.path.exists(PARSER.parse_args().dataset):
raise Exception("Path %s doesn't exist." % PARSER.parse_args().dataset)
    # A (supposed) 5% boost on certain GPUs from using faster convolution operations
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
train(PARSER.parse_args().model, PARSER.parse_args().dataset)
```
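The sharding at the end of `input_fn` is plain round-robin assignment: sample i of the batch goes to GPU i % GPU_COUNT, so every tower receives an equal slice. A plain-Python illustration:
```python
# Plain-Python view of the round-robin sharding done with tf.unstack above.
BATCH_SIZE = 8
GPU_COUNT = 2

batch = list(range(BATCH_SIZE))              # stand-ins for unstacked tensors
shards = [[] for _ in range(GPU_COUNT)]
for i, sample in enumerate(batch):
    shards[i % GPU_COUNT].append(sample)
print(shards)   # [[0, 2, 4, 6], [1, 3, 5, 7]]
```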
#### File: Segmentation Mask/RefineNet/multi_gpu_train.py
```python
import tensorflow as tf
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.framework import ops as _ops
import time
import shutil
import datetime
import os
import cv2
import pickle
import numpy as np
from tensorflow.contrib import slim
import sys
sys.path.append(os.getcwd())
from nets import model as model
from utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from utils.pascal_voc import pascal_segmentation_lut
from utils.augmentation import (distort_randomly_image_color,flip_randomly_left_right_image_with_annotation,
scale_randomly_image_with_annotation_with_fixed_size_output)
tf.app.flags.DEFINE_integer('batch_size', 3, '')
tf.app.flags.DEFINE_integer('train_size', 512, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 60000, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_integer('num_classes', 21, '')
tf.app.flags.DEFINE_string('gpu_list', '0,1', '')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints/', '')
tf.app.flags.DEFINE_string('logs_path', 'logs/', '')
tf.app.flags.DEFINE_boolean('restore', True, 'whether to resotre from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 2000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 10, '')
tf.app.flags.DEFINE_integer('save_image_steps', 100, '')
tf.app.flags.DEFINE_string('training_data_path', 'data/pascal_augmented_train.tfrecords', '')
tf.app.flags.DEFINE_string('pretrained_model_path', 'data/resnet_v1_101.ckpt', '')
tf.app.flags.DEFINE_integer('decay_steps',20000,'')
tf.app.flags.DEFINE_integer('decay_rate',0.1,'')
FLAGS = tf.app.flags.FLAGS
def tower_loss(images, annotation,class_labels,reuse_variables=None):
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
logits = model.model(images, is_training=True)
pred = tf.argmax(logits, dimension=3)
model_loss = model.loss(annotation, logits,class_labels)
total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# add summary
if reuse_variables is None:
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('total_loss', total_loss)
return total_loss, model_loss,pred
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def build_image_summary():
log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
log_image_name = tf.placeholder(tf.string)
log_image = gen_logging_ops._image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)
_ops.add_to_collection(_ops.GraphKeys.SUMMARIES, log_image)
return log_image, log_image_data, log_image_name
def main(argv=None):
gpus = range(len(FLAGS.gpu_list.split(',')))
pascal_voc_lut = pascal_segmentation_lut()
class_labels = pascal_voc_lut.keys()
with open('data/color_map', 'rb') as f:
color_map = pickle.load(f)
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
now = datetime.datetime.now()
StyleTime = now.strftime("%Y-%m-%d-%H-%M-%S")
os.makedirs(FLAGS.logs_path+StyleTime)
if not os.path.exists(FLAGS.checkpoint_path):
os.makedirs(FLAGS.checkpoint_path)
else:
if not FLAGS.restore:
if os.path.exists(FLAGS.checkpoint_path):
shutil.rmtree(FLAGS.checkpoint_path)
os.makedirs(FLAGS.checkpoint_path)
filename_queue = tf.train.string_input_producer([FLAGS.training_data_path], num_epochs=1000)
image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)
image, annotation = flip_randomly_left_right_image_with_annotation(image, annotation)
image = distort_randomly_image_color(image)
image_train_size=[FLAGS.train_size,FLAGS.train_size]
resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(image, annotation,
image_train_size)
resized_annotation = tf.squeeze(resized_annotation)
image_batch, annotation_batch = tf.train.shuffle_batch([resized_image, resized_annotation],
batch_size=FLAGS.batch_size*len(gpus), capacity=1000, num_threads=4,
min_after_dequeue=500)
# split
input_images_split = tf.split(image_batch, len(gpus))
input_segs_split = tf.split(annotation_batch, len(gpus))
learning_rate = tf.Variable(FLAGS.learning_rate, trainable=False)
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
# add summary
tf.summary.scalar('learning_rate', learning_rate)
opt = tf.train.AdamOptimizer(learning_rate)
tower_grads = []
reuse_variables = None
for i, gpu_id in enumerate(gpus):
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('model_%d' % gpu_id) as scope:
iis = input_images_split[i]
isms = input_segs_split[i]
total_loss, model_loss,output_pred = tower_loss(iis, isms, class_labels, reuse_variables)
batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
reuse_variables = True
grads = opt.compute_gradients(total_loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
summary_op = tf.summary.merge_all()
log_image, log_image_data, log_image_name = build_image_summary()
# save moving average
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# batch norm updates
with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
train_op = tf.no_op(name='train_op')
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
summary_writer = tf.summary.FileWriter(FLAGS.logs_path+StyleTime, tf.get_default_graph())
if FLAGS.pretrained_model_path is not None:
variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path,
slim.get_trainable_variables(),
ignore_missing_vars=True)
global_vars_init_op = tf.global_variables_initializer()
local_vars_init_op = tf.local_variables_initializer()
init = tf.group(local_vars_init_op, global_vars_init_op)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
restore_step=0
if FLAGS.restore:
sess.run(init)
print('continue training from previous checkpoint')
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
restore_step=int(ckpt.split('.')[0].split('_')[-1])
saver.restore(sess, ckpt)
else:
sess.run(init)
if FLAGS.pretrained_model_path is not None:
variable_restore_op(sess)
start = time.time()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
while not coord.should_stop():
for step in range(restore_step,FLAGS.max_steps):
if step != 0 and step % FLAGS.decay_steps == 0:
sess.run(tf.assign(learning_rate, learning_rate.eval() * FLAGS.decay_rate))
ml, tl, _ = sess.run([model_loss, total_loss, train_op])
if np.isnan(tl):
print('Loss diverged, stop training')
break
if step % 10 == 0:
avg_time_per_step = (time.time() - start)/10
start = time.time()
                        print('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.3f} seconds/step, lr: {:.7f}'.format(
                            step, ml, tl, avg_time_per_step, learning_rate.eval()))
if (step+1) % FLAGS.save_checkpoint_steps == 0:
filename = ('RefineNet'+'_step_{:d}'.format(step + 1) + '.ckpt')
filename = os.path.join(FLAGS.checkpoint_path,filename)
saver.save(sess, filename)
print('Write model to: {:s}'.format(filename))
if step % FLAGS.save_summary_steps == 0:
_, tl, summary_str = sess.run([train_op, total_loss, summary_op])
summary_writer.add_summary(summary_str, global_step=step)
if step % FLAGS.save_image_steps == 0:
log_image_name_str = ('%06d' % step)
img_split,seg_split,pred = sess.run([iis,isms,output_pred])
img_split=np.squeeze(img_split)[0]
seg_split=np.squeeze(seg_split)[0]
pred=np.squeeze(pred)[0]
#img_split=cv2.resize(img_split,(128,128))
color_seg = np.zeros((seg_split.shape[0], seg_split.shape[1], 3))
for i in range(seg_split.shape[0]):
for j in range(seg_split.shape[1]):
color_seg[i, j, :] = color_map[str(seg_split[i][j])]
color_pred = np.zeros((pred.shape[0], pred.shape[1], 3))
for i in range(pred.shape[0]):
for j in range(pred.shape[1]):
color_pred[i, j, :] = color_map[str(pred[i][j])]
write_img=np.hstack((color_seg,color_pred))
log_image_summary_op = sess.run(log_image,feed_dict={log_image_name: log_image_name_str, \
log_image_data: write_img})
summary_writer.add_summary(log_image_summary_op, global_step=step)
except tf.errors.OutOfRangeError:
print('finish')
finally:
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
tf.app.run()
```
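`average_gradients` pairs up the per-tower (gradient, variable) tuples with `zip(*tower_grads)` and averages the gradients for each variable. A toy, TensorFlow-free illustration of the same bookkeeping:
```python
# Toy numbers standing in for per-GPU gradients of two variables.
tower_grads = [
    [(2.0, "w1"), (4.0, "w2")],   # gradients from tower 0
    [(4.0, "w1"), (0.0, "w2")],   # gradients from tower 1
]
averaged = []
for grad_and_vars in zip(*tower_grads):      # one tuple per variable
    grads = [g for g, _ in grad_and_vars]
    var = grad_and_vars[0][1]
    averaged.append((sum(grads) / len(grads), var))
print(averaged)   # [(3.0, 'w1'), (2.0, 'w2')]
```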
#### File: Segmentation Mask/utils/augmentation.py
```python
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
"""Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
The function performs random flip of image and annotation tensors with probability of 1/2
The flip is performed or not performed for image and annotation consistently, so that
annotation matches the image.
Parameters
----------
image_tensor : Tensor of size (width, height, 3)
Tensor with image
annotation_tensor : Tensor of size (width, height, 1)
Tensor with annotation
Returns
-------
randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
Randomly flipped image tensor
randomly_flipped_annotation : Tensor of size (width, height, 1)
Randomly flipped annotation tensor
"""
# Random variable: two possible outcomes (0 or 1)
# with a 1 in 2 chance
random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(image_tensor),
fn2=lambda: image_tensor)
randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(annotation_tensor),
fn2=lambda: annotation_tensor)
return randomly_flipped_img, randomly_flipped_annotation
def distort_randomly_image_color(image_tensor, fast_mode=False):
"""Accepts image tensor of (width, height, 3) and returns color distorted image.
    The function applies random brightness, saturation, hue and contrast changes in the same
    way as is done for Inception model training in TF-Slim (see the link in the comments
    below). All parameters of the random variables are preserved from the original. There are
    two regimes: fast and slow. The fast one performs only random saturation and brightness changes.
Parameters
----------
image_tensor : Tensor of size (width, height, 3) of tf.int32 or tf.float
Tensor with image with range [0,255]
fast_mode : boolean
Boolean value representing whether to use fast or slow mode
Returns
-------
img_float_distorted_original_range : Tensor of size (width, height, 3) of type tf.float.
Image Tensor with distorted color in [0,255] intensity range
"""
# Make the range to be in [0,1]
img_float_zero_one_range = tf.to_float(image_tensor) / 255
# Randomly distort the color of image. There are 4 ways to do it.
# Credit: TF-Slim
# https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py#L224
    # Most probably the Inception models were trained using this color augmentation:
# https://github.com/tensorflow/models/tree/master/slim#pre-trained-models
distorted_image = apply_with_random_selector(img_float_zero_one_range,
lambda x, ordering: distort_color(x, ordering, fast_mode=fast_mode),
num_cases=4)
img_float_distorted_original_range = distorted_image * 255
return img_float_distorted_original_range
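# Usage sketch (assumes `image_tensor` comes from the input pipeline with values in [0, 255]):
#   augmented_image = distort_randomly_image_color(image_tensor, fast_mode=False)
# The output stays in the [0, 255] range, so it can replace the raw image downstream.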
def scale_randomly_image_with_annotation_with_fixed_size_output(img_tensor,
annotation_tensor,
output_shape,
min_relative_random_scale_change=0.9,
max_realtive_random_scale_change=1.1,
mask_out_number=255):
"""Returns tensor of a size (output_shape, output_shape, depth) and (output_shape, output_shape, 1).
The function returns tensor that is of a size (output_shape, output_shape, depth)
which is randomly scaled by a factor that is sampled from a uniform distribution
between values [min_relative_random_scale_change, max_realtive_random_scale_change] multiplied
by the factor that is needed to scale image to the output_shape. When the rescaled image
doesn't fit into the [output_shape] size, the image is either padded or cropped. Also, the
function returns scaled annotation tensor of the size (output_shape, output_shape, 1). Both,
the image tensor and the annotation tensor are scaled using nearest neighbour interpolation.
    This is done to preserve the annotation labels. Be careful when specifying a big sample
    space for the random variable -- aliasing effects can appear. When scaling, this function
    preserves the aspect ratio of the original image. These manipulations can leave blank
    regions in the output image -- the function masks out those regions in the annotation
    using mask_out_number. Overall, the function performs the rescaling necessary to get an
    image of output_shape, adds random scale jitter, preserves the aspect ratio, and masks
    out the unnecessary regions that appear.
Parameters
----------
img_tensor : Tensor of size (width, height, depth)
Tensor with image
annotation_tensor : Tensor of size (width, height, 1)
Tensor with respective annotation
output_shape : Tensor or list [int, int]
Tensor of list representing desired output shape
min_relative_random_scale_change : float
Lower bound for uniform distribution to sample from
when getting random scaling jitter
max_realtive_random_scale_change : float
Upper bound for uniform distribution to sample from
when getting random scaling jitter
mask_out_number : int
Number representing the mask out value.
Returns
-------
cropped_padded_img : Tensor of size (output_shape[0], output_shape[1], 3).
Image Tensor that was randomly scaled
cropped_padded_annotation : Tensor of size (output_shape[0], output_shape[1], 1)
Respective annotation Tensor that was randomly scaled with the same parameters
"""
# tf.image.resize_nearest_neighbor needs
# first dimension to represent the batch number
img_batched = tf.expand_dims(img_tensor, 0)
annotation_batched = tf.expand_dims(annotation_tensor, 0)
# Convert to int_32 to be able to differentiate
# between zeros that was used for padding and
# zeros that represent a particular semantic class
annotation_batched = tf.to_int32(annotation_batched)
# Get height and width tensors
input_shape = tf.shape(img_batched)[1:3]
input_shape_float = tf.to_float(input_shape)
scales = output_shape / input_shape_float
rand_var = tf.random_uniform(shape=[1],
minval=min_relative_random_scale_change,
maxval=max_realtive_random_scale_change)
final_scale = tf.reduce_min(scales) * rand_var
scaled_input_shape = tf.to_int32(tf.round(input_shape_float * final_scale))
# Resize the image and annotation using nearest neighbour
# Be careful -- may cause aliasing.
# TODO: try bilinear resampling for image only
resized_img = tf.image.resize_nearest_neighbor( img_batched, scaled_input_shape )
resized_annotation = tf.image.resize_nearest_neighbor( annotation_batched, scaled_input_shape )
resized_img = tf.squeeze(resized_img, axis=0)
resized_annotation = tf.squeeze(resized_annotation, axis=0)
# Shift all the classes by one -- to be able to differentiate
# between zeros representing padded values and zeros representing
# a particular semantic class.
annotation_shifted_classes = resized_annotation + 1
cropped_padded_img = tf.image.resize_image_with_crop_or_pad( resized_img, output_shape[0], output_shape[1] )
cropped_padded_annotation = tf.image.resize_image_with_crop_or_pad(annotation_shifted_classes,
output_shape[0]//4,
output_shape[1]//4)
# TODO: accept the classes lut instead of mask out
# value as an argument
annotation_additional_mask_out = tf.to_int32(tf.equal(cropped_padded_annotation, 0)) * (mask_out_number+1)
cropped_padded_annotation = cropped_padded_annotation + annotation_additional_mask_out - 1
return cropped_padded_img, cropped_padded_annotation
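# Usage sketch (assumes `image` and `annotation` tensors from the input pipeline):
#   img_out, ann_out = scale_randomly_image_with_annotation_with_fixed_size_output(
#       image, annotation, output_shape=[512, 512])
# Note that the annotation is cropped/padded to output_shape//4 above, i.e. it matches a
# network whose predicted label map has 1/4 of the input's spatial resolution.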
```
#### File: Food-Volume-Estimation/Volume Estimation/volume.py
```python
import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
    cols = index[:, 1]
    left_top_r = np.min(rows)
    left_top_c = np.min(cols)
    right_bottom_r = np.max(rows)
    right_bottom_c = np.max(cols)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
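# get_scale converts pixel units to physical units: the plate's known diameter fixes the
# cm-per-pixel scale, and the depth gap between the plate rim and the maximum depth value
# (`lowest`) fixes the cm-per-depth-unit scale. As a hypothetical example, a plate spanning
# 250 px gives len_per_pix = 25/250 = 0.1 cm per pixel.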
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
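# cal_volume integrates over every pixel inside the food polygon: each pixel contributes a
# column of height max(0, (lowest - depth)*depth_per_pix - plate_thickness) with base area
# len_per_pix**2, so the returned volume is in cubic centimetres.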
def get_volume(img, json_path):
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json"))
```
|
{
"source": "jessierliu/ecogVIS",
"score": 2
}
|
#### File: ecogvis/functions/transcription_data.py
```python
from ndx_hierarchical_behavioral_data.transcription_io import timitsounds_df, timitsounds_converter
from ndx_hierarchical_behavioral_data.mocha_io import mocha_df, mocha_re_df, mocha_converter
from ndx_hierarchical_behavioral_data.text_grid_io import textgriddf_reader, textgriddf_df, textgriddf_converter
def add_transcription_data(nwbfile, path_transcription, tr_type, subject_id=None,
session_id=None):
"""
Add transcription data to nwb file
Parameters:
-----------
nwbfile : nwbfile
path_transcription : path
tr_type : str
Transcriptions data type: timitsounds, textgrid or mocha
subject_id : str
Used for Mocha
session_id : str
Used for Mocha
"""
if tr_type == 'timitsounds':
phonemes, syllables, words, sentences = get_timitsounds(path_transcription=path_transcription)
elif tr_type == 'mocha':
phonemes, syllables, words, sentences = get_mocha(
path_transcription=path_transcription,
subject_id=subject_id,
session_id=session_id
)
if sentences is None: # If no transcription data for given session
return None
elif tr_type == 'textgrid':
phonemes, syllables, words = None, None, None
sentences = get_textgrid(path_transcription=path_transcription)
else:
raise TypeError('Invalid transcription type')
# Create behavioral processing module, if not existent
if 'behavior' not in nwbfile.processing:
nwbfile.create_processing_module(
name='behavior',
description='behavioral data'
)
# Add transcription tables to behavioral processing module
if phonemes is not None:
nwbfile.processing['behavior'].add(phonemes)
if syllables is not None:
nwbfile.processing['behavior'].add(syllables)
if words is not None:
nwbfile.processing['behavior'].add(words)
if sentences is not None:
nwbfile.processing['behavior'].add(sentences)
return nwbfile
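# Illustrative call (hypothetical path; subject/session IDs are only needed for mocha data):
#   nwbfile = add_transcription_data(nwbfile, '/path/to/mocha_dir', tr_type='mocha',
#                                    subject_id='EC118', session_id='B6')
# Returns None when the requested mocha session has no transcription data.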
def get_textgrid(path_transcription):
"""Get data from TextGrid file"""
data = textgriddf_reader(path_file=path_transcription)
text_df = textgriddf_df(data, item_no=2)
sentences = textgriddf_converter(text_df)
return sentences
def get_mocha(path_transcription, subject_id, session_id):
"""Get data from Mocha directory"""
phoneme_data, syllable_data, word_data, sentences_data = mocha_df(path_to_files=path_transcription)
re_phoneme_data, re_syllable_data, re_word_data, re_sentence_data = mocha_re_df(
phoneme_data=phoneme_data,
syllable_data=syllable_data,
word_data=word_data,
sentences_data=sentences_data,
subject_id=subject_id, # 'EC118'
session_id=session_id, # 'B6'
trial_id='...'
)
if re_sentence_data.shape[0] == 0:
return None, None, None, None
else:
phonemes, syllables, words, sentences = mocha_converter(
re_phoneme_data=re_phoneme_data,
re_syllable_data=re_syllable_data,
re_word_data=re_word_data,
re_sentence_data=re_sentence_data
)
return phonemes, syllables, words, sentences
def get_timitsounds(path_transcription):
"""Get data from TimitSounds directory"""
phonemes_data, syllables_data, words_data, sentences_data, pitch_data, formant_data, intensity_data = timitsounds_df(path_to_files=path_transcription)
phonemes, syllables, words, sentences, pitch_ts, formant_ts, intensity_ts = timitsounds_converter(
phonemes_data=phonemes_data,
syllables_data=syllables_data,
words_data=words_data,
sentences_data=sentences_data,
pitch_data=pitch_data,
formant_data=formant_data,
intensity_data=intensity_data
)
return phonemes, syllables, words, sentences
```
#### File: signal_processing/tests/test_processing_data.py
```python
import numpy as np
from pynwb import NWBHDF5IO
from ecogvis.signal_processing.processing_data import high_gamma_estimation, spectral_decomposition, preprocess_raw_data, make_new_nwb
import unittest
import os
class ProcessingDataTestCase(unittest.TestCase):
def setUp(self):
here_path = os.path.dirname(os.path.abspath(__file__))
# processed and raw example data
self.processed_name = os.path.join(here_path, 'example_ecephys.nwb')
# temporary files
self.test_name = 'ecephys_exmpl_test.nwb'
self.copy_name = 'ecephys_exmpl_copy.nwb'
# Pull out processing parameters
with NWBHDF5IO(self.processed_name, 'r') as io:
nwbfile = io.read()
rate = nwbfile.processing['ecephys'].data_interfaces['LFP'].electrical_series['preprocessed'].rate
self.config = {
'referencing': ('CAR', 16),
'Notch': 60,
'Downsample': rate
}
bands = nwbfile.processing['ecephys'].data_interfaces['DecompositionSeries'].bands
self.bands_vals = bands.to_dataframe().to_numpy().T
# Note: assumes that high gamma was generated with the same bands
def test_processing(self):
# Test copying
try:
self.step1_make_new_nwb()
except Exception as e:
self.fail("{} failed ({}: {})".format('step1_make_new_nwb', type(e), e))
# Make a copy of the processed nwb that only has raw data
# Note: assumes that copying works
cp_objs = {
'institution': True,
'lab': True,
'session': True,
'devices': True,
'electrode_groups': True,
'electrodes': True,
'acquisition': ['raw']
}
make_new_nwb(self.processed_name, self.test_name, cp_objs=cp_objs)
# Test preprocessing
try:
self.step2_preprocess_raw_data()
except Exception as e:
self.fail("{} failed ({}: {})".format('step2_preprocess_raw_data', type(e), e))
# Test Spectral decomposition
# Note: assumes that preprocessing ran correctly on the test file
try:
self.step3_spectral_decomposition()
except Exception as e:
self.fail("{} failed ({}: {})".format('step3_spectral_decomposition', type(e), e))
# Test High Gamma estimation
# Note: assumes that preprocessing ran correctly on the test file
try:
self.step4_high_gamma_estimation()
except Exception as e:
self.fail("{} failed ({}: {})".format('step4_high_gamma_estimation', type(e), e))
# Remove the testing nwb file
os.remove(self.test_name)
def tearDown(self):
# If there wasn't an error, these files will have been removed already
try:
os.remove(self.test_name)
except FileNotFoundError as e:
pass
try:
os.remove(self.copy_name)
except FileNotFoundError as e:
pass
def step1_make_new_nwb(self):
cp_objs = {
'institution': True,
'lab': True,
'session': True,
'devices': True,
'electrode_groups': True,
'electrodes': True,
'intervals': True,
'stimulus': True,
'acquisition': 'default',
'ecephys': 'default'
}
make_new_nwb(self.processed_name, self.copy_name, cp_objs=cp_objs)
with NWBHDF5IO(self.processed_name, 'r') as io1:
nwbfile_in = io1.read()
with NWBHDF5IO(self.copy_name, 'r') as io2:
new_nwbfile_in = io2.read()
# Check that they have the same elements (symmetric set difference is empty)
assert not ((set(cp_objs.keys()) & set(new_nwbfile_in.fields.keys())) ^ \
(set(cp_objs.keys()) & set(nwbfile_in.fields.keys())))
# (May want to add deeper checks here)
os.remove(self.copy_name)
def step2_preprocess_raw_data(self):
# Make sure there is no existing preprocessed data in the test file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
assert not(nwbfile_test.processing)
# Run preprocessing
preprocess_raw_data(self.test_name, self.config)
# Check that it matches the processed file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
lfp_data = nwbfile_test.processing['ecephys'].data_interfaces['LFP'].electrical_series['preprocessed'].data[:]
with NWBHDF5IO(self.processed_name, 'r') as io:
nwbfile_correct = io.read()
lfp_data_expected = nwbfile_correct.processing['ecephys'].data_interfaces['LFP'].electrical_series['preprocessed'].data[:]
np.testing.assert_almost_equal(lfp_data, lfp_data_expected)
def step3_spectral_decomposition(self):
# Make sure there is no Decomposition data in the test file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
assert 'DecompositionSeries' not in nwbfile_test.processing['ecephys'].data_interfaces
# Run decomposition
spectral_decomposition(self.test_name, self.bands_vals)
# Check that it matches the processed file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
decomposition_data = nwbfile_test.processing['ecephys'].data_interfaces['DecompositionSeries'].data[:]
with NWBHDF5IO(self.processed_name, 'r') as io:
nwbfile_correct = io.read()
decomposition_data_expected = nwbfile_correct.processing['ecephys'].data_interfaces['DecompositionSeries'].data[:]
np.testing.assert_almost_equal(decomposition_data, decomposition_data_expected)
def step4_high_gamma_estimation(self):
# Make sure there is no High Gamma data in the test file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
assert 'high_gamma' not in nwbfile_test.processing['ecephys'].data_interfaces
# Run high gamma
high_gamma_estimation(self.test_name, self.bands_vals)
# Check that it matches the processed file
with NWBHDF5IO(self.test_name, 'r') as io:
nwbfile_test = io.read()
high_gamma_data = nwbfile_test.processing['ecephys'].data_interfaces['high_gamma'].data[:]
with NWBHDF5IO(self.processed_name, 'r') as io:
nwbfile_correct = io.read()
high_gamma_data_expected = nwbfile_correct.processing['ecephys'].data_interfaces['high_gamma'].data[:]
np.testing.assert_almost_equal(high_gamma_data, high_gamma_data_expected)
```
|
{
"source": "JessieSalas/narrative",
"score": 3
}
|
#### File: JessieSalas/narrative/parser.py
```python
from nltk.tag.stanford import StanfordNERTagger
import operator
from pprint import pprint
from nltk.tokenize import word_tokenize
import ner
st = ner.SocketNER(host='localhost', port=8080)
def find_ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
#st = StanfordNERTagger('/Users/Owner/stanford-ner-2014-06-16/classifiers/english.all.3class.distsim.crf.ser.gz','/Users/Owner/stanford-ner-2014-06-16/stanford-ner.jar')
book_path = 'texts/1984.txt'
text = " ".join( [line.strip() for line in open('texts/1984.txt', 'r')] )
sentence = ""
#splits = word_tokenize(jungle)
#make new list to keep ngrams generated
#tagged = splits
#tagged.extend([" ".join(ng) for ng in find_ngrams(splits,2)])
#tagged.extend([" ".join(ng) for ng in find_ngrams(splits,3)])
#tagged.extend([ " ".join(ng) for ng in find_ngrams(splits,4) ])
def combineDicts(d1,d2):
"""
Combine two dictionaries of LISTS into a single dictionary of SETS
returns a single new dictionary
"""
combination = {}
for k in d1.keys():
if k in combination:
entity_list = d1[k]
for entity in entity_list:
combination[k].add(entity)
else:
combination[k] = set()
for entity in d1[k]:
combination[k].add(entity)
for k in d2.keys():
if k in combination:
entity_list = d2[k]
for entity in entity_list:
combination[k].add(entity)
else:
combination[k] = set()
for entity in d2[k]:
combination[k].add(entity)
return combination
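# Hypothetical example:
#   combineDicts({'PERSON': ['Winston']}, {'PERSON': ['Julia'], 'LOCATION': ['London']})
#   -> {'PERSON': {'Winston', 'Julia'}, 'LOCATION': {'London'}}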
#n is the size of the chunks
n = 2500
l = text
chunks = [l[i:i+n] for i in range(0, len(l), n)]
"""
first = chunks[1]
second = chunks[2]
ta1 = st.get_entities(first)
print (ta1)
ta2 = st.get_entities(second)
print (ta2)
print('---')
print (combineDicts(ta1,ta2))
exit()
"""
all_tags={}
for chunk in chunks:
tagged = st.get_entities(chunk)
all_tags = combineDicts(tagged,all_tags)
#pprint(all_tags)
#Now we have all the Entities that occur in the book.
#Next we will count the frequency of those entities
people = all_tags['PERSON']
"""
entity_count = {}
for entity in people:
count = text.count(entity)
entity_count[entity] = count
"""
all_entity_count = {}
for tag in all_tags:
entity_tag = all_tags[tag]
entity_count = {}
for entity in entity_tag:
count = text.count(entity)
entity_count[entity] = count
sorted_entity = [a for a in reversed(sorted(entity_count.items(), key=operator.itemgetter(1)))]
all_entity_count[tag] = sorted_entity
pprint(all_entity_count)
exit()
#Create a dictionary of tags
tag_dict = {}
for token in all_tags:
word = token[0]
tag = token[1]
if tag in tag_dict:
if word in tag_dict[tag]:
tag_dict[tag][word] += 1
else:
tag_dict[tag][word] = 1
else:
#1st level was tag category
#Dictionary of Dictionaries: 2nd level is invididual tag
#This is so we can count the number of individual tags
#tag_dict[tag]={token[0]: 1}
#This is where we initialize a new dictionary
tag_dict[tag] = {word:1}
#print(tag_dict.keys())
person_dict = tag_dict['R']
sorted_persons = [a for a in reversed(sorted(person_dict.items(), key=operator.itemgetter(1)))]
pprint(sorted_persons)
exit()
#pprint(tag_dict['ORGANIZATION'])
sorted_persons = [a for a in reversed(sorted(person_dict.items(), key=operator.itemgetter(1)))]
pprint(sorted_persons)
##pprint(tag_dict.keys())
```
|
{
"source": "jessiesrr/evo-epithelium",
"score": 3
}
|
#### File: data/vary_MU_data/plot.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('white')
sns.set_palette('colorblind')
def formatting(xlabel,ylabel,large=False,legend=False):
if large: labelsize,ticksize = 26,18
else: labelsize,ticksize = 16,12
plt.xlabel(xlabel,size=labelsize,labelpad=10)
plt.ylabel(ylabel,size=labelsize,labelpad=10)
plt.xticks(size=ticksize)
plt.yticks(size=ticksize)
plt.tight_layout()
if legend: plt.legend(loc='best',fontsize=10,frameon=False)
def load_data(data_type,MU_vals,start_index=1):
return {MU:np.array([np.loadtxt('MU%.1f/%s_%d'%(MU,data_type,i))[start_index:] for i in range(3)]) for MU in MU_vals}
MU_vals = -np.array((0.1,1,10,25,50,100,250))
timestep = 1.0
def plot_force(MU_vals):
force_data = load_data('force',MU_vals)
t_vals = [timestep*i for i in range(len(force_data[MU_vals[0]][0]))]
fig = plt.figure()
for MU in MU_vals:
        plt.plot(t_vals[::10],np.mean(force_data[MU],axis=0)[::10],label=r'$\mu=%.1f$'%MU)
plt.legend(loc='best')
plt.show()
def plot_neigh_distr(MU_vals):
neigh_data = load_data('neigh_distr',MU_vals,int(10/timestep))
neigh_data = {MU:np.mean(data.reshape(data.shape[0]*data.shape[1],data.shape[2]),axis=0) for MU, data in neigh_data.iteritems()}
min_,max_ = 4,9
fig, ax = plt.subplots()
index = np.arange(max_+1-min_)
bar_width=0.7/len(MU_vals)
rects = [ax.bar(index+bar_width*(i-1),neigh_data[MU][min_:max_+1]/100., bar_width,label=r'$\mu=%.1f$'%-MU) for i,MU in enumerate(MU_vals)]
plt.legend(loc='best',frameon=False)
ax.set_xticklabels(np.arange(min_-1,max_+1))
formatting('Polygon side #','Frequency')
plt.savefig('side_distr.pdf')
# plot_force(MU_vals)
plot_neigh_distr(MU_vals)
```
#### File: evo-epithelium/graph_pd/cluster_stats.py
```python
import numpy as np
import multiprocessing as mp
from functools import partial
import os
import sys
from pd_graph_lib import run_death_birth_simulation_neutral_clones
def read_neighbour_list(fname):
neighbours = []
with open(fname,'r') as f:
for line in f:
neighbours.append(np.fromstring(line,dtype=int,sep=' '))
return neighbours
def calc_interactions(neighbours,cell_types,mutant_index,n):
"""treats all cells with ancestor 'mutant_index' as cooperators
returns:
n (int): size of clone
        I_CC/I_CD (ints): number of cooperator-cooperator / cooperator-defector interactions in the population
        W_CC/W_CD (floats): the same interaction counts, each weighted by the cooperator's neighbour number
"""
types = cell_types==mutant_index
I_CC,I_CD,W_CC,W_CD,N_D = 0,0,0.,0.,0
for ctype,cell_neighbours in zip(types,neighbours):
if ctype:
Cneigh,neigh = float(sum(types[cell_neighbours])),float(len(cell_neighbours))
I_CC += Cneigh
I_CD += neigh - Cneigh
W_CC += Cneigh/neigh
W_CD += (neigh-Cneigh)/neigh
return [n,I_CC,I_CD,W_CC,W_CD]
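# Worked example (hypothetical): a cooperator with 6 neighbours, 4 of them cooperators,
# adds 4 to I_CC, 2 to I_CD, 4/6 to W_CC and 2/6 to W_CD.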
def run_sim(i):
"""run a single simulation and save interaction data for each clone"""
rand = np.random.RandomState()
history = run_death_birth_simulation_neutral_clones(neighbours,timend,timestep,rand)
data = [calc_interactions(neighbours,cell_types,mutant_index,n)
for cell_types in history
for mutant_index,n in enumerate(np.bincount(cell_types)) if n>0]
np.savetxt('%s/data_%d'%(outdir,i),data,fmt=('%4d','%4d','%4d','%4.6f','%4.6f'))
nlist_file = 'hex_graph' #file from which to read graph structure
neighbours = read_neighbour_list(nlist_file)
neighbours = [np.array(n,dtype=int) for n in neighbours]
N = len(neighbours) #population size
timend,timestep = 100000.,20. #length of simulation (hours), timesteps at which to calc interaction data
sim_runs = int(sys.argv[1]) # number of sims to run taken as command line arg
outdir = 'interaction_data'
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
cpunum=mp.cpu_count()
pool = mp.Pool(processes=cpunum-1,maxtasksperchild=1000)
pool.map(run_sim,range(sim_runs))
pool.close()
pool.join()
```
#### File: evo-epithelium/graph_pd/run_parallel_migration1.py
```python
import numpy as np
import multiprocessing as mp
from functools import partial
import os
import sys
from pd_graph_lib import run_death_birth_simulation_til_fixation, prisoners_dilemma_averaged, prisoners_dilemma_accumulated
nlist_file = 'vt_graph_6' #import neighbour list for each node in graph. possible files vt_graph_X or hex_graph
migration_strengths = [0.0,0.1,0.2,0.5,1.0] #values of m to calc fixprobs for where m=prob. of a migration at each timestep
c,DELTA = 1.0,0.025
b_vals = np.arange(2,12) #values of b to run sim for
runs=int(1e5) #number of simulations from which to calc fixprob
def read_neighbour_list(fname):
"""read in neighbours from file"""
neighbours = []
with open(fname,'r') as f:
for line in f:
neighbours.append(np.fromstring(line,dtype=int,sep=' '))
return neighbours
def run_parallel(b,m,i):
"""run a single simulation with death-birth update until fixation returning +1 (mutants fixed), 0 (mutants died out), or -1 (incomplete).
if incomplete saves history"""
if i%int(1e3)==0: print 'b = %.1f; %d runs complete' %(b,i)
rand = np.random.RandomState()
fix,history = run_death_birth_simulation_til_fixation(neighbours,DELTA,game,(b,c),timend,timestep,rand,return_fix=True,migration_strength=m)
if fix == -1: np.savetxt('%s/incomplete_b%d_%d'%(outdir,b,i),[sum(types) for types in history])
return fix
game = prisoners_dilemma_averaged
outdir = 'EGTpd_av_db/migration/%s'%(nlist_file)
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
neighbours = read_neighbour_list(nlist_file)
neighbours = [np.array(n,dtype=int) for n in neighbours]
N = len(neighbours)
timend,timestep = 100000.,1.0
cpunum=mp.cpu_count()
pool = mp.Pool(processes=cpunum,maxtasksperchild=1000)
for m in migration_strengths:
outfile = '%s/m%.1f'%(outdir,m)
fix_results = open(outfile,'w',0)
    fix_results.write('#b fixed lost incomplete\n')
for b in b_vals:
fixation = np.array([f for f in pool.imap(partial(run_parallel,b,m),range(int(runs)))])
fixed = len(np.where(fixation==1)[0])
nofixed = len(np.where(fixation==0)[0])
incomplete = len(np.where(fixation==-1)[0])
fix_results.write('%.1f %d %d %d\n'%(b,fixed,nofixed,incomplete))
fix_results.close()
```
#### File: VTdyn/libs/data.py
```python
import os
import numpy as np
#library of functions for saving data from a history object (list of tissues)
def save_mean_area(history,outdir,index=0):
"""saves mean area of cells in each tissue"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
filename = '%s/area_mean_%d'%(outdir,index)
np.savetxt(filename,[np.mean(tissue.mesh.areas) for tissue in history])
def save_areas(history,outdir,index=0):
"""saves all areas of cells in each tissue"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
filename = '%s/areas_%d'%(outdir,index)
wfile = open(filename,'w')
for tissue in history:
for area in tissue.mesh.areas:
wfile.write('%.3e '%area)
        wfile.write('\n')
    wfile.close()
def save_force(history,outdir,index=0):
"""saves mean magnitude of force on cells in each tissue"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
wfile = open('%s/%s_%d'%(outdir,'force',index),'w')
for tissue in history:
wfile.write('%.3e \n'%np.mean(np.sqrt((tissue.Force(tissue)**2).sum(axis=1))))
wfile.close()
def save_neighbour_distr(history,outdir,index=0):
"""save neighbour distributions in each tissue"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
wfilename = '%s/%s_%d'%(outdir,'neigh_distr',index)
np.savetxt(wfilename,[np.bincount([len(tissue.mesh.neighbours[i]) for i in range(len(tissue))],minlength=18) for tissue in history],fmt=(['%d']*18))
def save_N_cell(history,outdir,index=0):
"""save number of cells in each tissue"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
wfilename = '%s/%s_%d'%(outdir,'N_cell',index)
np.savetxt(wfilename,[len(tissue) for tissue in history],fmt=('%d'))
def save_N_mutant(history,outdir,index=0):
"""saves number of mutants in each tissue given by 'mutant' property"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
wfilename = '%s/%s_%d'%(outdir,'N_mutant',index)
np.savetxt(wfilename,[sum(tissue.properties['mutant']) for tissue in history],fmt=('%d'))
def save_N_mutant_type(history,outdir,index=0):
"""saves number of mutants in each tissue given by 'type' property"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
wfilename = '%s/%s_%d'%(outdir,'N_mutant',index)
np.savetxt(wfilename,[sum(tissue.properties['type']) for tissue in history],fmt=('%d'))
def get_cell_history(history,cell_id):
"""generate a history for a given cell id with area at each timestep, age at each timestep and
fate (reproduction=1 or death=0)"""
cell_history = {'area':[],'age':[],'fate':None}
for tissue in history:
if cell_id not in tissue.cell_ids and len(cell_history['area']) > 0:
if cell_id in tissue.mother: cell_history['fate'] = 1
else: cell_history['fate'] = 0
break
elif cell_id in tissue.cell_ids:
mesh_id = np.where(tissue.cell_ids == cell_id)[0][0]
cell_history['area'].append(tissue.mesh.areas[mesh_id])
cell_history['age'].append(tissue.age[mesh_id])
return cell_history
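# Sketch of the returned structure: get_cell_history(history, 42) gives the per-timestep
# 'area' and 'age' of cell 42, with 'fate' set to 1 if it divided, 0 if it died, and left
# as None if the cell is still alive at the end of the history.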
def get_cell_histories(history,start=0):
"""generate history for all cells (see above get_cell_history)"""
return [get_cell_history(history[start:],i) for i in range(max(history[-1].cell_ids)) if len(get_cell_history(history[start:],i)['age'])>0]
def save_age_of_death(history,outdir,index=0):
"""save cell lifetimes for each cell in history"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
cell_histories = np.array(get_cell_histories(history))
has_fate = np.array([h['fate'] is not None for h in cell_histories])
cell_histories = cell_histories[has_fate]
fates = np.array([h['fate'] for h in cell_histories],dtype=bool)
final_age_d = [cell['age'][-1] for cell in cell_histories[fates]]
final_age_a = [cell['age'][-1] for cell in cell_histories[~fates]]
np.savetxt('%s/division_age_%d'%(outdir,index),final_age_d)
np.savetxt('%s/apoptosis_age_%d'%(outdir,index),final_age_a)
def save_ages(history,outdir,index=0):
"""saves all cell ages for each tissue in history"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
filename = '%s/ages_%d'%(outdir,index)
wfile = open(filename,'w')
for tissue in history:
for age in tissue.age:
wfile.write('%.3e '%age)
        wfile.write('\n')
    wfile.close()
def save_mean_age(history,outdir,index=0):
"""save mean age of cells for each tissue in history"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
filename = '%s/age_mean_%d'%(outdir,index)
np.savetxt(filename,[np.mean(tissue.age) for tissue in history])
def save_all(history,outdir,index=0):
save_N_cell(history,outdir,index)
save_N_mutant(history,outdir,index)
save_ages(history,outdir,index)
save_neighbour_distr(history,outdir,index)
save_force(history,outdir,index)
try: save_areas(history,outdir,index)
except: pass
save_age_of_death(history,outdir,index)
```
|
{
"source": "jessiesrr/VTdyn",
"score": 2
}
|
#### File: VTdyn/libs/pd_lib_neutral.py
```python
import os
import sys
import numpy as np
import itertools
import structure
from structure.global_constants import T_D,dt,ETA,MU
from structure.cell import Tissue, BasicSpringForceNoGrowth
import structure.initialisation as init
def print_progress(step,N_steps):
sys.stdout.write("\r %.2f %%"%(step*100./N_steps))
sys.stdout.flush()
def run(simulation,N_step,skip):
"""run a given simulation for N_step iterations
returns list of tissue objects at intervals given by skip"""
return [tissue.copy() for tissue in itertools.islice(simulation,0,N_step,skip)]
def run_generator(simulation,N_step,skip):
"""generator for running a given simulation for N_step iterations
returns generator for of tissue objects at intervals given by skip"""
return itertools.islice(simulation,0,N_step,skip)
def run_return_events(simulation,N_step):
return [tissue.copy() for tissue in itertools.islice(simulation,N_step) if tissue is not None]
def run_return_final_tissue(simulation,N_step):
return next(itertools.islice(simulation,N_step,None))
def run_til_fix(simulation,N_step,skip,include_fixed=True):
return [tissue.copy() for tissue in generate_til_fix(simulation,N_step,skip,include_fixed=include_fixed)]
def fixed(tissue):
try:
return (1 not in tissue.properties['type'] or 0 not in tissue.properties['type'])
except KeyError:
return np.all(tissue.properties['ancestor']==tissue.properties['ancestor'][0])
def generate_til_fix(simulation,N_step,skip,include_fixed=True):
for tissue in itertools.islice(simulation,0,N_step,skip):
if not fixed(tissue):
yield tissue
else:
if include_fixed:
yield tissue
break
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------ SIMULATION ROUTINES ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def simulation_no_division(tissue,dt,N_steps,rand):
"""run tissue simulation with no death or division"""
yield tissue
step = 1.
while True:
N= len(tissue)
mesh = tissue.mesh
step += 1
mesh.move_all(tissue.dr(dt))
tissue.update(dt)
yield tissue
def simulation(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False):
yield tissue
step = 1.
while True:
N= len(tissue)
properties = tissue.properties
mesh = tissue.mesh
mesh.move_all(tissue.dr(dt,eta))
if rand.rand() < (1./T_D)*N*dt:
mother = rand.randint(N)
tissue.add_daughter_cells(mother,rand)
tissue.remove(mother,True)
tissue.remove(rand.randint(N)) #kill random cell
tissue.update(dt)
if progress_on: print_progress(step,N_steps)
step += 1
yield tissue
def simulation_ancestor_tracking(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False):
"""simulation loop for neutral process tracking ancestor ids"""
tissue.properties['ancestor']=np.arange(len(tissue))
return simulation(tissue,dt,N_steps,stepsize,rand,eta=eta,progress_on=progress_on)
def simulation_mutant_tracking(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False,mutant_number=1,mutant_type=1):
"""simulation loop for neutral process tracking mutant ids"""
tissue.properties['type'] = np.full(len(tissue),1-mutant_type,dtype=int)
tissue.properties['type'][rand.choice(len(tissue),size=mutant_number,replace=False)]=mutant_type
return simulation(tissue,dt,N_steps,stepsize,rand,eta=eta,progress_on=progress_on)
def initialise_tissue(N,dt,timend,timestep,rand,mu=MU,save_areas=False,save_cell_histories=False):
"""initialise tissue and run simulation until timend returning final state"""
tissue = init.init_tissue_torus(N,N,0.01,BasicSpringForceNoGrowth(mu),rand,save_areas=save_areas,save_cell_histories=save_cell_histories)
if timend !=0:
tissue = run_return_final_tissue(simulation(tissue,dt,timend/dt,timestep/dt,rand),timend/dt)
tissue.reset(reset_age=True)
return tissue
def run_simulation(simulation,N,timestep,timend,rand,init_time=None,mu=MU,eta=ETA,dt=dt,til_fix=True,generator=False,save_areas=False,
tissue=None,save_cell_histories=False,progress_on=False,**kwargs):
"""initialise tissue with NxN cells and run given simulation with given game and constants.
starts with single cooperator
ends at time=timend OR if til_fix=True when population all cooperators (type=1) or defectors (2)
returns history: list of tissue objects at time intervals given by timestep
"""
if tissue is None:
tissue = initialise_tissue(N,dt,init_time,timestep,rand,mu=mu,save_areas=save_areas,save_cell_histories=save_cell_histories)
if til_fix:
include_fix = not (til_fix=='exclude_final')
if generator:
history = generate_til_fix(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt,include_fix)
else:
history = run_til_fix(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt)
else:
history = run(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt)
return history
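# Illustrative call (sketch): run the neutral ancestor-tracking process on a 10x10 population
# until a single clone fixes, sampling the tissue every 12 hours:
#   rand = np.random.RandomState()
#   history = run_simulation(simulation_ancestor_tracking, 10, 12., 10000., rand,
#                            init_time=96., til_fix=True)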
```
#### File: VTdyn/libs/pd_size_dep.py
```python
import sys
import numpy as np
import itertools
import structure
from structure.global_constants import *
from structure.cell import Tissue, BasicSpringForce, BasicSpringForceNoGrowth, MutantSpringForce
import structure.initialisation as init
def print_progress(step,N_steps):
sys.stdout.write("\r %.2f %%"%(step*100/N_steps))
sys.stdout.flush()
def simulation_no_division(tissue,dt,N_steps,rand):
step = 0.
while True:
N= len(tissue)
mesh = tissue.mesh
step += 1
mesh.move_all(tissue.dr(dt))
tissue.update(dt)
print_progress(step,N_steps)
yield tissue
def run(tissue_original,simulation,N_step,skip):
return [tissue_original.copy()]+[tissue.copy() for tissue in itertools.islice(simulation,skip-1,N_step,skip)]
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------SIZE-DEPENDENCE-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def simulation_size_dependent(tissue,dt,N_steps,stepsize,rand):
step = 0.
while True:
if len(np.where(tissue.properties['ancestor']!=tissue.properties['ancestor'][0])[0])==1 and step%stepsize==1: break
N= len(tissue)
mesh = tissue.mesh
step += 1
mesh.move_all(tissue.dr(dt))
ready = np.where(tissue.mesh.areas>=DIV_AREA)[0]
for mother in ready:
tissue.add_daughter_cells(mother,rand)
tissue.properties['ancestor'] = np.append(tissue.properties['ancestor'],[tissue.properties['ancestor'][mother]]*2)
tissue.remove(ready)
if rand.rand() < (1./T_D)*N*dt:
tissue.remove(rand.randint(N))
tissue.update(dt)
print_progress(step,N_steps)
yield tissue
def run_simulation_size_dependent(N,timestep,timend,rand):
ages = rand.rand(N*N)*(T_G1+T_other)
multiplier = RHO+GROWTH_RATE*0.5*(T_G1+T_other)
force = BasicSpringForce()
tissue = init.init_tissue_torus_with_multiplier(N,N,0.01,force,rand,multiplier,ages,save_areas=True)
tissue.properties['ancestor'] = np.arange(N*N)
history = run(tissue,simulation_size_dependent(tissue,dt,timend/dt,timestep/dt,rand=rand),timend/dt,timestep/dt)
return history
```
#### File: jessiesrr/VTdyn/run_CIP_parallel_simple.py
```python
import numpy as np
from multiprocessing import Pool,cpu_count
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import sys,os
import itertools
threshold_area_fraction = float(sys.argv[1])
death_to_birth_rate_ratio = float(sys.argv[2])
domain_size_multiplier = float(sys.argv[3])
DELTA = float(sys.argv[4])
job_id = sys.argv[5]
NUMBER_SIMS = 10000
BATCH_SIZE = 1000
L = 10 # population size N=l*l
TIMEND = 80000. # simulation time (hours)
MAX_POP_SIZE = 1000
TIMESTEP = 96. # time intervals to save simulation history
DEATH_RATE = 0.25/24.
INIT_TIME = 96.
PARENTDIR = "CIP_simple_fix_N100/db%.2f_a%.1f/"%(death_to_birth_rate_ratio,threshold_area_fraction)
if not os.path.exists(PARENTDIR): # if the outdir doesn't exist create it
os.makedirs(PARENTDIR)
game = "simple"
simulation = lib.simulation_contact_inhibition_area_dependent
rates = (DEATH_RATE,DEATH_RATE/death_to_birth_rate_ratio)
with open(PARENTDIR+'info',"w") as f:
f.write('death_rate = %.6f\n'%DEATH_RATE)
f.write('initial pop size = %3d\n'%(L*L))
f.write('domain width = %.1f\n'%(L*domain_size_multiplier))
f.write('quiescent area ratio = %.1f\n'%threshold_area_fraction)
f.write('death to birth rate ratio = %.2f\n'%death_to_birth_rate_ratio)
f.write('timestep = %.1f'%TIMESTEP)
def fixed(history,i):
if 0 not in history[-1].properties['type']:
fix = 1
elif 1 not in history[-1].properties['type']:
fix = 0
else:
fix = -1
data.save_N_mutant(history,PARENTDIR+'/incomplete_delta%.1f'%DELTA,i)
return fix
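# fixed() maps the end state of a history to 1 (mutant type fixed), 0 (mutant type lost) or
# -1 (the run ended without fixation), saving the mutant trajectory in the incomplete case.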
def run_single_unpack(args):
return run_single(*args)
def run_single(i):
"""run a single voronoi tessellation model simulation"""
sys.stdout.flush()
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix=True,save_areas=True,
return_events=False,save_cell_histories=False,N_limit=MAX_POP_SIZE,DELTA=DELTA,game=game,mutant_num=1,
domain_size_multiplier=domain_size_multiplier,rates=rates,threshold_area_fraction=threshold_area_fraction)
fixation = fixed(history,i)
meanpopsize = np.mean([len(tissue) for tissue in history])
with open(PARENTDIR+'s%.2f_%s_time.txt'%(DELTA,job_id),'a') as wfile:
wfile.write('%5d %5d %d %d\n'%(i, history[-1].time, fixation, meanpopsize))
return fixation
def run_parallel():
pool = Pool(cpu_count()-1,maxtasksperchild=1000)
# fixation = np.array(map(run_single,range(NUMBER_SIMS)))
fixation = np.array([f for f in pool.imap(run_single,range(NUMBER_SIMS))])
with open(PARENTDIR+'s%.2f_%s.txt'%(DELTA,job_id),'a') as wfile:
if NUMBER_SIMS%BATCH_SIZE != 0:
batch_size=1
else:
batch_size = BATCH_SIZE
fixation = fixation.reshape((NUMBER_SIMS/batch_size,batch_size))
for fixation_batch in fixation:
fixed = len(np.where(fixation_batch==1)[0])
lost = len(np.where(fixation_batch==0)[0])
incomplete = len(np.where(fixation_batch==-1)[0])
wfile.write('%d %d %d\n'%(fixed,lost,incomplete))
run_parallel()
```
#### File: run_files/cip_area_threshold/prob_CIP.py
```python
import numpy as np
import pandas as pd
import igraph
def number_proliferating(tissue,alpha):
Zp = sum(tissue.mesh.areas>=np.sqrt(3)/2*alpha)
return Zp
def get_number_proliferating_neighbours(tissue,neighbours,alpha):
return sum(tissue.mesh.areas[neighbours] >= np.sqrt(3)/2*alpha)
def number_proliferating_neighbours(tissue,alpha):
proliferating = np.where(tissue.mesh.areas>=np.sqrt(3)/2*alpha)[0]
if len(proliferating) == 0:
return np.array([0])
return np.array([get_number_proliferating_neighbours(tissue,tissue.mesh.neighbours[i],alpha) for i in proliferating])
def number_proliferating_neighbours_distribution(history,alpha):
data = [np.bincount(number_proliferating_neighbours(tissue,alpha)) for tissue in history]
Zp = [number_proliferating(tissue,alpha) for tissue in history]
maxlen = max(len(nnp) for nnp in data)
data = [np.pad(nnp,(0,maxlen-len(nnp)),'constant') for nnp in data]
df = pd.DataFrame([{'np_{:d}'.format(i):f for i,f in enumerate(nnp)} for nnp in data])
df.insert(0,'Zp',Zp)
df.insert(1,'clusters',number_clusters(history,alpha))
return df
def mean_number_proliferating_neighbours_df(histories,params):
data = [pd.DataFrame([{'alpha':alpha,'db':db,'Zp':number_proliferating(tissue,alpha),'nn_p':np.mean(number_proliferating_neighbours(tissue,alpha))} for tissue in history])
for history,(alpha,db) in zip(histories,params)]
return pd.concat(data)
def number_proliferating_neighbours_distribution_df(histories,params):
return pd.concat([number_proliferating_neighbours_distribution(history,alpha).assign(db=db,alpha=alpha)
for history,(alpha,db) in zip(histories,params)])
def create_pgraph(tissue,alpha):
proliferating = np.where(tissue.mesh.areas>=np.sqrt(3)/2*alpha)[0]
edges = list(set([tuple(sorted([i,np.where(proliferating==neighbour)[0][0]] ))
for i,cell_id in enumerate(proliferating)
for neighbour in tissue.mesh.neighbours[cell_id]
if neighbour in proliferating] ) )
return igraph.Graph(n=len(proliferating),edges=edges)
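# create_pgraph builds an igraph graph whose vertices are the proliferating cells (area above
# the threshold sqrt(3)/2 * alpha) and whose edges join proliferating cells that are Voronoi
# neighbours; number_clusters then counts its connected components per tissue snapshot.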
def number_clusters(history,alpha):
return [len(create_pgraph(tissue,alpha).clusters()) for tissue in history]
def cluster_df(histories,params):
data = [pd.DataFrame([{'alpha':alpha,'db':db,'Zp':number_proliferating(tissue,alpha),'nclusters':len(create_pgraph(tissue,alpha).clusters())}
for tissue in history])
for history,(alpha,db) in zip(histories,params)]
return pd.concat(data)
```
#### File: cip_area_threshold/tissue_data/get_data_vary_eta.py
```python
from multiprocessing import Pool
import numpy as np
import libs.pd_lib_neutral as lib #library for simulation routines
import libs.data as data
# import libs.plot as vplt #plotting library
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import pandas as pd
import os
"""run a single voronoi tessellation model simulation"""
outdir = 'VTcoupled_force_area_data_vary_eta_and_dt'
dt = 0.005
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
def run_sim(eta):
dt=min(0.005,0.005*eta)
print(eta,dt)
rand = np.random.RandomState()
history = lib.run_simulation_vary_eta(simulation,l,timestep,timend,rand,eta,dt,til_fix=False,save_areas=True,progress_on=True)
return (data.mean_force(history,False),data.mean_area(history,False))
l = 10 # population size N=l*l
timend = 200 # simulation time (hours)
timestep = 1. # time intervals to save simulation history
rand = np.random.RandomState()
simulation = lib.simulation_ancestor_tracking #simulation routine imported from lib
pool = Pool(maxtasksperchild=1000) # creating a pool of workers to run simulations in parallel
eta_vals = np.array((0.2,0.4,0.6,0.8,1.0),dtype=float)
hdata = {"eta":np.repeat(eta_vals,int(timend/timestep+1)),"time":np.tile(np.linspace(0,timend,timend/timestep+1),len(eta_vals))}
fa_data = np.array([fa_d for fa_d in pool.imap(run_sim,eta_vals)])
hdata["force"] = (fa_data[:,0,:]).flatten()
# hdata["area"] = (fa_data[:,1,:]).flatten()
df = pd.DataFrame(data=hdata)
df.to_csv(outdir+"/data")
```
#### File: cip_area_threshold/tissue_data/run_CIP_parallel_read_params.py
```python
import numpy as np
# from pathos.multiprocessing import cpu_count
# from pathos.pools import ParallelPool as Pool
from multiprocessing import Pool,cpu_count
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import sys,os
import itertools
L = 10 # population size N=l*l
TIMEND = 1824. # simulation time (hours)
MAX_POP_SIZE = 500
TIMESTEP = 24. # time intervals to save simulation history
DEATH_RATE = 0.25/24
INIT_TIME = 96.
# DATA_SAVE_FIELDS = ["pop_size","cell_histories","cycle_phases",,
# "cell_seperation"]
DATA_SAVE_FIELDS = ["pop_size","areas","ages"]
for d in DATA_SAVE_FIELDS:
if d not in data.FIELDS_DICT:
raise ValueError("not all data types are correct")
PARENTDIR = "CIP_data_area_threshold/N100_TD_0.25pd2"
with open(PARENTDIR+'/info',"w") as f:
f.write('death_rate = %.3f\n'%DEATH_RATE)
f.write('initial pop size = %3d'%(L*L))
# f.write('domain width = %3.1g'%(L*L*S0) )
simulation = lib.simulation_contact_inhibition_area_dependent #simulation routine imported from lib
def run_single_unpack(args):
return run_single(*args)
def run_single(i,threshold_area_fraction,death_to_birth_rate_ratio,domain_size_multiplier,return_history=False):
"""run a single voronoi tessellation model simulation"""
rates = (DEATH_RATE,DEATH_RATE/death_to_birth_rate_ratio)
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix=False,save_areas=True,
return_events=False,save_cell_histories=True,N_limit=MAX_POP_SIZE,
domain_size_multiplier=domain_size_multiplier,rates=rates,threshold_area_fraction=threshold_area_fraction)
outdir = PARENTDIR+'/DtoB%.1f_Thresh%.2f'%(death_to_birth_rate_ratio,threshold_area_fraction)
if len(DATA_SAVE_FIELDS) > 0:
data.save_as_json(history,outdir,DATA_SAVE_FIELDS,{"threshold_area_fraction":threshold_area_fraction,
"death_rate":DEATH_RATE,"death_to_birth_rate_ratio":death_to_birth_rate_ratio,
"width":history[0].mesh.geometry.width},i)
def run_parallel(paramfile,repeats):
pool = Pool(cpu_count()-1,maxtasksperchild=1000)
parameters = np.loadtxt(paramfile)
args = [(i,threshold,death_rate,domain_size_multiplier)
for threshold,death_rate,domain_size_multiplier in parameters
for i in range(repeats)]
pool.map(run_single_unpack,args)
paramfile = sys.argv[1]
repeats = int(sys.argv[2])
run_parallel(paramfile,repeats)
```
#### File: cip_area_threshold/tissue_data/run_CIP_relaxation_times.py
```python
import numpy as np
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
import libs.plot as vplt #plotting library
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import matplotlib.pyplot as plt
import os
"""run a single voronoi tessellation model simulation"""
OUTDIR = "CIP_cell_division_relaxation_time2/"
l = 10 # population size N=l*l
timend = 30. # simulation time (hours)
timestep = 1.0 # time intervals to save simulation history
rand = np.random.RandomState()
simulation = lib.simulation_contact_inhibition_area_dependent #simulation routine imported from lib
threshold_area_fraction=1.0
DEATH_RATE = 1./12
rates = (DEATH_RATE,DEATH_RATE/0.4) #death_rate,division_rate
domain_size_multiplier=0.980940
eta,mu,dt=1.,-250,0.001
T_m_init=0.1
def get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime):
history = lib.run_simulation(simulation,l,timestep,timend,rand,progress_on=True,
init_time=None,til_fix=False,save_areas=True,cycle_phase=None,eta=eta,mu=mu,dt=dt,T_m=T_m_init,
return_events=False,save_cell_histories=True,domain_size_multiplier=domain_size_multiplier,
rates=rates,threshold_area_fraction=threshold_area_fraction)
tissue = lib.run_return_final_tissue(lib.simulation_no_division(history[-1],dt,200,rand,eta),200)
division_ready = lib.check_area_threshold(tissue.mesh,threshold_area_fraction)
mother = rand.choice(division_ready)
tissue.add_daughter_cells(mother,rand)
tissue.remove(mother,True)
tissue.update(dt)
init_tissues = [tissue.copy() for T_m in T_m_vals]
for T_m,tissue in zip(T_m_vals,init_tissues):
tissue.Force = BasicSpringForceNoGrowth(mu,T_m)
histories = [lib.run(lib.simulation_no_division(tissue,dt,int(relaxtime/dt),rand,eta),int(relaxtime/dt),1) for tissue in init_tissues]
for T_m,history in zip(T_m_vals,histories):
cell1,cell2 = len(history[0])-2,len(history[0])-1
sibling_distance = get_sibling_distance(history,cell1,cell2)
mean_area = np.array([np.mean(tissue.mesh.areas[-2:]) for tissue in history])
time = np.arange(0,relaxtime,dt)
data = np.vstack((time,sibling_distance,mean_area))
try: np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data)
except IOError:
os.makedirs(OUTDIR)
np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data)
def narg(tissue,i,j):
try: return np.where(tissue.mesh.neighbours[i]==j)[0][0]
except IndexError: return np.nan
def get_sibling_distance(history,cell1,cell2):
return np.array([tissue.mesh.distances[cell1][narg(tissue,cell1,cell2)] if narg(tissue,cell1,cell2)<100 else np.nan for tissue in history])
relaxtime = 2.0
T_m_vals=[0.001,0.01,0.1,0.25,0.5,1.0,2.0]
get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime)
```
#### File: run_files/ddalpharun/multirun_neutral.py
```python
from multiprocessing import Process,Pool,Lock #parallel processing
import multiprocessing as mp
from itertools import repeat
import sys
import os
import numpy as np
import libs.run_lib as lib
import libs.data as data
import libs.plot as vplt
rand = np.random.RandomState()
runs_per_batch = 500
batches = 4
l = 10
timend = 10000.
timestep = 2.0
width, height = float(l)*1.5, float(l)*1.5*np.sqrt(3)/2
folder = 'control2'
info = """
size-dependent selection with neutral mutation
maxtime = %.0f
timestep = %.0f
N0 = %d
box height = %.2f
box width = %.2f
""" %(timend,timestep,l*l,width,height)
if not os.path.exists(folder): # if the folder doesn't exist create it
os.makedirs(folder)
with open(folder+'/info.txt',"w",0) as infofile:
infofile.write(info)
def run_parallel(i):
rand=np.random.RandomState()
history = lib.run_simulation_size_dependent_with_neutral_mutants(l,timestep,timend,rand)
if 0 not in history[-1].properties['mutant']:
fix = 1
data.save_N_cell(history,folder+'/fixed',i)
data.save_N_mutant(history,folder+'/fixed',i)
elif 1 not in history[-1].properties['mutant']:
fix = 0
else:
fix = -1
data.save_N_cell(history,folder+'/incomplete',i)
data.save_N_mutant(history,folder+'/incomplete',i)
update_file.write('%d\n'%i)
return fix
fix_results = open(folder+'/fixation.txt','w',0)
for i in range(batches):
text = '\rbatch %d of %d'%(i,batches)
sys.stdout.write(text)
sys.stdout.flush()
update_file = open(folder+'/current_batch','w',0)
cpunum=mp.cpu_count()
pool = Pool(processes=cpunum) # creating a pool with processors equal to the number of processors
fixation = np.array(pool.map(run_parallel,range(i*runs_per_batch,(i+1)*runs_per_batch))) # mapping of all the calls necessary into the calling function
fixed = len(np.where(fixation==1)[0])
nofixed = len(np.where(fixation==0)[0])
incomplete = len(np.where(fixation==-1)[0])
fix_results.write('%d %d %d\n'%(fixed,nofixed,incomplete))
pool.close()
pool.join()
```
#### File: run_files/neutral/neighbour_data_neutral.py
```python
from multiprocessing import Pool #parallel processing
import multiprocessing as mp
import structure
from structure.global_constants import *
from structure.cell import Tissue, BasicSpringForceNoGrowth
import structure.initialisation as init
import sys
import os
import numpy as np
import libs.pd_lib_neutral as lib
import libs.data as data
from functools import partial
import pandas as pd
def distribution_data(history,mutant_id,i,all_types=False):
"""
generates neighbour data for mutants (or all cells if all_types is True)
cells are labelled by their ancestor. all cells with ancestor=mutant_id are type 1, all other cells type 0.
returns list of dicts with keys: tissueid, time, n, k, j [, type]
n = # type 1 cells
k = # neighbours
j = # type 1 neighbours
"""
if all_types:
return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours]),'type': 1 if tissue.properties['ancestor'][idx]==mutant_id else 0}
for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
for idx,cell_neighbours in enumerate(tissue.mesh.neighbours)]
else:
return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours])}
for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
for idx,cell_neighbours in enumerate(tissue.mesh.neighbours) if tissue.properties['ancestor'][idx]==mutant_id]
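# illustrative record (hypothetical values): one mutant cell observed at t=120h with
# 6 neighbours of which 2 are mutants, while the clone holds 14 cells in total:
#   {'tissueid': 3, 'time': 120, 'n': 14, 'k': 6, 'j': 2}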
def run_sim(all_types,i):
"""run a single simulation and save neighbour data for mutants (or all cells if all_types is True)"""
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix='exclude_final',save_areas=False)
mutant_id = np.argmax(np.bincount(history[-1].properties['ancestor']))
return distribution_data(history,mutant_id,i,all_types)
L = 10 # population size N = l*l
INIT_TIME = 96. # initial simulation time to equilibrate
TIMEND = 80000. # length of simulation (hours)
TIMESTEP = 12. # time intervals to save simulation history
SIM_RUNS = int(sys.argv[1]) # number of sims to run taken as command line arg
save_all_types = False # set to True to save data for cooperators AND defectors (otherwise just cooperator data saved)
simulation = lib.simulation_ancestor_tracking # tracks clones with common ancestor
outdir = 'coop_neighbour_distribution/'
if not os.path.exists(outdir):
os.makedirs(outdir)
savename = 'batch_tm'
# run simulations in parallel
cpunum=mp.cpu_count()
pool = Pool(processes=cpunum-1,maxtasksperchild=1000)
df = pd.DataFrame(sum(pool.map(partial(run_sim,save_all_types),range(SIM_RUNS)),[]))
pool.close()
pool.join()
df.to_csv(outdir+savename,index=False)
```
#### File: run_files/neutral/run_neutral.py
```python
import numpy as np
import libs.pd_lib_neutral as lib
import libs.data as data
import libs.plot as vplt #plotting library
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import pandas as pd
"""run a single voronoi tessellation model simulation"""
def distribution_data(history,mutant_id,i,all_types=False):
"""
generates neighbour data for mutants (or all cells if all_types is True)
cells are labelled by their ancestor. all cells with ancestor=mutant_id are type 1, all other cells type 0.
returns list of dicts with keys: tissueid, time, n, k, j [, type]
n = # type 1 cells
k = # neighbours
j = # type 1 neighbours
"""
if all_types:
return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours]),'type': 1 if tissue.properties['ancestor'][idx]==mutant_id else 0}
for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
for idx,cell_neighbours in enumerate(tissue.mesh.neighbours)]
else:
return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours])}
for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
for idx,cell_neighbours in enumerate(tissue.mesh.neighbours) if tissue.properties['ancestor'][idx]==mutant_id]
L = 10 # population size N=l*l
timend = 10000 # simulation time (hours)
timestep = 12.0 # time intervals to save simulation history
init_time = 12.
rand = np.random.RandomState()
simulation = lib.simulation_ancestor_tracking # tracks clones with common ancestor
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,timestep,timend,rand,progress_on=True,
init_time=init_time,til_fix=True,save_areas=False)
```
#### File: run_files/pd_original/multirun_death_birth.py
```python
from multiprocessing import Pool #parallel processing
from itertools import repeat
import sys
from functools import partial
import os
import numpy as np
import libs.data as data
from libs.pd_lib import run_simulation,simulation_death_birth,prisoners_dilemma_averaged,prisoners_dilemma_accumulated
from functools import partial
"""command line arguments
1. str (av or acc): game_str. determines payoff accounting is averaged or accumulated
2, 3. ints: start_batch and end_batch. indices (run batches of 1000 simulations)
4, ... array floats: b_vals. values of b (prisoner's dilemma param) to run simulations for
"""
game_str = sys.argv[1]
if game_str == 'acc': game = prisoners_dilemma_accumulated
elif game_str == 'av': game = prisoners_dilemma_averaged
else: raise ValueError('invalid game string')
start_batch,end_batch = int(sys.argv[2]),int(sys.argv[3])
runs_per_batch = 1000
b_vals = np.array(sys.argv[4:],dtype=float)
c,DELTA = 1.0,0.025 #prisoner dilemma params
l = 10 #population size N=lxl
timend = 10000. #time (hours) after which simulation ends if no fixation
timestep = 12.0 #state saved every 12 hours
rand = np.random.RandomState()
outdir = 'VTpd_%s_db'%(game_str)
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
with open(outdir+'/info','w') as f:
f.write('N=%d, c=%.1f, delta=%.3f'%(l*l,c,DELTA))
def run_parallel(b,i):
"""run a single simulation using simulation_death_birth routine indexed by i returning 1 if resulted in mutant fixation and 0 if resident fixation.
If no fixation returns -1 and saves number of mutants at each timestep to file
"""
rand=np.random.RandomState()
    history = run_simulation(simulation_death_birth,l,timestep,timend,rand,DELTA,game,(b,c),save_areas=False)
if 0 not in history[-1].properties['type']:
fix = 1
# data.save_N_mutant(history,outdir+'/fixed_b%.1f'%b,i)
elif 1 not in history[-1].properties['type']:
fix = 0
else:
fix = -1
data.save_N_mutant(history,outdir+'/incomplete_b%.1f'%b,i)
return fix
pool = Pool(maxtasksperchild=1000) # creating a pool of workers to run simulations in parallel
# for each b run 1000x(end_batch-start_batch simulations). At the end of each batch of 1000 simulations
# write to file how many are fixed, lost and incomplete
for b in b_vals:
fix_results = open(outdir+'/fix%.2f'%b,'a',0)
for i in range(start_batch,end_batch):
text = '\r running batch %d of %d'%(i+1,end_batch)
sys.stdout.write(text)
sys.stdout.flush()
fixation = np.array([f for f in pool.imap(partial(run_parallel,b),range(i*runs_per_batch,(i+1)*runs_per_batch))]) # mapping of all the calls necessary into the calling function (run parallel)
fixed = len(np.where(fixation==1)[0])
lost = len(np.where(fixation==0)[0])
incomplete = len(np.where(fixation==-1)[0])
fix_results.write('%d %d %d\n'%(fixed,lost,incomplete))
```
|
{
"source": "jessiewang158/YOLO-BG__Sub",
"score": 3
}
|
#### File: jessiewang158/YOLO-BG__Sub/mask.py
```python
import cv2
import numpy as np
def draw_mask(img, image_bg, bbox, labels):
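    # img / image_bg: current frame and background frame (BGR); bbox: detector boxes,
    # assumed here to be [x1, y1, x2, y2] per row; labels: one class label per box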
global mask
global masked_img
alpha = 0.95
mask=[]
masked_img_final=[]
#print('bboxt: ' + str(type(bbox)) + '\n')
bbox=np.array(bbox)
#img = np.array(img)
print('bbox: ' + str(bbox) + '\n')
#print('labels: '+str(labels)+'\n')
#print('img: '+str(img.shape)+'\n')
masked_bg = np.full((img.shape[0], img.shape[1]), 0, dtype="uint8")
for i,l in enumerate(labels):
#print('bbox: '+str(bbox[i])+'\n')
crop_obj = img[bbox[i][1]:bbox[i][3], bbox[i][0]:bbox[i][2]]
#cv2.imshow('',crop_obj)
#cv2.waitKey()
crop_obj = cv2.normalize(crop_obj.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
crop_bg = image_bg[bbox[i][1]:bbox[i][3], bbox[i][0]:bbox[i][2]]
#cv2.imshow('', crop_bg)
#cv2.waitKey()
crop_bg = cv2.normalize(crop_bg.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
#cv2.imshow('', crop_bg)
#cv2.waitKey()
try:
crop_bg_w = alpha * crop_bg + (1 - alpha) * crop_obj
except:
print('NoneType!')
            print('bg:' + str(crop_bg) + '\nobj:' + str(crop_obj))
exit()
mask=cv2.cvtColor((abs(crop_obj-crop_bg)*255).astype(np.uint8),cv2.COLOR_BGR2GRAY)
#cv2.imshow('', mask)
#cv2.waitKey()
#Otsu Thresholding
#ret, th = cv2.threshold(mask, 0, 255, cv2.THRESH_OTSU)
#mask = np.invert(th)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(mask, (5, 5), 0)
#mask = cv2.medianBlur(mask, 5)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
mask=th3
#mask = np.invert(th3)
#print(mask)
#cv2.imshow('', mask)
#cv2.waitKey()
#masked_img = np.full((img.shape[0], img.shape[1]),0,dtype="uint8")
#masked_img = np.zeros((img.shape[0], img.shape[1]), dtype="uint8")
#print('mask shape: '+str(mask.shape)+', img: '+str(masked_img.shape)+'\n')
#print('mask_sum: ' + str(np.sum(mask))+'\n')
#for r,row in enumerate(range(bbox[i][1], min([bbox[i][3],bbox.shape[0]]))):
#for c,col in enumarate(range(bbox[i][0], min([bbox[i][2],bbox.shape[1]]))):
#masked_img[row, col] = mask[r, c]
#masked_img[row, col] = 255
#print("masked_bg.shape",masked_bg.shape)
#print("mask",mask.shape)
masked_bg[bbox[i][1]:bbox[i][3],bbox[i][0]:bbox[i][2]] = mask
masked_img=cv2.cvtColor(masked_bg,cv2.COLOR_GRAY2RGB)
#print('masked_img_sum: ' + str(np.sum(masked_img)) + '\n')
#cv2.imshow('', masked_img)
#cv2.waitKey()
#masked_img_final=np.hstack([masked_img_final,masked_img])
#print('masked_img',masked_img)
#masked_img_final.append(masked_img)
#print('masked_img_final',masked_img_final)
#masked_img_final=sum(masked_img_final)
return masked_img
```
|
{
"source": "jessiewy/ContainerScheduler",
"score": 2
}
|
#### File: ContainerScheduler/scheduler/manager.py
```python
import scheduler.filter_scheduler
import utils
DEFAULT_SCHEDULER_DRIVER = scheduler.filter_scheduler.FilterScheduler
class SchedulerManager(object):
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
scheduler_driver = DEFAULT_SCHEDULER_DRIVER
self.driver = utils.import_class(scheduler_driver)(*args, **kwargs)
def select_destinations(self, request_spec, filter_properties):
dests = self.driver.select_destinations(request_spec,
filter_properties)
return dests
```
|
{
"source": "JessieYuW/CrevNet-Traffic4cast",
"score": 2
}
|
#### File: CrevNet-Traffic4cast/data/traffic.py
```python
import os
import random
import numpy as np
import torch
import pickle as pkl
import h5py
import cv2
class TRAFFIC(object):
def __init__(self, train, data_root, seq_len=13,input_len = 10):
self.train = train
self.data_type = "training" if self.train else "test"
self.data_root = data_root
self.dirs = os.listdir("{}".format(self.data_root))
self.seq_len = seq_len
self.city = ['Moscow'] # ['Berlin', 'Istanbul', 'Moscow']
self.seed_set = False
self.file_len = []
self.flist = {}
for i in self.city:
folder = self.data_root+i+'/'+i+'_'+self.data_type
fn = os.listdir(folder)
fn.sort()
self.flist[i] = fn
self.file_len.append(len(fn))
self.data = None
self.c = 0
self.f = 0
self.p = 0
self.pos = [57, 114, 174, 222, 258]
self.pos2 = [30, 69, 126, 186, 234]
self.input_len=input_len
def get_sequence(self):
t = self.seq_len
seq = []
while True:
k = self.p
cc = self.city[self.c]
files = self.flist[cc]
cf = files[self.f]
if self.p == 0:
folder = self.data_root + cc + '/' + cc + '_' + self.data_type + '/'
self.data = np.load(folder +cf)
if cc == 'Berlin':
start_time = self.pos2[self.p] - self.input_len
else:
start_time = self.pos[self.p] - self.input_len
self.p +=1
elif self.p < 4:
if cc == 'Berlin':
start_time = self.pos2[self.p] - self.input_len
else:
start_time = self.pos[self.p] - self.input_len
self.p +=1
elif self.p ==4:
if cc == 'Berlin':
start_time = self.pos2[self.p] - self.input_len
else:
start_time = self.pos[self.p] - self.input_len
self.p = 0
if self.f == self.file_len[self.c]-1:
self.f = 0
self.c +=1
else:
self.f += 1
start_time = start_time -3
for i in range(t):
im = self.data[start_time + i]
patch = np.zeros((1, 436, 3), dtype=int)
patch2 = np.zeros((496, 12, 3), dtype=int)
im = np.concatenate((im, patch), axis=0)
im = np.concatenate((im, patch2), axis=1)
seq.append(im/255.0)
print(k,cc,cf)
# return np.array(seq),k
return np.array(seq)
def __getitem__(self, index):
# if not self.seed_set:
# self.seed_set = True
# random.seed(index)
# np.random.seed(index)
# torch.manual_seed(index)
# return torch.from_numpy(self.get_sequence()[0]),self.get_sequence()[1]
return torch.from_numpy(self.get_sequence())
def __len__(self):
return len(self.dirs) * 36 * 5 # arbitrary
if __name__ == "__main__":
a = TRAFFIC(False, "/home/wei/Desktop/city/")
for i in range(2000):
c = a.get_sequence()
print(i,c[1],c[2])
# import os
# import random
# import numpy as np
# import torch
# import pickle as pkl
# import h5py
# import cv2
#
#
#
# class TRAFFIC(object):
# def __init__(self, train, data_root, seq_len=13):
# self.train = train
# self.data_type = "training" if self.train else "test"
# self.data_root = data_root
# self.dirs = os.listdir("{}".format(self.data_root))
# self.seq_len = seq_len
# self.city = ['Berlin', 'Istanbul', 'Moscow']
# self.seed_set = False
# self.flist = []
# for i in self.city:
# folder = self.data_root+i+'/'+i+'_'+self.data_type
# self.flist.append(os.listdir(folder))
# self.data = None
# self.count = 0
#
# def get_sequence(self):
# t = self.seq_len
# while True:
# if self.count == 0:
# c_idx = np.random.randint(len(self.city))
# c = self.city[c_idx]
# folder = self.data_root + c + '/' + c + '_' + self.data_type + '/'
# files = self.flist[c_idx]
# s_idx = np.random.randint(len(files))
# f = files[s_idx]
# fr = h5py.File(folder + f, 'r')
# a_group_key = list(fr.keys())[0]
# self.data = list(fr[a_group_key])
# self.count += 1
# elif self.count >= 100:
# self.count = 0
# else:
# self.count += 1
# print(self.count)
#
# seq = []
# start_time = np.random.randint(0,len(self.data)-t)
# flip = np.random.randint(2)
#
# for i in range(t):
# im = self.data[start_time + i]
# if flip == 1:
# im = cv2.flip(im, 1)
# patch = np.zeros((1, 436, 3))
# patch2 = np.zeros((496, 4, 3))
# im = np.concatenate((im, patch), axis=0)
# im = np.concatenate((im, patch2), axis=1)
# seq.append(im/255.0)
# return np.array(seq)
#
# def __getitem__(self, index):
# if not self.seed_set:
# self.seed_set = True
# random.seed(index)
# np.random.seed(index)
# # torch.manual_seed(index)
# return torch.from_numpy(self.get_sequence())
#
#
#
# def __len__(self):
# return len(self.dirs) * 36 * 5 # arbitrary
#
#
# if __name__ == "__main__":
# a = TRAFFIC(True, "/home/wei/Desktop/traffic/")
# for _ in range(1000):
# c = a.get_sequence()
# print(np.shape(c))
```
|
{
"source": "JessieZamzow/grid-website",
"score": 2
}
|
#### File: _plugins/jekyll-rst/transform.py
```python
import sys
from docutils.core import publish_parts
from optparse import OptionParser
from docutils.frontend import OptionParser as DocutilsOptionParser
from docutils.parsers.rst import Parser
def transform(writer=None, part=None):
p = OptionParser(add_help_option=False)
# Collect all the command line options
docutils_parser = DocutilsOptionParser(components=(writer, Parser()))
for group in docutils_parser.option_groups:
p.add_option_group(group.title, None).add_options(group.option_list)
p.add_option('--part', default=part)
opts, args = p.parse_args()
settings = dict({
'file_insertion_enabled': False,
'raw_enabled': False,
}, **opts.__dict__)
if len(args) == 1:
try:
content = open(args[0], 'r').read()
except IOError:
content = args[0]
else:
content = sys.stdin.read()
parts = publish_parts(
source=content,
settings_overrides=settings,
writer=writer,
)
if opts.part in parts:
return parts[opts.part]
return ''
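# typical use (hypothetical): render the 'html_body' part of an rST file or string
#   from docutils.writers.html4css1 import Writer
#   print(transform(writer=Writer(), part='html_body'))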
```
|
{
"source": "JessikaSmith/AutomatedTrainTestSplit",
"score": 2
}
|
#### File: AutomatedTrainTestSplit/model/utils.py
```python
import os.path
import sys
import keras
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from gensim.models import KeyedVectors
from gensim.models import FastText
import pickle
import requests
import re
import pymorphy2
import pymystem3
morph = pymorphy2.MorphAnalyzer()
path_to_w2v = '/data/GAforAutomatedTrainTestSplit/model/produced_data/ruwikiruscorpora_upos_skipgram_300_2_2018.vec'
path_to_fasttext_emb = '/tmp/wiki.ru.bin'
path_to_fasttext_emb_2 = '/data/GAforAutomatedTrainTestSplit/model/produced_data/ft_native_300_ru_wiki_lenta_lemmatize.bin'
path_to_fasttext_unlem = '/tmp/ft_native_300_ru_wiki_lenta_lower_case.bin'
upt_url = 'https://raw.githubusercontent.com/akutuzov/universal-pos-tags/4653e8a9154e93fe2f417c7fdb7a357b7d6ce333/ru-rnc.map'
m = pymystem3.mystem.Mystem(mystem_bin='/home/gmaster/mystem')
mapping = {}
r = requests.get(upt_url, stream=True)
for pair in r.text.split('\n'):
pair = re.sub('\s+', ' ', pair, flags=re.U).split(' ')
if len(pair) > 1:
mapping[pair[0]] = pair[1]
def embedding(emb_type):
if emb_type == 'w2v':
model = KeyedVectors.load_word2vec_format(path_to_w2v)
if emb_type == 'fasttext':
model = FastText.load_fasttext_format(path_to_fasttext_emb)
if emb_type == 'fasttext_2':
print('loading fasttext embedding...')
model = FastText.load_fasttext_format(path_to_fasttext_emb_2)
print('Done!')
if emb_type == 'fasttext_unlem':
model = FastText.load_fasttext_format(path_to_fasttext_unlem)
return model
def add_universal_tag(word):
processed = m.analyze(word)
tagged = []
for w in processed:
try:
lemma = w["analysis"][0]["lex"].lower().strip()
pos = w["analysis"][0]["gr"].split(',')[0]
pos = pos.split('=')[0].strip()
if pos in mapping:
tagged.append(lemma + '_' + mapping[pos]) # tags conversion
else:
tagged.append(lemma + '_X')
except KeyError:
continue
return tagged
class Processor:
def __init__(self, max_features, emb_type, max_len, emb_dim=300):
self.tokenizer = None
self.max_features = max_features
self.emb_type = emb_type
self.model = None
self.emb_dim = emb_dim
self.embedding_matrix = None
self.x_train_name = None
self.max_len = max_len
def prepare_embedding_matrix(self, word_index, x_train_name):
print('Starting embedding matrix preparation...')
embedding_matrix = np.zeros((self.max_features, self.emb_dim))
if self.emb_type == 'w2v':
for word, i in word_index.items():
try:
emb_vect = self.model.wv[add_universal_tag(word)].astype(np.float32)
embedding_matrix[i] = emb_vect
# out of vocabulary exception
except:
print(word)
else:
for word, i in word_index.items():
try:
emb_vect = self.model.wv[word]
embedding_matrix[i] = emb_vect.astype(np.float32)
# out of vocabulary exception
except:
print(word)
np.save('/data/GAforAutomatedTrainTestSplit/model/produced_data/%s_%s_%s.npy' % (
self.emb_type, x_train_name, self.max_features), embedding_matrix)
return embedding_matrix
def fit_processor(self, x_train, x_train_name, other=None):
self.x_train_name = x_train_name
try:
self.embedding_matrix = np.load(
'/data/GAforAutomatedTrainTestSplit/model/produced_data/%s_%s_%s.npy' % (
self.emb_type, x_train_name, self.max_features))
with open('/data/GAforAutomatedTrainTestSplit/model/produced_data/tokenizer_%s_%s_%s.pickle' % (
self.emb_type, x_train_name, self.max_features), 'rb') as handle:
self.tokenizer = pickle.load(handle)
# not found exception
except: # to check
print('No model found...initialization...')
#x_train = [sent[0] for sent in x_train]
self.tokenizer = Tokenizer(num_words=self.max_features + 1, oov_token='oov')
if not other:
self.tokenizer.fit_on_texts(x_train)
else:
if isinstance(other[0], list):
other = [sent[0] for sent in other]
self.tokenizer.fit_on_texts(x_train)
            # hopefully this stuff helps to avoid issues with oov (NOT SURE, needs to be checked)
self.tokenizer.word_index = {e: i for e, i in self.tokenizer.word_index.items() if i <= self.max_features}
self.tokenizer.word_index[self.tokenizer.oov_token] = self.max_features + 1
word_index = self.tokenizer.word_index
with open('/data/GAforAutomatedTrainTestSplit/model/produced_data/tokenizer_%s_%s_%s.pickle' % (
self.emb_type, x_train_name, self.max_features), 'wb') as handle:
pickle.dump(self.tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ======================== write tokenizer to file ===================================
print('Amount of unique tokens %s' % len(word_index))
self.model = embedding(self.emb_type)
self.embedding_matrix = self.prepare_embedding_matrix(word_index, x_train_name)
def prepare_input(self, x, y=None):
# prepare x data
if isinstance(x[0], list):
x = [sent[0] for sent in x]
sequences_x = self.tokenizer.texts_to_sequences(x)
x = pad_sequences(sequences_x, maxlen=self.max_len)
# prepare labels
if y:
if isinstance(y[0], list):
y = [y[0] for y in y]
y = np.asarray(y)
return x, y
return x
def prepare_sequence(self, text):
text = [text]
sequences = self.tokenizer.texts_to_sequences(text)
x = pad_sequences(sequences, maxlen=self.max_len)
return x
def prepare_custom_embedding(self, vocabulary, x_train_name='custom'):
self.max_features = len(vocabulary)
try:
self.embedding_matrix = np.load('/data/GAforAutomatedTrainTestSplit/model/produced_data/%s_%s_%s.npy' % (
self.emb_type, x_train_name, self.max_features))
except:
print('Starting embedding matrix preparation...')
self.model = embedding(self.emb_type)
embedding_matrix = np.zeros((len(vocabulary), self.emb_dim))
if self.emb_type == 'w2v':
for i, word in enumerate(vocabulary):
try:
emb_vect = self.model.wv[add_universal_tag(word)].astype(np.float32)
embedding_matrix[i] = emb_vect
# out of vocabulary exception
except:
print(word)
else:
for i, word in enumerate(vocabulary):
try:
emb_vect = self.model.wv[word]
embedding_matrix[i] = emb_vect.astype(np.float32)
# out of vocabulary exception
except:
print(word)
self.embedding_matrix = embedding_matrix
np.save('/data/GAforAutomatedTrainTestSplit/model/produced_data/%s_%s_%s.npy' % (
self.emb_type, x_train_name, self.max_features), embedding_matrix)
```
#### File: source/GA/crossover.py
```python
import numpy as np
class Crossover:
def __init__(self, crossover_type, **kwargs):
self.crossover_type = crossover_type
def crossover(self, parent_1, parent_2, **kwargs):
if self.crossover_type == 'pmx':
return self.crossover_pmx(parent_1=parent_1, parent_2=parent_2)
if self.crossover_type == 'ordered':
return self.ordered_crossover(parent_1=parent_1, parent_2=parent_2)
if self.crossover_type == 'cycle':
return self.cycle_crossover(parent_1=parent_1, parent_2=parent_2)
    def crossover_pmx(self, parent_1, parent_2):
        # Partially mapped crossover (PMX): a minimal sketch assuming both parents
        # are permutations of the same set of city indices. Each child copies one
        # parent's segment and resolves clashes through that segment's position mapping.
        p1, p2 = np.asarray(parent_1), np.asarray(parent_2)
        points_num = len(p1)
        cut_ix = np.random.choice(points_num - 2, 2, replace=False)
        min_ix = np.min(cut_ix)
        max_ix = np.max(cut_ix)
        def pmx_child(a, b):
            child = np.array(b, copy=True)
            child[min_ix:max_ix] = a[min_ix:max_ix]
            segment = set(a[min_ix:max_ix])
            for ix in list(range(min_ix)) + list(range(max_ix, points_num)):
                gene = b[ix]
                while gene in segment:
                    pos = np.where(a[min_ix:max_ix] == gene)[0][0] + min_ix
                    gene = b[pos]
                child[ix] = gene
            return [int(g) for g in child]
        return pmx_child(p1, p2), pmx_child(p2, p1)
def ordered_crossover(self, parent_1, parent_2):
points_num = len(parent_1)
cut_ix = np.random.choice(points_num - 2, 2, replace=False)
min_ix = np.min(cut_ix)
max_ix = np.max(cut_ix)
offspring_1 = np.zeros(points_num)
current_ix = 0
set_1 = parent_1[min_ix:max_ix]
for i, elem in enumerate(parent_2):
if elem not in set_1:
if current_ix != min_ix:
offspring_1[current_ix] = elem
else:
current_ix = max_ix
offspring_1[current_ix] = elem
current_ix += 1
offspring_1[min_ix:max_ix] = set_1
offspring_2 = np.zeros(points_num)
current_ix = 0
set_2 = parent_2[min_ix:max_ix]
for i, elem in enumerate(parent_1):
if elem not in set_2:
if current_ix != min_ix:
offspring_2[current_ix] = elem
else:
current_ix = max_ix
offspring_2[current_ix] = elem
current_ix += 1
offspring_2[min_ix:max_ix] = set_2
return [int(i) for i in offspring_1], [int(i) for i in offspring_2]
def cycle_crossover(self, parent_1, parent_2):
raise NotImplementedError
```
|
{
"source": "JessikaSmith/jss28_bayesian",
"score": 3
}
|
#### File: jss28_bayesian/variable_selection/mcmc.py
```python
__author__ = '<NAME>'
import numpy as np
class MCMC_Metropoolis:
def __init__(self, x, func, step, thin=1):
self.x = x
self.func = func
self.step = step
self.thin = thin
self.n_dims = self.x.size # if self.x.ndim == 1 else self.x.shape[1]
    def generator(self, num_sample, rng=np.random):
        # Random-walk Metropolis sampler. Assumes self.func returns the (unnormalised)
        # log-density of a state; `rng` can be np.random or a seeded np.random.RandomState.
        n_acc = 0
        l_trace = []
        acc_rate = []
        samples = np.empty([num_sample, self.n_dims])
        logp = self.func(self.x)
        for n in range(num_sample):
            for _ in range(self.thin):
                s = self.x + self.step * rng.randn(*self.x.shape)
                logp_s = self.func(s)
                # acceptance ratio r = p(s)/p(x), compared in log space
                if np.log(rng.rand()) < logp_s - logp:
                    self.x, logp = s, logp_s
                    n_acc += 1
            samples[n] = self.x
            l_trace.append(logp)
            acc_rate.append(n_acc / float((n + 1) * self.thin))
        return samples, np.array(l_trace), np.array(acc_rate)
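# illustrative usage (hypothetical target density): sample a standard 2-D Gaussian
#   sampler = MCMC_Metropoolis(np.zeros(2), lambda x: -0.5 * np.sum(x ** 2), step=0.5)
#   samples, logp_trace, acc_rate = sampler.generator(5000)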
```
#### File: jss28_bayesian/visualization/vis_tools.py
```python
__author__ = '<NAME>'
import matplotlib.pyplot as plt
from IPython.core.pylabtools import figsize
import matplotlib
def plot_params(N_SAMPLES, alpha_samples, theta_samples):
plt.subplot(211)
plt.title(r"""Distribution of $\alpha$ with %d samples""" % N_SAMPLES)
plt.hist(alpha_samples, histtype='stepfilled',
color='darkred', bins=30, alpha=0.8, density=True)
plt.ylabel('Probability Density')
plt.subplot(212)
plt.title(r"""Distribution of $\theta$ with %d samples""" % N_SAMPLES)
plt.hist(theta_samples, histtype='stepfilled',
color='darkblue', bins=30, alpha=0.8, density=True)
plt.ylabel('Probability Density')
plt.show()
def plot_trace(alpha_samples, theta_samples):
# Plot alpha trace
plt.subplot(211)
plt.title(r'Trace of $\alpha$')
plt.plot(alpha_samples, color = 'darkred')
plt.xlabel('Samples'); plt.ylabel('Parameter')
    # Plot theta trace
    plt.subplot(212)
    plt.title(r'Trace of $\theta$')
plt.plot(theta_samples, color='b')
plt.xlabel('Samples'); plt.ylabel('Parameter')
plt.tight_layout(h_pad=0.8)
plt.show()
```
|
{
"source": "JessikaSmith/language_model",
"score": 2
}
|
#### File: language_model/experiments/fastai_try.py
```python
from fastai.text import *
import pandas as pd
import html
import numpy as np
import re
import collections
from collections import *
import pickle
import torch
import torchvision
re1 = re.compile(r' +')
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.visible_device_list = "1"
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
from keras.backend.tensorflow_backend import set_session
from collections import OrderedDict
set_session(session)
def fixup(x):
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('<', ' ').replace('>', ' ').replace('#36;', '$').replace(
'\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace(
' @-@ ', '-').replace('\\', ' \\ ').replace('img', ' ').replace('class', ' ').replace(
'src', ' ').replace('alt', ' ').replace('email', ' ').replace('icq', ' ').replace(
'href', ' ').replace('mem', ' ').replace('link', ' ').replace('mention', ' ').replace(
'onclick', ' ').replace('icq', ' ').replace('onmouseover', ' ').replace('post', ' ').replace(
'local', ' ').replace('key', ' ').replace('target', ' ').replace('amp', ' ').replace(
'section', ' ').replace('search', ' ').replace('css', ' ').replace('style', ' ').replace(
'cc', ' ').replace('date', ' ').replace('org', ' ').replace('phone', ' ').replace(
'address', ' ').replace('name', ' ').replace('\n', '').replace('\r', '').replace(
'|', '').replace('id', ' ').replace('[', '').replace(']', '').replace('span', ' ')
return re1.sub(' ', html.unescape(x))
def fixup_2(x):
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('<', ' ').replace('>', ' ').replace('#36;', '$').replace(
'\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace(
' @-@ ', '-').replace('\\', ' \\ ').replace('img', ' ').replace('class', ' ').replace(
'src', ' ').replace('alt', ' ').replace('email', ' ').replace('icq', ' ').replace(
'href', ' ').replace('mem', ' ').replace('link', ' ').replace('mention', ' ').replace(
'onclick', ' ').replace('icq', ' ').replace('onmouseover', ' ').replace('post', ' ').replace(
'local', ' ').replace('key', ' ').replace('target', ' ').replace('amp', ' ').replace(
'section', ' ').replace('search', ' ').replace('css', ' ').replace('style', ' ').replace(
'cc', ' ').replace('\n', '').replace('\r', '').replace('|', '').replace('id', ' ').replace(
'[', '').replace(']', '')
splitted = x.split()
print(splitted[:3])
if 'phone' in splitted[:3]:
part_1 = ' '.join(splitted[:3])
part_1 = part_1.replace('phone', '')
part_2 = ' '.join(splitted[3:])
x = '%s %s' % (part_1, part_2)
x = re1.sub(' ', html.unescape(x))
return re1.sub(' ', html.unescape(x))
def get_text_len(text):
return len(text.split())
class TextTokenizer:
def __init__(self, voc_size, min_freq):
self.voc_size = voc_size
self.min_freq = min_freq
self.itos = None
self.stoi = None
def get_text_len(text):
return len(text.split())
def save_set(self, df, fname='name'):
df = self._clean(df)
path = '/mnt/shdstorage/for_classification/%s.csv' % fname
df.to_csv(path, index=None)
print('Dataframe is saved to %s' % path)
def _clean(self, df):
df = df[~df['edited_text'].isna()]
df = df[~df['label'].isna()]
df['edited_text'] = df['edited_text'].apply(fixup)
df['text_len'] = df['edited_text'].apply(get_text_len)
df = df[df['text_len'] >= 10]
return df
def get_texts(self, df):
df = self._clean(df)
labels = df['label'].values.astype(np.int64)
texts = df['edited_text'].astype(str)
return texts, labels
def get_tokens(self, df):
texts, labels = self.get_texts(df)
tok = Tokenizer().process_all(texts)
return tok, list(labels)
def ids(self, tok_trn):
print(tok_trn[:10])
freq = Counter(p for o in tok_trn for p in o)
print(freq.most_common(25))
self.itos = [o for o, c in freq.most_common(self.voc_size) if c > self.min_freq]
self.itos.insert(0, '_pad_')
self.itos.insert(0, '_unk_')
self.stoi = collections.defaultdict(lambda: 0, {v: k for k, v in enumerate(self.itos)})
class Tokenizator:
def __init__(self, voc_size=60000, min_freq=2):
self.tt = TextTokenizer(voc_size=voc_size, min_freq=min_freq)
# set mode to 'train' to create ids set
def prepare_set(self, set_fname, mode=None):
set = pd.read_csv(set_fname, encoding='utf-8')
tok, labels = self.tt.get_tokens(set)
if mode == 'train':
self.tt.ids(tok)
lm = np.array([[self.tt.stoi[o] for o in p] for p in tok])
return tok, labels, lm
# the function to convert h5 model to pth
def convert(path_to_old_model, path_to_save_converted_model):
"""
path_to_old_model is the path to old model
and
path_to_save_converted_model is the path where the converted model is stored
"""
old_wgts = torch.load(path_to_old_model, map_location=lambda storage, loc: storage)
new_wgts = OrderedDict()
new_wgts['encoder.weight'] = old_wgts['0.encoder.weight']
new_wgts['0.encoder.weight'] = old_wgts['0.encoder.weight']
new_wgts['encoder_dp.emb.weight'] = old_wgts['0.encoder_with_dropout.embed.weight']
new_wgts['rnns.0.weight_hh_l0_raw'] = old_wgts['0.rnns.0.module.weight_hh_l0_raw']
new_wgts['rnns.0.module.weight_ih_l0'] = old_wgts['0.rnns.0.module.weight_ih_l0']
new_wgts['rnns.0.module.weight_hh_l0'] = old_wgts['0.rnns.0.module.weight_hh_l0_raw']
new_wgts['rnns.0.module.bias_ih_l0'] = old_wgts['0.rnns.0.module.bias_ih_l0']
new_wgts['rnns.0.module.bias_hh_l0'] = old_wgts['0.rnns.0.module.bias_hh_l0']
new_wgts['rnns.1.weight_hh_l0_raw'] = old_wgts['0.rnns.1.module.weight_hh_l0_raw']
new_wgts['rnns.1.module.weight_ih_l0'] = old_wgts['0.rnns.1.module.weight_ih_l0']
new_wgts['rnns.1.module.weight_hh_l0'] = old_wgts['0.rnns.1.module.weight_hh_l0_raw']
new_wgts['rnns.1.module.bias_ih_l0'] = old_wgts['0.rnns.1.module.bias_ih_l0']
new_wgts['rnns.1.module.bias_hh_l0'] = old_wgts['0.rnns.1.module.bias_hh_l0']
new_wgts['rnns.2.weight_hh_l0_raw'] = old_wgts['0.rnns.2.module.weight_hh_l0_raw']
new_wgts['rnns.2.module.weight_ih_l0'] = old_wgts['0.rnns.2.module.weight_ih_l0']
new_wgts['rnns.2.module.weight_hh_l0'] = old_wgts['0.rnns.2.module.weight_hh_l0_raw']
new_wgts['rnns.2.module.bias_ih_l0'] = old_wgts['0.rnns.2.module.bias_ih_l0']
new_wgts['rnns.2.module.bias_hh_l0'] = old_wgts['0.rnns.2.module.bias_hh_l0']
    # assumed mapping: the tied decoder weight is copied under the new decoder-weight key
    new_wgts['1.decoder.weight'] = old_wgts['1.decoder.weight']
torch.save(new_wgts, path_to_save_converted_model + 'lm4.pth')
convert('/home/gmaster/projects/negRevClassif/pretrained_lm/lm4.h5', '/home/gmaster/projects/negRevClassif/pretrained_lm/')
# print(ver['edited_text_old'])
#
# ver['edited_text'] = ver['edited_text_old'].apply(fixup_2)
# ver.to_csv('/mnt/shdstorage/for_classification/new_test.csv')
# tokenizer = Tokenizator()
#
train_fname = '/home/gmaster/projects/negRevClassif/source_datasets/train_v7.csv'
test_fname = '/home/gmaster/projects/negRevClassif/source_datasets/test_v7.csv'
ver_fname = '/home/gmaster/projects/negRevClassif/source_datasets/new_test.csv'
#
# #
# tok_trn, trn_labels, trn_lm = tokenizer.prepare_set(train_fname, mode='train')
# tok_test, test_labels, test_lm = tokenizer.prepare_set(test_fname)
# tok_ver, ver_labels, ver_lm = tokenizer.prepare_set(ver_fname)
#
# np.save('/home/gmaster/projects/negRevClassif/source_datasets/trn_ids_60_tt.npy', trn_lm)
# np.save('/home/gmaster/projects/negRevClassif/source_datasets/test_ids_60_tt.npy', test_lm)
# np.save('/home/gmaster/projects/negRevClassif/source_datasets/ver_ids_60_tt.npy', ver_lm)
# pickle.dump(tokenizer.tt.itos, open('/home/gmaster/projects/negRevClassif/source_datasets/itos_60_tt.pkl', 'wb'))
#
# itos = tokenizer.tt.itos
itos = pickle.load(open('/home/gmaster/projects/negRevClassif/source_datasets/itos_60_tt.pkl', 'rb'))
print(itos[:100])
#
vs = len(itos)
em_sz, nh, nl = 400, 1150, 3
#
# # lm4.h5 lm_enc4.h5
PRE_LM_PATH = '/home/gmaster/projects/negRevClassif/pretrained_lm/lm4'
lm_itos = '/home/gmaster/projects/negRevClassif/pretrained_lm/itos'
# wgts = torch.load(PRE_LM_PATH, map_location=lambda storage, loc: storage)
# enc_wgts = np.array(wgts['0.encoder.weight'])
# row_m = enc_wgts.mean(0)
# itos2 = pickle.load(open(lm_itos, 'rb'))
# stoi2 = collections.defaultdict(lambda: -1, {v: k for k, v in enumerate(itos2)})
#
# new_w = np.zeros((vs, em_sz), dtype=np.float32)
# for i, w in enumerate(itos.pkl):
# r = stoi2[w]
# new_w[i] = enc_wgts[r] if r >= 0 else row_m
#
# wgts['0.encoder.weight'] = torch.from_numpy(new_w)
# wgts['0.encoder_with_dropout.embed.weight'] = torch.from_numpy(np.copy(new_w))
# wgts['1.decoder.weight'] = torch.from_numpy(np.copy(new_w))
# opt_fn = partial(optim.Adam, betas=(0.8, 0.99))
train = pd.read_csv(train_fname)
train_df = train[['label', 'edited_text']]
train_df = train_df.rename(columns={'edited_text': 'text'})
test = pd.read_csv(test_fname)
test_df = test[['label', 'edited_text']]
test_df = test_df.rename(columns={'edited_text': 'text'})
print('Datasets are ready!')
data_lm = TextLMDataBunch.from_df(train_df=train_df, valid_df=test_df, path="")
data_clas = TextClasDataBunch.from_df(path='', train_df=train_df, valid_df=test_df,
vocab=data_lm.train_ds.vocab, bs=32)
print('Bunches are ready!')
#
# data_lm.save('/home/gmaster/projects/negRevClassif/data/')
# data_clas.save('/home/gmaster/projects/negRevClassif/data/')
# data_lm = TextLMDataBunch.load('/home/gmaster/projects/negRevClassif/data/')
# data_clas = TextClasDataBunch.load('/home/gmaster/projects/negRevClassif/data/', bs=32)
# language model
learn = language_model_learner(data_lm, pretrained_fnames=[PRE_LM_PATH, lm_itos], drop_mult=0.7)
print(learn.fit_one_cycle(1, 1e-2))
# classifier
learn = text_classifier_learner(data_clas, drop_mult=0.7)
learn.fit_one_cycle(1, 1e-2)
preds, targets = learn.get_preds()
predictions = np.argmax(preds, axis=1)
pd.crosstab(predictions, targets)
```
|
{
"source": "JessikaSmith/OptimizationAlgorithms",
"score": 3
}
|
#### File: OptimizationAlgorithms/data_utils/data_generator_.py
```python
from numpy.random import random_sample
def generate_population(population_size, problem_dim, min_bound, max_bound):
error = 1e-10
data = (max_bound + error - min_bound) * random_sample((population_size, problem_dim)) + min_bound
data[data > max_bound] = max_bound
return data
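# illustrative usage (hypothetical bounds): a 50x10 population drawn uniformly in [-5, 5]
#   population = generate_population(population_size=50, problem_dim=10,
#                                    min_bound=-5.0, max_bound=5.0)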
```
#### File: OptimizationAlgorithms/GA_tsp_optimisation/ga_pipeline.py
```python
from data_utils import *
import operator
from GA_tsp_optimisation import Selector, Crossover, Mutation
from vis import *
from data_utils import create_matrix
import random
coordinates = None
matrix = None
class Path:
def __init__(self, path):
self.path = path
self.fitness = _evaluate_fitness(path)
self._prob = None
def update_path(self, new_path):
self.path = new_path
self.fitness = _evaluate_fitness(new_path)
def _evaluate_fitness(path):
    # total tour length: consecutive edges plus the edge closing the cycle
    dist = 0
    for i in range(len(path) - 1):
        dist += matrix[path[i + 1]][path[i]]
    dist += matrix[path[0]][path[-1]]
    return dist
def _generate_population(num_of_cities, population_size):
population = []
for _ in range(population_size):
path = np.random.permutation([i for i in range(num_of_cities)])
population.append(Path(path))
# draw_path(path, coordinates)
return population
def ga_pipeline(mat=None, population_size=20, generations=200, best_perc=0.2,
mutation_probability=0.2, mutation_intensity=0.3,
verbose=1, coord=None, plot=0):
num_of_cities = mat.shape[0]
global matrix
matrix = mat
global coordinates
coordinates = coord
population = _generate_population(num_of_cities, population_size)
s = Selector(selection_type='roulette')
c = Crossover(crossover_type='ordered')
m = Mutation(mutation_type='rsm')
x, y = [], []
for ii in range(generations):
population.sort(key=operator.attrgetter('fitness'), reverse=False)
new_generation = []
for i in range(int(population_size * best_perc)):
new_generation.append(population[i])
pairs_generator = s.selection(population=population, best_perc=best_perc)
for i, j in pairs_generator:
child_1, child_2 = c.crossover(parent_1=i.path, parent_2=j.path)
new_generation.append(Path(child_1))
new_generation.append(Path(child_2))
population = new_generation[:population_size]
for i in range(1, len(population)):
population[i].update_path(m.mutation(population[i].path, mutation_probability=mutation_probability))
population.sort(key=operator.attrgetter('fitness'), reverse=False)
if verbose:
print('========== generation %s ==========' % ii)
print('best so far: %s\n' % population[0].fitness)
x.append(ii)
y.append(population[0].fitness)
if plot:
if ii % 500 == 0:
draw_path(population[0].path, coordinates, ii)
draw_convergence(x, y, 'ps = %s, bp = %s, mr = %s, mi = %s' % (
round(population_size, 2), round(best_perc, 2), round(mutation_probability, 2), round(mutation_intensity, 2)))
return population[0].fitness
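# illustrative call (hypothetical inputs): `dist_mat` is a precomputed city distance
# matrix and `coords` the matching coordinates used only for plotting
#   best_length = ga_pipeline(mat=dist_mat, coord=coords, population_size=50,
#                             generations=1000, best_perc=0.2,
#                             mutation_probability=0.3, mutation_intensity=0.3)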
```
|
{
"source": "JessikaSmith/reviews_classification_and_aspect_extraction",
"score": 3
}
|
#### File: reviews_classification_and_aspect_extraction/classifcation/main.py
```python
from classifcation.word2vec_preparation import *
from classifcation.utils import *
from sklearn.model_selection import train_test_split
from classifcation.preprocess_data import *
import itertools
import os
import pandas as pd
from classifcation.model import CNN_model, LSTM_model, VDCNN, VAE
from numpy import genfromtxt
from sklearn.model_selection import StratifiedKFold
# from keras.preprocessing import sequence
from keras.utils import to_categorical
# from keras.datasets import imdb
def main():
NUM_CLASSES = 5
SEQUENCE_MAX_LEN = 512
# TODO: load all data + implement cross-validation
# TODO: look at distributions of ratings in train/test sets
# raw_data_path = '/home/maria/PycharmProjects/Datasets/' \
# 'amazon_review_full_csv/test.csv'
# dataset1 = pd.read_csv(raw_data_path, header=None)
# dataset1.columns = ['rating', 'subject', 'review']
# raw_data_path = '/home/maria/PycharmProjects/Datasets/' \
# 'amazon_review_full_csv/train.csv'
# dataset2 = pd.read_csv(raw_data_path, header=None)
# dataset2.columns = ['rating', 'subject', 'review']
# data = [dataset1, dataset2]
# dataset = pd.concat(data)
#
# dataset["processed_text"] = dataset["review"].apply(clean_text)
# print('Text is clean!')
# dataset["processed_text"] = dataset["processed_text"].apply(tokens_to_text)
# train, test = train_test_split(dataset, test_size=0.2)
# test['processed_text'].to_csv("data_dir/amazon/test.csv", index=False)
# test['rating'].to_csv("data_dir/amazon/y_test.csv", index=False)
# print('test sets are ready!')
# train['processed_text'].to_csv("data_dir/amazon/train.csv", index=False)
# train['rating'].to_csv("data_dir/amazon/y_train.csv", index=False)
# print('train sets is ready!')
# pd.set_option('display.max_colwidth', -1)
# dataset = pd.read_csv(raw_data_path, header=None)
# dataset.columns = ['rating', 'subject', 'review']
# dataset["processed_text"] = dataset["review"].apply(clean_text)
# dataset["processed_text"] = dataset["processed_text"].apply(tokens_to_text)
# dataset['rating'].to_csv("data_dir/amazon/y_train.csv")
# dataset['processed_text'].to_csv("data_dir/amazon/train.csv")
#
# model = w2v_model()
# model.create_model('amazon')
# nn_model = CNN_model()
# # read_data outputs frequency vectors
# vocab, train_x, test_x, max_len = read_data('amazon')
#
# # TODO: refactor this
# converting to one-hot representation
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_test.csv', delimiter=','):
# val_list.append([int(i)])
# test_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# test_y = to_categorical(test_y, 5)
#
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_train.csv', delimiter=','):
# val_list.append([int(i)])
# train_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# train_y = to_categorical(train_y, 5)
## for vdcnn
# train_x = codecs.open('%s/%s/train.csv' % (IO_DIR, 'amazon'), mode='r', encoding='utf-8')
# train_x = get_sequence(train_x)
# train_x = sequence.pad_sequences(train_x, maxlen=SEQUENCE_MAX_LEN, padding='post', truncating='post')
#
# test_x = codecs.open('%s/%s/test.csv' % (IO_DIR, 'amazon'), mode='r', encoding='utf-8')
# test_x = get_sequence(test_x)
# test_x = sequence.pad_sequences(test_x, maxlen=SEQUENCE_MAX_LEN, padding='post', truncating='post')
# nn_model.create_model(vocab, max_len)
# nn_model.model.get_layer('word_embedding').trainable = False
#
# print('Transforming train data to list')
# source = '%s/%s/%s.csv' % (IO_DIR, 'amazon', 'train')
# train_x = codecs.open(source, 'r', 'utf-8')
# new_train_x = []
# for line in train_x:
# new_train_x.append(line.split())
# train_x = new_train_x
#
# print('Transforming test data to list')
# source = '%s/%s/%s.csv' % (IO_DIR, 'amazon', 'test')
# test_x = codecs.open(source, 'r', 'utf-8')
# new_test_x = []
# for line in test_x:
# new_test_x.append(line.split())
# test_x = new_test_x
#
# # train_x, test_x = prepare_input_sequences(train_x, test_x, type='w2v_mean')
#
# train_x, test_x = prepare_input_sequences(train_x, test_x, max_len=max_len, type='freq_seq',
# max_num_of_words=200000)
# np.save('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'train_x_pad_200000'), train_x)
# np.save('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'test_x_pad_200000'), test_x)
# train_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'train_x_pad'))
# test_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'test_x_pad'))
# cross validation section
# skf = StratifiedKFold(indices, n_folds=n_folds, shuffle=True)
# nn_model.simple_train('amazon', vocab, train_x, train_y, test_x,
# test_y, max_len)
# nn_model.train_model(train_x, )
# transforms a list of num_samples sequences into 2D np.array shape (num_samples, num_timesteps)
raw = ''
# vocabulary, train, test,
# create LSTM_CNN model
# vocab, train_x, test_x, max_len = read_data('amazon')
# train_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'train_x_pad'))
# test_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'test_x_pad'))
#
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_test.csv', delimiter=','):
# val_list.append([int(i)])
# test_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# test_y = to_categorical(test_y, 5)
#
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_train.csv', delimiter=','):
# val_list.append([int(i)])
# train_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# train_y = to_categorical(train_y, 5)
#
# nn_model = LSTM_model()
# nn_model.create_model_with_conv_layer(len(vocab), max_len)
# nn_model.train_model(vocab, train_x, train_y, test_x, test_y, max_len)
# # one more CNN try
# vocab, train_x, test_x, max_len = read_data('amazon')
#
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_test.csv', delimiter=','):
# val_list.append([int(i)])
# test_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# test_y = to_categorical(test_y, 5)
#
# val_list = []
# for i in genfromtxt('data_dir/amazon/y_train.csv', delimiter=','):
# val_list.append([int(i)])
# train_y = np.asarray(val_list).mean(axis=1).astype(int) - 1
# train_y = to_categorical(train_y, 5)
#
# train_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'train_x_pad'))
# test_x = np.load('%s/%s/%s.npy' % (IO_DIR, 'amazon', 'test_x_pad'))
# nn_model = CNN_model()
# nn_model.create_simple_model(len(vocab), max_len, 300)
# nn_model.simple_train('amazon', vocab, train_x, train_y, test_x,
# test_y, max_len, 64, num_epochs=50)
##### test imdb
# max_features = 10000
# maxlen = 500
# (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
# x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
# print('x_train shape:', x_train.shape)
# print('x_test shape:', x_test.shape)
# nn_model = CNN_model()
# nn_model.create_imdb_model()
# nn_model.fit_imdb_model(x_train, y_train, x_test, y_test)
# train_x = sequence.pad_sequences(train_x, maxlen=SEQUENCE_MAX_LEN, padding='post', truncating='post')
# print('Train data is ready')
#
# test_x = sequence.pad_sequences(test_x, maxlen=SEQUENCE_MAX_LEN, padding='post', truncating='post')
# print('Test data is ready')
#
# nn_model = VDCNN()
# nn_model.create_model()
# print('VDCNN model was successfully created')
# nn_model.train_model(train_x, train_y, test_x, test_y, vocab)
# TODO: create and save w2v embedding matices
# # initializing word2vec
# model = w2v_model()
# model.pretrained_model_from_file('GoogleNews-vectors-negative300.bin')
# # initializing vocabulary
# vocab, train_x, test_x, max_len = read_data('amazon')
# words_embeddings, undefined_words = get_embeddings(vocab, model)
# print(len(undefined_words))
# VAE
# 1) find absent words
# 2) continue to train ready model with new unknown words
# 3) create input
# vocab, train_x, test_x, max_len = read_data('amazon') # encodes to numerical representation
model = w2v_model()
model.pretrained_model_from_file('GoogleNews-vectors-negative300.bin')
# TODO: implement padding
def return_embeddings(tokens_len=20, set_name='train'):
data_concat = []
tokens = vectorize_revs(model, set_name=set_name)
# example: take only len 20
data = [x for x in tokens if len(x) == tokens_len]
for x in data:
data_concat.append(list(itertools.chain.from_iterable(x)))
data_array = np.array(data_concat)
np.random.shuffle(data_array)
return data_array
train = return_embeddings()
test = return_embeddings(set_name='test')
print(test)
vae = VAE()
vae.create_simple_model()
vae.train_simple_model(train, test)
# result = select_input_sentences('amazon', 'train', model)
# print(len(result))
# print(result[-1])
# print(result[-2])
# write to file
# with open('%s/%s/%s.csv' % (IO_DIR, domain_name, set_name), 'w') as f:
# for sent
if __name__ == "__main__":
main()
```
#### File: reviews_classification_and_aspect_extraction/classifcation/preprocess_data.py
```python
import pymystem3
from tqdm import tqdm
from nltk.stem import PorterStemmer, WordNetLemmatizer
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from classifcation.word2vec_preparation import w2v_model
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import json
# nltk.download('stopwords')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')
import numpy as np
import re
def convert_tag(tag):
if tag in ['NN', 'NNS', 'NNP', 'NNPS']:
return wn.NOUN
if tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:
return wn.VERB
if tag in ['RB', 'RBR', 'RBS']:
return wn.ADV
if tag in ['JJ', 'JJR', 'JJS']:
return wn.ADJ
return 'trash'
def process_punkt(text):
for char in ['.', '"', ',', '(', ')', '!', '?', ';', ':']:
text = text.replace(char, ' ')
return text
def clean_text(text):
wordnet_lemmatizer = WordNetLemmatizer()
stop = stopwords.words("english")
text = text.strip().lower()
text = process_punkt(text)
tokens = re.split("[\s;,]", text)
tokens = [x for x in tokens if x.isalpha()]
tokens = [x for x in tokens if len(x) > 3]
tokens_res = []
res = nltk.pos_tag(tokens)
for i in res:
if convert_tag(i[1]) != 'trash' and i[0] not in stop:
tokens_res.append(wordnet_lemmatizer.lemmatize(i[0], convert_tag(i[1])))
return tokens_res
def process_similarity(w2v_model, word):
try:
sim = w2v_model.findSynonyms(word, 1).take(1)[0][0]
except:
return None
return sim
def create_document_with_similarity_replacement():
raise NotImplementedError
def tokens_to_text(tokens):
return " ".join(tokens)
def text_to_tokens(text):
return text.split()
def text_len(text):
return len(text)
def batch_iterator(data, batch_size, num_epoch, shuffle=True):
data = np.array(data)
data_size = len(data)
batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epoch):
if shuffle:
shuffle_indicis = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indicis]
else:
shuffled_data = data
for i in range(batches_per_epoch):
start = i * batch_size
end = min((i + 1) * batch_size, data_size)
yield shuffled_data[start:end]
# TODO: add simple binary vector representation of sentences
def prepare_binary_vectors(train_x, test_x):
raise NotImplementedError
def prepare_input_sequences(train_x, test_x, type, max_len=0, max_num_of_words=10000):
if type == 'w2v_mean':
model = w2v_model()
model.model_from_file('amazon')
train_x, test_x = _w2v_mean_preparation(train_x, test_x, model)
if type == 'freq_seq':
train_x, test_x = _freq_seq_preparation(train_x, test_x, max_len, max_num_of_words=max_num_of_words)
if type == 'bow':
train_x, test_x = _bow_preparation(train_x, test_x, max_len)
return train_x, test_x
def _w2v_mean_preparation(train_x, test_x, w2v_model):
new_train_x = []
new_test_x = []
print('Preparing w2v_mean vectors...')
for sentence in tqdm(test_x):
new_test_x.append(w2v_model.get_w2v_mean(sentence))
print('Test set: success')
for sentence in tqdm(train_x):
new_train_x.append(w2v_model.get_w2v_mean(sentence))
print('Train set: success')
np_train = np.array(new_train_x)
np_test = np.array(new_test_x)
return np.squeeze(np_train, axis=1), np.squeeze(np_test, axis=1)
def _freq_seq_preparation(train_x, test_x, max_len, max_num_of_words):
print('Tokenizer starts... ')
tokenizer = Tokenizer(num_words=max_num_of_words)
tokenizer.fit_on_texts(train_x + test_x)
print('Fitting is done')
x_train = tokenizer.texts_to_sequences(train_x)
x_test = tokenizer.texts_to_sequences(test_x)
# transforms a list of num_samples sequences into 2D np.array shape (num_samples, num_timesteps)
x_train = sequence.pad_sequences(x_train, maxlen=max_len, padding='post', truncating='post')
print('Size of training set: %i' % len(x_train))
x_test = sequence.pad_sequences(x_test, maxlen=max_len, padding='post', truncating='post')
print('Size of test set: %i' % len(x_test))
return x_train, x_test
# simple document representation => sum of one-hot text vectors
def _bow_preparation(train_x, test_x, max_len):
    # minimal bag-of-words sketch: each document becomes the sum of its words'
    # one-hot vectors (keras Tokenizer, mode='count'), truncated to the max_len
    # most frequent words
    tokenizer = Tokenizer(num_words=max_len)
    tokenizer.fit_on_texts(train_x + test_x)
    return (tokenizer.texts_to_matrix(train_x, mode='count'),
            tokenizer.texts_to_matrix(test_x, mode='count'))
# TODO: search for similar words
ALPHABET = 'abcdefghijklmnopqrstuvwxyz '
# simplified variant without punctuation
def get_sequence(dataset, max_len):
all_data = []
for row in dataset:
# data = np.ones(SEQUENE_MAX_LEN)*68
all_data.append(char2vec(row, max_len))
return np.array(all_data)
def char2vec(text, max_len):
data = np.zeros(max_len)
char_dict = {}
for i, c in enumerate(ALPHABET):
char_dict[c] = i + 1
for i in range(0, len(text)):
        if i >= max_len:  # stop before indexing past the fixed-length vector
return data
elif text[i] in char_dict:
data[i] = char_dict[text[i]]
return data
```
#### File: reviews_classification_and_aspect_extraction/classifcation/utils.py
```python
import codecs
import operator
from sklearn.model_selection import train_test_split
import numpy as np
IO_DIR = 'data_dir'
# TODO: implement dev/train/test split
def vocab_creation(domain_name, max_len=0, vocab_size=0):
source = None
try:
source = '%s/%s/train.csv' % (IO_DIR, domain_name)
except:
print("Domain %s doesn't exist" % (domain_name))
print('Vocabulary initialization...')
total, unique = 0, 0
word_freqs = {}
top = 0
text = codecs.open(source, 'r', 'utf-8')
for line in text:
words = line.split()
if max_len > 0 and len(words) > max_len:
continue
for word in words:
try:
word_freqs[word] += 1
except KeyError:
unique += 1
word_freqs[word] = 1
total += 1
print('Total amount of words %i with %i unique ones' % (total, unique))
sorted_freq = sorted(word_freqs.items(), key=operator.itemgetter(1), reverse=True)
# TODO: simplify this part
vocab = {'<pad>': 0, '<unk>': 1}
index = len(vocab)
for word, _ in sorted_freq:
vocab[word] = index
index += 1
if vocab_size > 0 and index > vocab_size + 2:
break
if vocab_size > 0:
print('Vocabulary size is %i' % vocab_size)
ofile = codecs.open('%s/%s/vocab' % (IO_DIR, domain_name), mode='w', encoding='utf-8')
sorted_vocab = sorted(vocab.items(), key=operator.itemgetter(1))
for word, index in sorted_vocab:
# TODO: remove hardcore
if index < 2:
ofile.write(word + '\t' + str(0) + '\n')
continue
ofile.write(word + '\t' + str(word_freqs[word]) + '\n')
ofile.close()
print('Vocabulary is successfully created')
return vocab
def read_vocabulary(domain_name):
    # rebuilds {word: index} from the vocab file written by vocab_creation
    # (one "word<TAB>frequency" line per entry, already stored in index order)
    vocab = {}
    ifile = codecs.open('%s/%s/vocab' % (IO_DIR, domain_name), mode='r', encoding='utf-8')
    for index, line in enumerate(ifile):
        word = line.split('\t')[0]
        vocab[word] = index
    ifile.close()
    return vocab
def read_set(domain_name, set_name, vocab, max_len):
assert set_name in {'train', 'test'}
source = '%s/%s/%s.csv' % (IO_DIR, domain_name, set_name)
# TODO: refactor this
unk, total = 0., 0.
max_x = 0
data_x = []
text = codecs.open(source, 'r', 'utf-8')
for line in text:
# TODO: here was strip() but the purpose was vague
words = line.split()
if max_len > 0 and len(words) > max_len:
continue
indices = []
for word in words:
if word in vocab:
indices.append(vocab[word])
else:
indices.append(vocab['<unk>'])
unk += 1
total += 1
data_x.append(indices)
if max_x < len(indices):
max_x = len(indices)
print('%s is processed' % domain_name)
return data_x, max_x
def read_data(domain_name, vocab_size=0, max_len=0):
vocab = vocab_creation(domain_name, max_len, vocab_size)
print('Reading train set...')
train, train_max = read_set(domain_name=domain_name, set_name='train', vocab=vocab, max_len=0)
print('Success')
print('Reading test set...')
test, test_max = read_set(domain_name=domain_name, set_name='test', vocab=vocab, max_len=max_len)
print('Success')
max_len = max(train_max, test_max)
return vocab, train, test, max_len
def train_test_split(data, labels):
    # delegates to sklearn's splitter through the module path so this same-named
    # wrapper does not call itself
    from sklearn import model_selection
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        data, labels, test_size=0.2)
    return X_train, X_test, y_train, y_test
def batch_iter(data, batch_size, num_epoch):
    # yields shuffled mini-batches, mirroring batch_iterator in preprocess_data.py
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int(len(data) / batch_size) + 1
    for epoch in range(num_epoch):
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start:end]
```
|
{
"source": "jessilyn/DBDP-1",
"score": 3
}
|
#### File: DBDP-1/DigitalBiomarkers-GlucoseVariability/cgmquantify_functions.py
```python
import pandas as pd
import datetime as datetime
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.smoothers_lowess import lowess
def importdexcom(filename):
data = pd.read_csv(filename)
df = pd.DataFrame()
df['Time'] = data['Timestamp (YYYY-MM-DDThh:mm:ss)']
df['Glucose'] = pd.to_numeric(data['Glucose Value (mg/dL)'])
df.drop(df.index[:12], inplace=True)
df['Time'] = pd.to_datetime(df['Time'], format='%Y-%m-%dT%H:%M:%S')
df['Day'] = df['Time'].dt.date
df = df.reset_index()
return df
def interdaycv(df):
cvx = (np.std(df['Glucose']) / (np.mean(df['Glucose'])))*100
return cvx
def interdaysd(df):
interdaysd = np.std(df['Glucose'])
return interdaysd
def intradaycv(df):
intradaycv = []
for i in pd.unique(df['Day']):
intradaycv.append(interdaycv(df[df['Day']==i]))
intradaycv_mean = np.mean(intradaycv)
intradaycv_median = np.median(intradaycv)
intradaycv_sd = np.std(intradaycv)
return intradaycv_mean, intradaycv_median, intradaycv_sd
def intradaysd(df):
intradaysd =[]
for i in pd.unique(df['Day']):
intradaysd.append(np.std(df[df['Day']==i]))
intradaysd_mean = np.mean(intradaysd)
intradaysd_median = np.median(intradaysd)
intradaysd_sd = np.std(intradaysd)
return intradaysd_mean, intradaysd_median, intradaysd_sd
def TIR(df, sd=1, sr=5):
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
TIR = len(df[(df['Glucose']<= up) & (df['Glucose']>= dw)])*sr
return TIR
def TOR(df, sd=1, sr=5):
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
TOR = len(df[(df['Glucose']>= up) | (df['Glucose']<= dw)])*sr
return TOR
def POR(df, sd=1, sr=5):
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
TOR = len(df[(df['Glucose']>= up) | (df['Glucose']<= dw)])*sr
POR = (TOR/(len(df)*sr))*100
return POR
def MAGE(df, sd=1):
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
MAGE = np.mean(df[(df['Glucose']>= up) | (df['Glucose']<= dw)])
return MAGE
def MAGN(df, sd=1):
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
    MAGN = np.mean(df['Glucose'][(df['Glucose']<= up) & (df['Glucose']>= dw)])
return MAGN
def J_index(df):
J = 0.001*((np.mean(df['Glucose'])+np.std(df['Glucose']))**2)
return J
def LBGI_HBGI(df):
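    # Blood-glucose risk indices: f() symmetrizes the glucose scale, then the scaled squares of its negative
    # branch give the low index (LBGI) and of its positive branch the high index (HBGI).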
f = ((np.log(df['Glucose'])**1.084) - 5.381)
rl = []
for i in f:
if (i <= 0):
rl.append(22.77*(i**2))
else:
rl.append(0)
LBGI = np.mean(rl)
rh = []
for i in f:
if (i > 0):
rh.append(22.77*(i**2))
else:
rh.append(0)
HBGI = np.mean(rh)
return LBGI, HBGI, rh, rl
def LBGI(df):
f = ((np.log(df['Glucose'])**1.084) - 5.381)
rl = []
for i in f:
if (i <= 0):
rl.append(22.77*(i**2))
else:
rl.append(0)
LBGI = np.mean(rl)
return LBGI
def HBGI(df):
f = ((np.log(df['Glucose'])**1.084) - 5.381)
rh = []
for i in f:
if (i > 0):
rh.append(22.77*(i**2))
else:
rh.append(0)
HBGI = np.mean(rh)
return HBGI
def ADRR(df):
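    # Average daily risk range: for each day, take the maximum low-risk and high-risk values and average
    # their sum across days.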
ADRRl = []
for i in pd.unique(df['Day']):
LBGI, HBGI, rh, rl = LBGI_HBGI(df[df['Day']==i])
LR = np.max(rl)
HR = np.max(rh)
ADRRl.append(LR+HR)
ADRRx = np.mean(ADRRl)
return ADRRx
def uniquevalfilter(df, value):
xdf = df[df['Minfrommid'] == value]
n = len(xdf)
diff = abs(xdf['Glucose'].diff())
MODD_n = np.nanmean(diff)
return MODD_n
def MODD(df):
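    # Mean of daily differences: for each clock time (minutes from midnight), average the absolute glucose
    # difference between successive days, then average over all clock times.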
df['Timefrommidnight'] = df['Time'].dt.time
lists=[]
for i in range(0, len(df['Timefrommidnight'])):
        t = df['Timefrommidnight'][i].strftime('%H:%M:%S')
        lists.append(int(t[0:2])*60 + int(t[3:5]) + round(int(t[6:9])/60))
df['Minfrommid'] = lists
df = df.drop(columns=['Timefrommidnight'])
#Calculation of MODD and CONGA:
MODD_n = []
uniquetimes = df['Minfrommid'].unique()
for i in uniquetimes:
MODD_n.append(uniquevalfilter(df, i))
    # Remove zeros before averaging (random unique times can yield a mean of 0); convert the list to an
    # array so the boolean mask below actually works
    MODD_n = np.array(MODD_n)
    MODD_n[MODD_n == 0] = np.nan
MODD = np.nanmean(MODD_n)
return MODD
def CONGA24(df):
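    # CONGA over a 24 h lag as implemented here: standard deviation of the per-clock-time mean daily
    # differences computed the same way as in MODD().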
df['Timefrommidnight'] = df['Time'].dt.time
lists=[]
for i in range(0, len(df['Timefrommidnight'])):
        t = df['Timefrommidnight'][i].strftime('%H:%M:%S')
        lists.append(int(t[0:2])*60 + int(t[3:5]) + round(int(t[6:9])/60))
df['Minfrommid'] = lists
df = df.drop(columns=['Timefrommidnight'])
#Calculation of MODD and CONGA:
MODD_n = []
uniquetimes = df['Minfrommid'].unique()
for i in uniquetimes:
MODD_n.append(uniquevalfilter(df, i))
    # Remove zeros before computing the SD (random unique times can yield a mean of 0); convert the list
    # to an array so the boolean mask below actually works
    MODD_n = np.array(MODD_n)
    MODD_n[MODD_n == 0] = np.nan
CONGA24 = np.nanstd(MODD_n)
return CONGA24
def GMI(df):
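    # Glucose Management Indicator (%), a linear transform of mean glucose.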
GMI = 3.31 + (0.02392*np.mean(df['Glucose']))
return GMI
def eA1c(df):
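    # Estimated A1c (%) from mean glucose: A1c = (mean glucose + 46.7) / 28.7.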
eA1c = (46.7 + np.mean(df['Glucose']))/ 28.7
return eA1c
def summary(df):
meanG = np.nanmean(df['Glucose'])
medianG = np.nanmedian(df['Glucose'])
minG = np.nanmin(df['Glucose'])
maxG = np.nanmax(df['Glucose'])
Q1G = np.nanpercentile(df['Glucose'], 25)
Q3G = np.nanpercentile(df['Glucose'], 75)
return meanG, medianG, minG, maxG, Q1G, Q3G
def plotglucosesd(df, sd=1, size=15):
glucose_mean = np.mean(df['Glucose'])
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = np.mean(df['Glucose']) - sd*np.std(df['Glucose'])
plt.figure(figsize=(20,5))
plt.rcParams.update({'font.size': size})
plt.plot(df['Time'], df['Glucose'], '.', color = '#1f77b4')
plt.axhline(y=glucose_mean, color='red', linestyle='-')
plt.axhline(y=up, color='pink', linestyle='-')
plt.axhline(y=dw, color='pink', linestyle='-')
plt.ylabel('Glucose')
plt.show()
def plotglucosebounds(df, upperbound = 180, lowerbound = 70, size=15):
plt.figure(figsize=(20,5))
plt.rcParams.update({'font.size': size})
plt.plot(df['Time'], df['Glucose'], '.', color = '#1f77b4')
plt.axhline(y=upperbound, color='red', linestyle='-')
plt.axhline(y=lowerbound, color='orange', linestyle='-')
plt.ylabel('Glucose')
plt.show()
def plotglucosesmooth(df, size=15):
filteres = lowess(df['Glucose'], df['Time'], is_sorted=True, frac=0.025, it=0)
filtered = pd.to_datetime(filteres[:,0], format='%Y-%m-%dT%H:%M:%S')
plt.figure(figsize=(20,5))
plt.rcParams.update({'font.size': size})
plt.plot(df['Time'], df['Glucose'], '.')
plt.plot(filtered, filteres[:,1], 'r')
plt.ylabel('Glucose')
plt.show()
```
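A minimal usage sketch for the functions above, assuming a local Dexcom export saved as `dexcom_export.csv` (the filename is a placeholder; only `importdexcom` defines what the file must contain):
```python
# Sketch only: 'dexcom_export.csv' is a hypothetical path, not a file shipped with the repository.
from cgmquantify_functions import importdexcom, interdaycv, TIR, GMI, summary

df = importdexcom('dexcom_export.csv')          # Time / Glucose / Day dataframe
print('Interday CV (%):', interdaycv(df))
print('Minutes in range:', TIR(df, sd=1, sr=5))
print('GMI (%):', GMI(df))
print('Mean, median, min, max, Q1, Q3:', summary(df))
```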
#### File: Signal-Alignment/downsample/downsample_with_dtw.py
```python
import pandas as pd
import math
import linecache
import numpy as np
from parameter_cal import cf
from dtw import dtw
import os
from scipy.misc import *
from parameter_cal.utils import get_SS1, get_fact_align, get_reverse_dict, get_SS2, write_result_file
from parameter_cal.utils import load_data, cal_warped_signals
from downsample.utils import get_true_aligned, get_group_number, get_k_accuracy, get_warped_signals
def norm(x, y):
return math.fabs(x[1] - y[1])
def pkg_dtw(file_name, line_num, df):
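    # Align one warped, downsampled signal against its reference with plain DTW and store
    # [error rate, SS1, SS2] for this line of the data file in df.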
file_name = 'data/' + file_name
y_list = load_data(file_name, line_num)
query, reference = cal_warped_signals(y_list)
# plot warped signal
# downsample times
xvals, yinterp = get_warped_signals(query, cf.ds_time)
# calculate the corresponding point pair
query.drop(['shift', 't'], axis=1)
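    # note: drop() is not assigned back, so query keeps all of its columns; only query2 is passed to dtw() below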
query2 = pd.DataFrame({'t': xvals, 'q': yinterp})
query2['close_index'] = 0
true_align_dict = get_true_aligned(cf.ds_time, query, query2)
group_num_dict = get_group_number(true_align_dict, query)
d, cost_matrix, acc_cost_matrix, path = dtw(reference[['t', 'q']].values, query2[['t', 'q']].values, dist=norm)
fact_align_dict = get_fact_align(path)
reverse_dict = get_reverse_dict(path)
error_rate = get_k_accuracy(true_align_dict, fact_align_dict, group_num_dict)
SS1 = get_SS1(fact_align_dict, cf.ds_time)
SS2 = get_SS2(fact_align_dict, reverse_dict, cf.ds_time)
df.loc[line_num] = [error_rate, SS1, SS2]
return df
if __name__ == "__main__":
# generate warped signal
os.chdir(os.path.abspath('..'))
data_dir = os.getcwd() + '\\data\\'
oslist = [f for f in os.listdir(data_dir) if os.path.isfile(data_dir+f)]
# for i in range(0, len(oslist)):
for i in range(0, 84):
event_result = pd.DataFrame(columns=['Error rate','SS1','SS2'])
for j in range(1, 16):
event_result = pkg_dtw(oslist[i], j, event_result)
print(event_result.mean())
print('file'+str(i))
write_result_file('result.csv', 'DTW', oslist[i], event_result.mean())
```
#### File: Signal-Alignment/downsample/downsample_with_eventdtw.py
```python
import pandas as pd
import math
import os, sys
import linecache
import numpy as np
from parameter_cal import cf
from dtw import dtw
from parameter_cal.utils import get_fact_align, get_reverse_dict, calculate_event, load_data, edge_matching, write_result_file
from parameter_cal.utils import get_SS1, get_SS2, cal_warped_signals, get_upslope_endings, get_downslope_endings
from downsample.utils import get_true_aligned, get_k_accuracy, get_group_number, get_warped_signals
def norm(x, y):
# return math.fabs(x[1] - y[1])
return math.fabs(x[1] - y[1]) + math.fabs(x[2] - y[2]) + math.fabs(x[3] - y[3])
def event_dtw(file_name, line_num, df):
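    # Same pipeline as pkg_dtw in downsample_with_dtw.py, but rising/falling-edge "event" features are
    # attached to reference and query2 before DTW, and norm() compares them as well.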
file_name = 'data/' + file_name
y_list = load_data(file_name, line_num)
query, reference = cal_warped_signals(y_list)
reference['upslope'] = 0
reference['downslope'] = 0
# plot warped signal
# downsample times
xvals, yinterp = get_warped_signals(query, cf.ds_time)
# calculate the corresponding point pair
query.drop('shift', axis=1)
query.drop('t', axis=1)
query2 = pd.DataFrame({'t': xvals, 'q': yinterp})
query2['close_index'] = 0
query2['upslope'] = 0
query2['downslope'] = 0
true_align_dict = get_true_aligned(cf.ds_time, query, query2)
group_num_dict = get_group_number(true_align_dict, query)
raw_reference_uslope, reference_upslope = get_upslope_endings(reference['q'], cf.refer_percent)
raw_query_uslope, query_upslope = get_upslope_endings(query2['q'], cf.query_percent)
raw_reference_downlope, reference_downslope = get_downslope_endings(reference['q'], cf.refer_percent)
raw_query_downlope, query_downslope = get_downslope_endings(query2['q'], cf.query_percent)
rising_edge_grps = edge_matching(reference, query2, reference_upslope, query_upslope)
down_edge_grps = edge_matching(reference, query2, reference_downslope, query_downslope)
calculate_event(rising_edge_grps, reference, query2, True)
calculate_event(down_edge_grps, reference, query2, False)
d, cost_matrix, acc_cost_matrix, path = dtw(reference[['t', 'q', 'upslope', 'downslope']].values,
query2[['t', 'q', 'upslope', 'downslope']].values, dist=norm)
fact_align_dict = get_fact_align(path)
reverse_dict = get_reverse_dict(path)
error_rate = get_k_accuracy(true_align_dict, fact_align_dict, group_num_dict)
SS1 = get_SS1(fact_align_dict, cf.ds_time)
SS2 = get_SS2(fact_align_dict, reverse_dict, cf.ds_time)
df.loc[line_num] = [error_rate, SS1, SS2]
return df
if __name__ == "__main__":
# generate warped signal
os.chdir(os.path.abspath('..'))
data_dir = os.getcwd() + '\\data\\'
oslist = [f for f in os.listdir(data_dir) if os.path.isfile(data_dir + f)]
# target_abs_directory = os.getcwd() + '\\csv\\' + 'result.csv'
# df = pd.read_csv(target_abs_directory, engine='python')
# print(df)
# for i in range(0, len(oslist)):
for i in range(0,84):
event_result = pd.DataFrame(columns=['Error rate', 'SS1', 'SS2'])
for j in range(1, 16):
event_result = event_dtw(oslist[i], j, event_result)
print('group' + str(j))
print(event_result.mean())
print("file" + str(i) + "this is len" + str(len(event_result)))
write_result_file('result.csv', 'EventDTW', oslist[i], event_result.mean())
```
#### File: Signal-Alignment/downsample/utils.py
```python
import numpy as np
import math
from parameter_cal.utils import get_group_devi, get_SS1, get_SS2
from parameter_cal import cf
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
def get_group_len(query_st, query2_checkpoint, ds_time, query, query2):
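    # Total Euclidean distance between one downsampled point (query2[query2_checkpoint]) and the ds_time
    # consecutive original points starting at query_st; used to pick the best-matching group of points.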
group_len = 0
for i in range(0, ds_time):
group_len += math.sqrt(pow((query2['t'][query2_checkpoint] - query['t2'][i+query_st]), 2) + pow((query2['q'][query2_checkpoint] - query['q'][i+query_st]), 2))
return group_len
def get_true_aligned(ds_time, query, query2):
if math.floor(ds_time) == math.ceil(ds_time):
ds_time = int(ds_time)
dict = {}.fromkeys(range(0, len(query2['t'])))
for i in range(len(query2['t'])):
dict[i] = np.array([])
# find the closest index
for i in range(len(query2)):
for j in range(len(query['t']) - 1):
if query['t2'][j] <= query2['t'][i] < query['t2'][j + 1]:
print(i, j, query['q'][j], query2['q'][i])
if abs(query2['q'][i] - query['q'][j]) < abs(query2['q'][i] - query['q'][j + 1]):
query2.loc[i, 'close_index'] = j
dict[i] = np.append(dict[i], j)
else:
query2.loc[i, 'close_index'] = j + 1
dict[i] = np.append(dict[i], j+1)
if query2['t'][i] > query['t2'].iloc[-1]:
query2.loc[i, 'close_index'] = len(query) - 1
elif query2['t'][i] < query['t2'].iloc[0]:
query2.loc[i, 'close_index'] = 0
dict[len(query2['t'])-1] = np.array([len(query)-1])
for i in range(len(query2['t'])):
min_len = np.inf
center = int(dict[i][0])
for j in range(-ds_time+1, 1):
st, ed = center+j,center+j+ds_time-1
if st < 0 or ed >= len(query['t2']):
continue
print(i, st, center, j)
group_len = get_group_len(st, i, ds_time, query, query2)
if group_len < min_len:
dict[i] = np.array(list(range(center+j, center+j+ds_time)))
min_len = group_len
return dict
def get_group_number(true_align_dict, query):
diction = {}.fromkeys(range(0, len(query['q'])))
for i in range(len(query['q'])):
diction[i] = np.array([])
for i in range(len(true_align_dict)):
for item in true_align_dict[i]:
diction[item] = np.append(diction[item], i)
# modify those that did not find their group
for i in range(len(diction)):
if len(diction[i]) == 0:
diction[i] = np.append(diction[i], diction[i - 1][0])
return diction
def get_k_accuracy(true_align_dict, fact_align_dict, group_num_dict):
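    # Alignment error: sum the group deviation of every factual pair that is not in the true alignment,
    # normalized by the number of query point pairs and the downsampling factor.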
sum = 0
consider_num = 0
query_number = len(fact_align_dict)
for i in range(len(fact_align_dict)):
for item in fact_align_dict[i]:
consider_num += 1
if item in true_align_dict[i]:
sum += 0
else:
# search the group number
group_devi = get_group_devi(item, group_num_dict,i)
sum += group_devi
# sum+=min(np.abs(item-true_align_dict[i]))
return 2 * sum / ((query_number * (query_number - 1)) * (1+math.fabs(cf.ds_time)))
def slope_col(query):
# calculate the slope of query
query_last = len(query) - 1
query['slope'] = 0
query.loc[1, 'slope'] = ((query.loc[1, 'q'] - query.loc[0, 'q']) + ((query.loc[2 , 'q'] - query.loc[1, 'q']) / 2)) / 2
query.loc[0, 'slope'] = query.loc[1, 'slope']
for i in range(2, query_last - 1):
query.loc[i, 'slope'] = ((query.loc[i, 'q'] -query.loc[i-1, 'q']) + ((query.loc[i+1, 'q'] - query.loc[i, 'q']) / 2)) / 2
query.loc[query_last - 1, 'slope'] = ((query.loc[query_last - 1, 'q'] - query.loc[query_last - 2, 'q']) + (
(query.loc[query_last, 'q'] - query.loc[query_last - 1, 'q']) / 2)) / 2
query.loc[query_last, 'slope'] = query.loc[query_last - 1, 'slope']
def reference_slope_col(query, ds_time):
slope_col(query)
query['avg_slope'] = 0
left_right = ds_time
st = 0 + left_right
ed = len(query) - 1 - left_right
for i in range(0, ed+1):
query.loc[i, 'avg_slope'] = (query.loc[i+left_right, 'q'] - query.loc[i, 'q']) / 2
for i in range(ed+1, len(query) - 1):
query.loc[i, 'avg_slope'] = query.loc[i, 'slope']
def get_warped_signals(query, ds_time):
if ds_time == 1:
xvals = query['t']
xvals = np.array(xvals)
else:
xvals = np.linspace(query.loc[0,'t2'], query.iloc[-1]['t2'], math.floor(len(query['t']) / cf.ds_time))
x = query['t2']
y = query['q']
yinterp = np.array(np.interp(xvals, x, y))
return xvals, yinterp
def connect_edges(rising_edge_grps, raw_reference_uslope):
for i in range(len(rising_edge_grps)):
st_conct = int(rising_edge_grps.loc[i, 'refer_st'])
ed_conct = int(rising_edge_grps.loc[i, 'refer_ed'])
for j in range(len(raw_reference_uslope)):
if math.fabs(st_conct - raw_reference_uslope.iloc[j]['ed']) <= 2:
                # use .loc so the assignment modifies rising_edge_grps itself rather than an iloc copy
                rising_edge_grps.loc[i, 'refer_st'] = raw_reference_uslope.iloc[j]['st']
            elif math.fabs(ed_conct - raw_reference_uslope.iloc[j]['st']) <= 2:
                rising_edge_grps.loc[i, 'refer_ed'] = raw_reference_uslope.iloc[j]['ed']
return rising_edge_grps
def get_matched_graph(rising_edge_grps, down_edge_grps, x, y, vertical_mov, title=None):
fig, ax = plt.subplots(1, 1, figsize=(25, 12))
legend_elements = [Line2D([0], [0], marker='o',color='w',label=' ',markerfacecolor='black',markersize=15),
Line2D([0], [0], marker='o',color='w',label=' ',markerfacecolor='blue',markersize=20),
Line2D([0], [0], color='r', lw=4),
Line2D([0], [0], color='cyan', lw=4)
]
ax.scatter(x['t'], x['q'], c='k', marker='.')
ax.scatter(y['t'], y['q'] + vertical_mov, c='b', s=160, marker='.')
b = ax.get_position()
ax.legend(ncol=2, handles = legend_elements, fontsize=40, frameon=False, loc='lower left', bbox_to_anchor=(0,1.05))
for i in range(0, len(rising_edge_grps)):
refer_st = int(rising_edge_grps.iloc[i]['refer_st'])
refer_ed = int(rising_edge_grps.iloc[i]['refer_ed'])
query_st = int(rising_edge_grps.iloc[i]['query_st'])
query_ed = int(rising_edge_grps.iloc[i]['query_ed'])
ax.plot(x['t'].loc[refer_st:refer_ed], x['q'].loc[refer_st:refer_ed], color='r', linewidth=5)
ax.plot(y['t'].loc[query_st:query_ed], y['q'].loc[query_st:query_ed] + vertical_mov, color='r', linewidth=5)
for i in range(0, len(down_edge_grps)):
refer_st = int(down_edge_grps.iloc[i]['refer_st'])
refer_ed = int(down_edge_grps.iloc[i]['refer_ed'])
query_st = int(down_edge_grps.iloc[i]['query_st'])
query_ed = int(down_edge_grps.iloc[i]['query_ed'])
ax.plot(x['t'].loc[refer_st:refer_ed], x['q'].loc[refer_st:refer_ed], color='cyan', linewidth=5)
ax.plot(y['t'].loc[query_st:query_ed], y['q'].loc[query_st:query_ed] + vertical_mov, color='cyan', linewidth=5)
ax.set_title(title, fontsize='30')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
```
#### File: Signal-Alignment/parameter_cal/parameter_cal_with_dtw.py
```python
import pandas as pd
import math
import linecache
import numpy as np
from parameter_cal import cf
from parameter_cal.utils import get_k_accuracy_same, get_W, get_SS2, get_fact_align, get_reverse_dict, get_true_align, get_link_graph, load_data
from parameter_cal.utils import plot_warped_signals, cal_warped_signals
from dtw import dtw
from scipy.misc import *
import matplotlib.pyplot as plt
def norm(x, y):
return math.fabs(x[1] - y[1])
y_list = load_data(True)
query, reference = cal_warped_signals(y_list)
# plot warped signal
xvals, yinterp = plot_warped_signals(reference, query)
# store the corresponding point pair
query.drop('shift', axis=1)
query.drop('t', axis=1)
query2 = pd.DataFrame(yinterp)
query2['aligned_index'] = 0
query2['t'] = query['t']
query2.columns = ['q', 'aligned_index', 't']
query2.loc[len(query2) - 1, 'aligned_index'] = len(query) - 1
for i in range(len(query2) - 1):
for j in range(len(query['t2']) - 1):
if query['t2'][j] <= query2['t'][i] < query['t2'][j + 1]:
if abs(query2['q'][i] - query['q'][j]) < abs(query2['q'][i] - query['q'][j + 1]):
query2.loc[i, 'aligned_index'] = j
else:
query2.loc[i, 'aligned_index'] = j + 1
d, cost_matrix, acc_cost_matrix, path = dtw(reference[['t', 'q']].values, query2[['t', 'q']].values, dist=norm)
get_link_graph(reference, query2, path, -3)
true_align_dict = get_true_align(query2)
fact_dict = get_fact_align(path)
reverse_dict = get_reverse_dict(path)
print("error rate of dtw is " + str(get_k_accuracy_same(true_align_dict, fact_dict, reference)))
print("W of dtw is " + str(get_W(path)))
print("SS2 of dtw is " + str(get_SS2(fact_dict, reverse_dict, 1)))
```
#### File: Signal-Alignment/sdtw/test.py
```python
import pandas as pd
import math
import linecache
import numpy as np
from scipy import stats
from parameter_cal import cf
from dtw import dtw
from scipy.misc import *
from sdtw.config import sub_len, nBlocks
from sdtw.utils import cal_descriptor, samplingSequences, norm, get_link_graph
from parameter_cal.utils import get_fact_align, get_reverse_dict, get_SS2, get_SS1
from parameter_cal.cf import ds_time
from downsample.utils import get_true_aligned, get_group_number, get_k_accuracy
import matplotlib.pyplot as plt
# The plain logistic sigmoid(x / a) has derivative 1/(4a) at x = 0, so scaling by 4a gives the warp a peak slope of 1.
def sigmoid0(x):
return (4 * cf.warp_width) / (1 + math.exp(-x / cf.warp_width))
def gaussian_bump(x, a=1):
return math.exp(1 / (pow((x / a), 2) - 1))
sigmoid = np.vectorize(sigmoid0)
# generate warped signal
y = linecache.getline('data/Beef_TRAIN', 1)
y_list = y.split(',')
# delete the index
y_list.pop(0)
y_list = [float(item) for item in y_list]
reference = pd.DataFrame(y_list)
reference['t'] = [i for i in range(0, len(reference))]
reference.columns = ['q', 't']
anchor_index = 220
anchor_shift = 10
reference['shift'] = [derivative(sigmoid, math.fabs(anchor_index - i), dx=1e-6) * anchor_shift for i in reference['t']]
query = pd.DataFrame(reference)
query.columns = ['q', 't', 'shift']
query['t2'] = 0.1
temp = []
for i, j in zip(query['t'].values, query['shift'].values):
temp.append(i - j)
query['t2'] = temp
# add gaussian bump
range_of_gaussian = 40
height_of_gaussian = 1.2
temp = query[(query['t'] < anchor_index + range_of_gaussian) & (query['t'] > anchor_index - range_of_gaussian)].index
for i in temp:
query.loc[i, 'q'] = query.loc[i, 'q'] + height_of_gaussian * gaussian_bump(i - anchor_index, range_of_gaussian)
# plot warped signal
_, ax = plt.subplots(1, 1, figsize=(20, 10))
ax.scatter(x=query['t'], y=reference['q'], c='b', marker='.', label='before warp')
ax.scatter(x=query['t2'], y=query['q'], c='r', marker='.', label='after warp')
xvals = np.linspace(0, len(query['t']) - 1, math.floor(len(query['t']) / cf.ds_time))
x = query['t2']
y = query['q']
yinterp = np.array(np.interp(xvals, x, y))
xvals = np.array(xvals)
ax.scatter(x=xvals, y=yinterp, marker='.', c='g', label='after interp')
ax.legend(fontsize='30')
# normalize the signal
reference_norm = stats.zscore(reference['q'])
yinterp_norm = stats.zscore(yinterp)
# store the corresponding point pair
query.drop('shift', axis=1)
query.drop('t', axis=1)
query2 = pd.DataFrame({'t': xvals, 'q2': yinterp})
query2['close_index'] = 0
true_align_dict = get_true_aligned(cf.ds_time, query, query2)
group_num_dict = get_group_number(true_align_dict, query)
query2.loc[len(query2) - 1, 'close_index'] = len(query) - 1
for i in range(len(query2) - 1):
for j in range(len(query['t2']) - 1):
if query['t2'][j] <= query2['t'][i] < query['t2'][j + 1]:
if abs(query2['q2'][i] - query['q'][j]) < abs(query2['q2'][i] - query['q'][j + 1]):
query2.loc[i, 'close_index'] = j
else:
query2.loc[i, 'close_index'] = j + 1
if sub_len % 2 == 0:
raise Exception("Sub_len must be odd number!")
refer_subsequences = samplingSequences(reference_norm, sub_len)
query_subsequences = samplingSequences(yinterp_norm, int(sub_len/cf.ds_time))
refer_descriptors = np.zeros((len(refer_subsequences), nBlocks * 8))
query_descriptors = np.zeros((len(query_subsequences), nBlocks * 8))
refer_nsubsequences = len(refer_subsequences)
query_nsubsequences = len(query_subsequences)
for i in range(refer_nsubsequences):
sub_seq = refer_subsequences[i]
refer_descriptors[i] = cal_descriptor(sub_seq, sub_len)
for i in range(query_nsubsequences):
sub_seq = query_subsequences[i]
query_descriptors[i] = cal_descriptor(sub_seq, int(sub_len/cf.ds_time))
d, cost_matrix, acc_cost_matrix, path = dtw(refer_descriptors, query_descriptors, dist=norm)
query2.columns = ['t2', 'q', 'close_index'] # adapt to the get_link_graph
get_link_graph(reference, query2, path, -3, 'downsampled shapedtw')
fact_align_dict = get_fact_align(path)
reverse_dict = get_reverse_dict(path)
print("error rate of shapedtw is " + str(get_k_accuracy(true_align_dict, fact_align_dict, group_num_dict)))
print("SS1 of shapedtw is " + str(get_SS1(path, cf.ds_time)))
print("SS2 of shapedtw is " + str(get_SS2(fact_align_dict, reverse_dict, ds_time)))
```
|
{
"source": "Jessime/gcp-variant-transforms",
"score": 2
}
|
#### File: gcp-variant-transforms/gcp_variant_transforms/vcf_to_bq_common_test.py
```python
import collections
import unittest
from apache_beam.io.filesystems import FileSystems
import mock
from gcp_variant_transforms import vcf_to_bq_common
from gcp_variant_transforms.vcf_to_bq_common import PipelineModes
class DataProcessorTest(unittest.TestCase):
"""Tests cases for the ``general_process`` script."""
def _create_mock_args(self, **args):
return collections.namedtuple('MockArgs', args.keys())(*args.values())
def _get_pipeline_mode(self, args):
return vcf_to_bq_common.get_pipeline_mode(args.input_pattern,
args.optimize_for_large_inputs)
def test_get_mode_raises_error_for_no_match(self):
args = self._create_mock_args(
input_pattern='', optimize_for_large_inputs=False)
with mock.patch.object(FileSystems, 'match', return_value=None), \
self.assertRaises(ValueError):
self._get_pipeline_mode(args)
def test_get_mode_optimize_set(self):
args = self._create_mock_args(
input_pattern='', optimize_for_large_inputs=True)
self.assertEqual(self._get_pipeline_mode(args), PipelineModes.LARGE)
def test_get_mode_small(self):
args = self._create_mock_args(
input_pattern='', optimize_for_large_inputs=False)
match_result = collections.namedtuple('MatchResult', ['metadata_list'])
match = match_result([None for _ in range(100)])
with mock.patch.object(FileSystems, 'match', return_value=[match]):
self.assertEqual(self._get_pipeline_mode(args), PipelineModes.SMALL)
def test_get_mode_medium(self):
args = self._create_mock_args(
input_pattern='', optimize_for_large_inputs=False)
match_result = collections.namedtuple('MatchResult', ['metadata_list'])
match = match_result(range(101))
with mock.patch.object(FileSystems, 'match', return_value=[match]):
self.assertEqual(self._get_pipeline_mode(args), PipelineModes.MEDIUM)
match = match_result(range(50000))
with mock.patch.object(FileSystems, 'match', return_value=[match]):
self.assertEqual(self._get_pipeline_mode(args), PipelineModes.MEDIUM)
def test_get_mode_large(self):
args = self._create_mock_args(
input_pattern='', optimize_for_large_inputs=False)
match_result = collections.namedtuple('MatchResult', ['metadata_list'])
match = match_result(range(50001))
with mock.patch.object(FileSystems, 'match', return_value=[match]):
self.assertEqual(self._get_pipeline_mode(args), PipelineModes.LARGE)
def test_default_optimize_for_large_inputs(self):
args = self._create_mock_args(input_pattern='')
match_result = collections.namedtuple('MatchResult', ['metadata_list'])
match = match_result(range(101))
with mock.patch.object(FileSystems, 'match', return_value=[match]):
self.assertEqual(vcf_to_bq_common.get_pipeline_mode(args.input_pattern),
PipelineModes.MEDIUM)
```
|