id (string, 1–265 chars) | text (string, 6–5.19M chars) | dataset_id (7 classes) |
---|---|---|
142986 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score indicates how closely a sample resembles
its predicted class. Additionally, a new dataset with a desired purity of class B
could be constructed by, for example, selecting only samples whose decision score
exceeds some threshold.
"""
print(__doc__)
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1
)
X2, y2 = make_gaussian_quantiles(
mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1
)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, -y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200
)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(
X[idx, 0],
X[idx, 1],
c=c,
cmap=plt.cm.Paired,
s=20,
edgecolor="k",
label="Class %s" % n,
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc="upper right")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(
twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label="Class %s" % n,
alpha=0.5,
edgecolor="k",
)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
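# A minimal follow-up sketch of the idea mentioned in the docstring (the 0.5
# threshold is an arbitrary illustrative value, not part of the original
# example): keeping only samples whose decision score exceeds a cut-off yields
# a subset with a higher purity of class B.
score_threshold = 0.5
high_score_mask = twoclass_output > score_threshold
X_high_purity_b = X[high_score_mask]
purity_b = np.mean(y[high_score_mask] == 1)  # fraction of true class-B samples in the subset
print("Class-B purity above threshold %.1f: %.2f" % (score_threshold, purity_b))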
| StarcoderdataPython |
1667672 | <reponame>Gerryflap/master_thesis
"""
Uses the pairs defined by data/split_generation/pair_generator.py to output pairs of images in the following format:
((morph_inputs, comparison_images), (id_1, id_2))
where both morph_inputs and comparison_images are tuples of images of (person_1, person_2).
"""
import random
import os
import json
import PIL.Image
from torchvision.datasets import VisionDataset
from data.celeba_cropped import CelebaCropped
from shutil import copyfile
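# The relative paths below ("data/celeba_cropped/...") assume the process is started
# from the repository root, which is what this assertion checks.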
assert os.path.isdir("data")
class CelebaCroppedPairsLookAlike(VisionDataset):
cropped_base_folder = "celeba_cropped/img_align/"
def __init__(self, split="train", transform=None, target_transform=None, download=False):
super().__init__("data", transforms=None, transform=transform, target_transform=target_transform)
if not os.path.isdir("data/celeba_cropped/"):
# Initialize the dataset if it does not yet exist
CelebaCropped(split, transform, target_transform, download)
if split not in ["valid", "test"]:
raise ValueError("This Dataset can only be used for evaluation (valid or test)!")
# Load filenames and splits
split_map = {
"train": 0,
"valid": 1,
"test": 2,
"all": None,
}
split = split_map[split]
if split == 1:
with open("data/celeba_cropped/valid_pairs.json", "r") as f:
self.pairs = json.load(f)
elif split == 3:
with open("data/celeba_cropped/better_pairs_1.json", "r") as f:
self.pairs = json.load(f)
else:
with open("data/celeba_cropped/test_pairs.json", "r") as f:
self.pairs = json.load(f)
def __getitem__(self, index):
pair = self.pairs[index]
p_1 = pair[0]
p_2 = pair[1]
morph_inputs = self.load_image(p_1["morph_in"]), self.load_image(p_2["morph_in"])
comparison_images = self.load_image(p_1["comparison_pic"]), self.load_image(p_2["comparison_pic"])
idents = p_1["ident"], p_2["ident"]
return (morph_inputs, comparison_images), idents
def load_image(self, name):
X = PIL.Image.open(os.path.join(self.root, self.cropped_base_folder, name))
if self.transform is not None:
X = self.transform(X)
return X
def __len__(self):
return len(self.pairs)
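# Hypothetical helper (not part of the original file): shows how the nested
# ((morph_inputs, comparison_images), (id_1, id_2)) structure documented in the
# module docstring is unpacked for a single pair.
def _example_unpack_pair(dataset, index=0):
    (morph_inputs, comparison_images), (id_1, id_2) = dataset[index]
    person_1_input, person_2_input = morph_inputs
    return person_1_input, person_2_input, comparison_images, id_1, id_2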
if __name__ == "__main__":
ds = CelebaCroppedPairsLookAlike(download=True)
| StarcoderdataPython |
55166 | <filename>dashboard/__init__.py
from flask import Flask
app = Flask(__name__)
app.config['SECRET_KEY'] = 'fac5f35dc32ba113029dad3eb831d7d1'
from dashboard import routes | StarcoderdataPython |
150404 | <filename>data_sets/synthetic_review_prediction/article_0/__init__.py<gh_stars>1-10
from .configure import DATASET_NAME, create_data_set_properties
from .generate import run as _run
def run(client):
print(DATASET_NAME)
return _run(client, create_data_set_properties())
| StarcoderdataPython |
3252816 | <filename>client/ui.py
# Interface for registration
# Written by: <NAME>, <NAME>, <NAME>, and <NAME>
# Written for: Instant messenger project
# Created on: 4/10/2016
from client import rpc
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import queue
# Set up a global incoming message queue.
message_queue = queue.Queue()
"""
Handles commands pushed from the server.
"""
class Handler(rpc.Handler):
"""
Puts an incoming message into the message queue.
"""
def receive_message(self, **kwargs):
message_queue.put(kwargs)
"""
The main connection and login window.
Window shown before launching the main chat window. Handles connecting to the
server, logging in, and registration.
"""
class LoginWindow:
default_address = "0.0.0.0"
default_port = 6543
def __init__(self, window):
self.window = window
self.frame = None
self.proxy = None
self.username = StringVar()
self.password = StringVar()
window.protocol("WM_DELETE_WINDOW", self.close)
window.resizable(width=FALSE, height=FALSE)
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=1)
self.show_connect()
"""
Shows the server connection form.
"""
def show_connect(self):
if self.frame:
self.frame.destroy()
self.window.title("Connect to server")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
Label(self.frame, text="Connect to a server", style="Title.TLabel").grid(columnspan=2, padx=10, pady=10)
Label(self.frame, text="Address").grid(row=1, column=0, sticky=E)
self.address_entry = Entry(self.frame)
self.address_entry.insert(END, LoginWindow.default_address)
self.address_entry.grid(row=1, column=1, padx=10, pady=10)
Label(self.frame, text="Port").grid(row=2, column=0, sticky=E)
self.port_entry = Entry(self.frame)
self.port_entry.insert(END, str(LoginWindow.default_port))
self.port_entry.grid(row=2, column=1, padx=10, pady=10)
button_frame = Frame(self.frame)
button_frame.grid(row=3, column=0, columnspan=2)
Button(button_frame, text="Connect", command=self.on_connect).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Close", command=self.close).grid(row=0, column=1, padx=10, pady=10)
# Keyboard navigation.
self.address_entry.focus_set()
self.address_entry.bind("<Return>", lambda e: self.port_entry.focus_set())
self.port_entry.bind("<Return>", lambda e: self.on_connect())
"""
Shows the login form.
"""
def show_login(self):
if self.frame:
self.frame.destroy()
self.window.title("Login")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
Label(self.frame, text="Log in", style="Title.TLabel").grid(columnspan=2, padx=10, pady=10)
Label(self.frame, text="Username").grid(row=1, column=0, sticky=E)
self.username_entry = Entry(self.frame, textvariable=self.username)
self.username_entry.grid(row=1, column=1, padx=10, pady=10)
Label(self.frame, text="Password").grid(row=2, column=0, sticky=E)
self.password_entry = Entry(self.frame, textvariable=self.password, show="•")
self.password_entry.grid(row=2, column=1, padx=10, pady=10)
button_frame = Frame(self.frame)
button_frame.grid(row=3, column=0, columnspan=2)
Button(button_frame, text="Login", command=self.on_login).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Sign Up", command=self.show_register).grid(row=0, column=1, padx=10, pady=10)
# Keyboard navigation.
self.username_entry.focus_set()
self.username_entry.bind("<Return>", lambda e: self.password_entry.focus_set())
self.password_entry.bind("<Return>", lambda e: self.on_login())
"""
Shows the registration form.
"""
def show_register(self):
if self.frame:
self.frame.destroy()
self.window.title("Register")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
# Have user create username
name = Label(self.frame, text="Username")
name.grid(row=0, sticky=E)
self.username_entry = Entry(self.frame, textvariable=self.username)
self.username_entry.grid(row=0,column=1, padx=10, pady=10)
# Have user enter password
password = Label(self.frame, text="Password")
password.grid(row=1, sticky=E)
self.password_entry = Entry(self.frame, textvariable=self.password, show="•")
self.password_entry.grid(row=1, column=1, padx=10, pady=10)
# Have user retype Password
repassword = Label(self.frame, text="Retype password")
repassword.grid(row=2, sticky=E)
self.repassword_entry = Entry(self.frame, show="•")
self.repassword_entry.grid(row=2, column=1, padx=10, pady=10)
# Have user enter first name
firstname = Label(self.frame, text="First name")
firstname.grid(row=3, sticky=E)
self.first_name_entry = Entry(self.frame)
self.first_name_entry.grid(row=3, column=1, padx=10, pady=10)
# Have user enter last name
lastname = Label(self.frame, text="Last name")
lastname.grid(row=4, sticky=E)
self.last_name_entry = Entry(self.frame)
self.last_name_entry.grid(row=4, column=1, padx=10, pady=10)
# Have user enter email
email = Label(self.frame, text="Email address")
email.grid(row=5, sticky=E)
self.email_entry = Entry(self.frame)
self.email_entry.grid(row=5, column=1, padx=10, pady=10)
# Have user enter address
address = Label(self.frame, text="Street address")
address.grid(row=6, sticky=E)
self.address_entry = Entry(self.frame)
self.address_entry.grid(row=6, column=1, padx=10, pady=10)
# Submit register information button that will send information to server
button_frame = Frame(self.frame)
button_frame.grid(row=7, column=0, columnspan=2)
Button(button_frame, text="Sign Up", command=self.on_register).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Cancel", command=self.show_login).grid(row=0, column=1, padx=10, pady=10)
def on_connect(self):
address = self.address_entry.get()
port = int(self.port_entry.get())
try:
self.proxy = rpc.connect(address, port, Handler)
self.show_login()
except Exception as e:
messagebox.showerror("", "Could not connect to server.\n\nError: " + str(e))
def on_login(self):
username = self.username_entry.get()
password = self.password_entry.get()
try:
token = self.proxy.login(username=username, password=password)
# Open the chat window
self.frame.destroy()
ChatWindow(self.window, self.proxy, token)
except rpc.RpcException as e:
messagebox.showerror("", "Log in failed.\n\nError: " + str(e))
def on_register(self):
if self.repassword_entry.get() != self.password_entry.get():
messagebox.showerror("", "Password must match in both entries")
return
try:
self.proxy.create_user(
username = self.username_entry.get(),
password = self.password_entry.get(),
first_name = self.first_name_entry.get(),
last_name = self.last_name_entry.get(),
email = self.email_entry.get(),
address = self.address_entry.get()
)
messagebox.showinfo("", "Account created successfully!")
# Go back to login
self.show_login()
except rpc.RpcException as e:
messagebox.showerror("", "Registration failed.\n\nError: " + str(e))
def center(self):
self.window.update_idletasks()
w = self.window.winfo_screenwidth()
h = self.window.winfo_screenheight()
size = tuple(int(_) for _ in self.window.geometry().split('+')[0].split('x'))
x = w/2 - size[0]/2
y = h/2 - size[1]/2
self.window.geometry("+%d+%d" % (x, y))
def close(self):
if self.proxy:
self.proxy.close()
self.window.destroy()
"""
Main application window.
"""
class ChatWindow:
def __init__(self, window, proxy, token):
self.window = window
self.proxy = proxy
self.token = token
self.dest_username = None
self.dest_group = None
window.protocol("WM_DELETE_WINDOW", self.close)
window.minsize(width=200, height=200)
window.geometry("800x600")
window.resizable(width=TRUE, height=TRUE)
window.title("Instant Messenger")
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=0)
window.columnconfigure(1, weight=1)
window.columnconfigure(2, weight=0)
self.group_frame = Frame(window)
self.group_frame.grid(row=0, column=0, sticky=N+S+E+W, padx=10, pady=10)
self.message_frame = Frame(window)
self.message_frame.grid(row=0, column=1, sticky=N+S+E+W, pady=10)
self.friends_frame = Frame(window)
self.friends_frame.grid(row=0, column=2, sticky=N+S+E+W, padx=10, pady=10)
# Groups frame.
Label(self.group_frame, text="Groups").grid(pady=(0,10))
self.group_frame.rowconfigure(1, weight=1)
self.group_list = None
Button(self.group_frame, text="Add user", command=self.on_add_group_user).grid(row=2)
Button(self.group_frame, text="Remove user", command=self.on_remove_group_user).grid(row=3)
Button(self.group_frame, text="New group", command=self.on_create_group).grid(row=4)
# Friends frame.
Label(self.friends_frame, text="Friends").grid(pady=(0,10))
self.friends_frame.rowconfigure(1, weight=1)
self.friends_list = None
Button(self.friends_frame, text="Add friend", command=self.on_add_friend).grid(row=2)
# Set up the chat log frame.
self.message_frame.rowconfigure(1, weight=1)
self.message_frame.columnconfigure(0, weight=1)
self.message_title = Label(self.message_frame)
self.message_title.grid(row=0, column=0, columnspan=2, pady=(0,10), sticky=N+S+E+W)
self.message_list = Listbox(self.message_frame)
self.message_list.grid(row=1, column=0, sticky=N+S+E+W)
self.message_scrollbar = Scrollbar(self.message_frame)
self.message_scrollbar.grid(row=1, column=1, sticky=N+S+E+W)
self.message_scrollbar.config(command=self.message_list.yview)
self.message_list.config(yscrollcommand=self.message_scrollbar.set)
# Set up the message input.
self.chat_entry = Entry(self.message_frame)
self.chat_entry.bind("<Return>", self.on_send_message)
self.chat_entry.grid(row=2, column=0, columnspan=2, sticky=N+S+E+W, pady=(5, 0), ipady=5)
self.chat_entry.focus_set()
# Show remote data.
self.refresh_groups_list()
self.refresh_friends_list()
# Schedule the incoming message callback.
self.window.after(100, self.check_message_queue)
"""
Refreshes the list of groups from the server.
"""
def refresh_groups_list(self):
groups = self.proxy.get_groups(token=self.token)
if self.group_list:
self.group_list.destroy()
self.group_list = Frame(self.group_frame)
for i, id in enumerate(groups):
group = self.proxy.get_group(token=self.token, id=id)
label = Button(self.group_list, text=group["name"], command=lambda g=id: self.choose_group(g))
label.grid(row=i, sticky=E+W)
self.group_list.grid(row=1, sticky=N+E+W)
"""
Refreshes the list of friends from the server.
"""
def refresh_friends_list(self):
friends = self.proxy.get_friends(token=self.token)
if self.friends_list:
self.friends_list.destroy()
self.friends_list = Frame(self.friends_frame)
for i, username in enumerate(friends):
label = Button(self.friends_list, text=username, command=lambda u=username: self.choose_user(u))
label.grid(row=i, sticky=E+W)
self.friends_list.grid(row=1, sticky=N+E+W)
"""
Displays the existing messages for the current room.
"""
def refresh_message_list(self):
# Remove messages already in the pane.
self.message_list.delete(0, END)
# If we are talking to a user,
if self.dest_username:
messages = self.proxy.get_messages_with_user(token=self.token, username=self.dest_username)
# If we are in a group
elif self.dest_group:
messages = self.proxy.get_messages_in_group(token=self.token, group=self.dest_group)
else:
return
for message in messages:
self.display_message(message)
"""
Sets the message destination to a user.
"""
def choose_user(self, username):
self.dest_group = None
self.dest_username = username
self.message_title.config(text="User: " + username)
self.refresh_message_list()
"""
Sets the message destination to a group.
"""
def choose_group(self, group_id):
self.dest_username = None
self.dest_group = group_id
group = self.proxy.get_group(token=self.token, id=group_id)
self.message_title.config(text="Group: " + group["name"])
self.refresh_message_list()
"""
Displays a message in the chat history.
"""
def display_message(self, message):
self.message_list.insert(END, message["sender"] + ": " + message["text"])
"""
Shows a dialog for adding a user to a group.
"""
def on_add_group_user(self):
if self.dest_group:
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.add_group_user(token=self.token, group=self.dest_group, username=username)
self.refresh_groups_list()
self.choose_group(self.dest_group)
"""
Shows a dialog for removing a user from a group.
"""
def on_remove_group_user(self):
if self.dest_group:
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.remove_group_user(token=self.token, group=self.dest_group, username=username)
self.refresh_groups_list()
self.choose_group(self.dest_group)
"""
Shows a dialog for creating a group.
"""
def on_create_group(self):
group_id = self.proxy.create_group(token=self.token)
self.refresh_groups_list()
self.choose_group(group_id)
"""
Shows a dialog for adding a friend.
"""
def on_add_friend(self):
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.add_friend(token=self.token, username=username)
self.refresh_friends_list()
"""
Handles the event for sending a message.
"""
def on_send_message(self, event):
text = self.chat_entry.get()
# Slash commands are evaluated as Python code...
if text and text[0] == "/":
exec(text[1:])
# If we are talking to a user,
elif self.dest_username:
self.proxy.send_message(
token=self.token,
receiver={
"type": "user",
"username": self.dest_username,
},
text=text
)
# If we are in a group
elif self.dest_group:
self.proxy.send_message(
token=self.token,
receiver={
"type": "group",
"id": self.dest_group,
},
text=text
)
# Clear the message entry.
self.chat_entry.delete(0, END)
"""
Callback that runs periodically to display incoming messages in real-time.
"""
def check_message_queue(self):
while True:
try:
message = message_queue.get(False)
self.display_message(message)
except queue.Empty:
break
# Schedule again.
self.window.after(100, self.check_message_queue)
def close(self):
try:
self.proxy.logout(token=self.token)
self.proxy.close()
finally:
self.window.destroy()
"""
Convenience class for creating "prompt" dialog boxes.
"""
class PromptWindow:
def prompt(root, title):
window = PromptWindow(root, title)
root.wait_window(window.window)
return window.result
def __init__(self, root, title):
self.window = Toplevel(root)
self.window.resizable(width=FALSE, height=FALSE)
self.window.title(title)
self.label = Label(self.window, text=title)
self.label.grid(padx=10, pady=10)
self.entry = Entry(self.window)
self.entry.bind("<Return>", lambda e: self.submit())
self.entry.grid(row=1, padx=10)
self.entry.focus_set()
self.button = Button(self.window, text="OK", command=self.submit)
self.button.grid(row=2, padx=10, pady=10)
def submit(self):
self.result = self.entry.get()
self.window.destroy()
def run():
# Set up root window.
root = Tk()
# Make tkinter less ugly.
style = Style()
if "vista" in style.theme_names():
style.theme_use("vista")
elif "aqua" in style.theme_names():
style.theme_use("aqua")
else:
style.theme_use("clam")
style.configure("Title.TLabel", font=("Helvetica", 16))
# Show window.
window = LoginWindow(root)
window.center()
root.mainloop()
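# Standalone sketch of the queue-polling pattern used by ChatWindow.check_message_queue
# above (illustrative only, not wired into the client): a worker thread pushes items
# onto a queue.Queue and the Tk main loop drains it via `after`, since tkinter widgets
# must only be touched from the main thread.
def _queue_polling_sketch():
    import threading

    demo_queue = queue.Queue()
    demo_root = Tk()
    demo_label = Label(demo_root, text="waiting...")
    demo_label.grid()

    def worker():
        demo_queue.put("hello from a background thread")

    def poll():
        try:
            demo_label.config(text=demo_queue.get(False))
        except queue.Empty:
            pass
        demo_root.after(100, poll)

    threading.Thread(target=worker, daemon=True).start()
    poll()
    demo_root.mainloop()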
| StarcoderdataPython |
3349246 | from . import db
| StarcoderdataPython |
1772025 | <filename>pyfileconf/plugin/impl.py
import pluggy
hookimpl = pluggy.HookimplMarker('pyfileconf') | StarcoderdataPython |
3305099 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.argmin import ArgMinOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor
class ArgMinFrontExtractor(FrontExtractorOp):
op = 'ArgMin'
enabled = True
@classmethod
def extract(cls, node):
attrs = {
'top_k': 1,
'axis': None,
'keepdims': 0,
'remove_values_output': True,
'output_type': tf_dtype_extractor(node.pb.attr['output_type'].type, np.int64)
}
ArgMinOp.update_node_stat(node, attrs)
return cls.enabled
| StarcoderdataPython |
1774223 | <reponame>Luke-Ludwig/DRAGONS<filename>geminidr/gmos/tests/spect/test_determine_distortion.py
#!/usr/bin/env python
"""
Tests related to GMOS Long-slit Spectroscopy Arc primitives.
Notes
-----
- The `indirect` argument on `@pytest.mark.parametrize` fixture forces the
`ad` and `ad_ref` fixtures to be called and the AstroData object returned.
"""
import numpy as np
import os
import pytest
from matplotlib import colors
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimage
import astrodata
import geminidr
from astropy.modeling import models
from geminidr.gmos import primitives_gmos_spect
from gempy.library import astromodels, transform
from gempy.utils import logutils
from recipe_system.testing import ref_ad_factory
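# Illustration of the `indirect` mechanism described in the module docstring
# (hypothetical names, not part of the original test suite): each parametrized
# value reaches the fixture as `request.param`, and the test receives the
# fixture's return value instead of the raw string.
@pytest.fixture
def _indirect_example(request):
    return "loaded:" + request.param


@pytest.mark.parametrize("_indirect_example", ["N20100115S0346"], indirect=True)
def test_indirect_example_mechanism(_indirect_example):
    assert _indirect_example == "loaded:N20100115S0346"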
# Test parameters --------------------------------------------------------------
fixed_parameters_for_determine_distortion = {
"fwidth": None,
"id_only": False,
"max_missed": 5,
"max_shift": 0.05,
"min_snr": 5.,
"nsum": 10,
"spatial_order": 3,
"spectral_order": 4,
}
# Each test input filename contains the original input filename with
# "_mosaic" suffix
datasets = [
# Process Arcs: GMOS-N ---
"N20100115S0346_mosaic.fits", # B600:0.500 EEV
# "N20130112S0390_mosaic.fits", # B600:0.500 E2V
# "N20170609S0173_mosaic.fits", # B600:0.500 HAM
# "N20170403S0452_mosaic.fits", # B600:0.590 HAM Full Frame 1x1
# "N20170415S0255_mosaic.fits", # B600:0.590 HAM Central Spectrum 1x1
# "N20171016S0010_mosaic.fits", # B600:0.500 HAM, ROI="Central Spectrum", bin=1x2
# "N20171016S0127_mosaic.fits", # B600:0.500 HAM, ROI="Full Frame", bin=1x2
# "N20100307S0236_mosaic.fits", # B1200:0.445 EEV
# "N20130628S0290_mosaic.fits", # B1200:0.420 E2V
# "N20170904S0078_mosaic.fits", # B1200:0.440 HAM
# "N20170627S0116_mosaic.fits", # B1200:0.520 HAM
# "N20100830S0594_mosaic.fits", # R150:0.500 EEV
# "N20100702S0321_mosaic.fits", # R150:0.700 EEV
# "N20130606S0291_mosaic.fits", # R150:0.550 E2V
# "N20130112S0574_mosaic.fits", # R150:0.700 E2V
# "N20130809S0337_mosaic.fits", # R150:0.700 E2V
# "N20140408S0218_mosaic.fits", # R150:0.700 E2V
# "N20180119S0232_mosaic.fits", # R150:0.520 HAM
# "N20180516S0214_mosaic.fits", # R150:0.610 HAM ROI="Central Spectrum", bin=2x2
# "N20171007S0439_mosaic.fits", # R150:0.650 HAM
# "N20171007S0441_mosaic.fits", # R150:0.650 HAM
# "N20101212S0213_mosaic.fits", # R400:0.550 EEV
# "N20100202S0214_mosaic.fits", # R400:0.700 EEV
# "N20130106S0194_mosaic.fits", # R400:0.500 E2V
# "N20130422S0217_mosaic.fits", # R400:0.700 E2V
# "N20170108S0210_mosaic.fits", # R400:0.660 HAM
# "N20171113S0135_mosaic.fits", # R400:0.750 HAM
# "N20100427S1276_mosaic.fits", # R600:0.675 EEV
# "N20180120S0417_mosaic.fits", # R600:0.860 HAM
# "N20100212S0143_mosaic.fits", # R831:0.450 EEV
# "N20100720S0247_mosaic.fits", # R831:0.850 EEV
# "N20130808S0490_mosaic.fits", # R831:0.571 E2V
# "N20130830S0291_mosaic.fits", # R831:0.845 E2V
# "N20170910S0009_mosaic.fits", # R831:0.653 HAM
# "N20170509S0682_mosaic.fits", # R831:0.750 HAM
# "N20181114S0512_mosaic.fits", # R831:0.865 HAM
# "N20170416S0058_mosaic.fits", # R831:0.865 HAM
# "N20170416S0081_mosaic.fits", # R831:0.865 HAM
# "N20180120S0315_mosaic.fits", # R831:0.865 HAM
# # Process Arcs: GMOS-S ---
# # "S20130218S0126_mosaic.fits", # B600:0.500 EEV - todo: won't pass
# "S20130111S0278_mosaic.fits", # B600:0.520 EEV
# "S20130114S0120_mosaic.fits", # B600:0.500 EEV
# "S20130216S0243_mosaic.fits", # B600:0.480 EEV
# "S20130608S0182_mosaic.fits", # B600:0.500 EEV
# "S20131105S0105_mosaic.fits", # B600:0.500 EEV
# "S20140504S0008_mosaic.fits", # B600:0.500 EEV
# "S20170103S0152_mosaic.fits", # B600:0.600 HAM
# "S20170108S0085_mosaic.fits", # B600:0.500 HAM
# "S20130510S0103_mosaic.fits", # B1200:0.450 EEV
# "S20130629S0002_mosaic.fits", # B1200:0.525 EEV
# "S20131123S0044_mosaic.fits", # B1200:0.595 EEV
# "S20170116S0189_mosaic.fits", # B1200:0.440 HAM
# "S20170103S0149_mosaic.fits", # B1200:0.440 HAM
# "S20170730S0155_mosaic.fits", # B1200:0.440 HAM
# "S20171219S0117_mosaic.fits", # B1200:0.440 HAM
# "S20170908S0189_mosaic.fits", # B1200:0.550 HAM
# "S20131230S0153_mosaic.fits", # R150:0.550 EEV
# "S20130801S0140_mosaic.fits", # R150:0.700 EEV
# "S20170430S0060_mosaic.fits", # R150:0.717 HAM
# # "S20170430S0063_mosaic.fits", # R150:0.727 HAM - todo: won't pass
# "S20171102S0051_mosaic.fits", # R150:0.950 HAM
# "S20130114S0100_mosaic.fits", # R400:0.620 EEV
# "S20130217S0073_mosaic.fits", # R400:0.800 EEV
# "S20170108S0046_mosaic.fits", # R400:0.550 HAM
# "S20170129S0125_mosaic.fits", # R400:0.685 HAM
# "S20170703S0199_mosaic.fits", # R400:0.800 HAM
# "S20170718S0420_mosaic.fits", # R400:0.910 HAM
# # "S20100306S0460_mosaic.fits", # R600:0.675 EEV - todo: won't pass
# # "S20101218S0139_mosaic.fits", # R600:0.675 EEV - todo: won't pass
# "S20110306S0294_mosaic.fits", # R600:0.675 EEV
# "S20110720S0236_mosaic.fits", # R600:0.675 EEV
# "S20101221S0090_mosaic.fits", # R600:0.690 EEV
# "S20120322S0122_mosaic.fits", # R600:0.900 EEV
# "S20130803S0011_mosaic.fits", # R831:0.576 EEV
# "S20130414S0040_mosaic.fits", # R831:0.845 EEV
# "S20170214S0059_mosaic.fits", # R831:0.440 HAM
# "S20170703S0204_mosaic.fits", # R831:0.600 HAM
# "S20171018S0048_mosaic.fits", # R831:0.865 HAM
]
# Tests Definitions ------------------------------------------------------------
@pytest.mark.gmosls
@pytest.mark.preprocessed_data
@pytest.mark.regression
@pytest.mark.parametrize("ad", datasets, indirect=True)
def test_regression_for_determine_distortion_using_models_coefficients(
ad, change_working_dir, ref_ad_factory, request):
"""
Runs the `determineDistortion` primitive on preprocessed data and compares
its model with the one in the reference file.
Parameters
----------
ad : pytest.fixture (AstroData)
Fixture that reads the filename and loads as an AstroData object.
change_working_dir : pytest.fixture
Fixture that changes the working directory
(see :mod:`astrodata.testing`).
reference_ad : pytest.fixture
Fixture that contains a function used to load the reference AstroData
object (see :mod:`recipe_system.testing`).
request : pytest.fixture
PyTest built-in containing command line options.
"""
with change_working_dir():
logutils.config(file_name='log_model_{:s}.txt'.format(ad.data_label()))
p = primitives_gmos_spect.GMOSSpect([ad])
p.viewer = geminidr.dormantViewer(p, None)
p.determineDistortion(**fixed_parameters_for_determine_distortion)
distortion_determined_ad = p.writeOutputs().pop()
ref_ad = ref_ad_factory(distortion_determined_ad.filename)
for ext, ext_ref in zip(distortion_determined_ad, ref_ad):
c = np.ma.masked_invalid(ext.FITCOORD["coefficients"])
c_ref = np.ma.masked_invalid(ext_ref.FITCOORD["coefficients"])
np.testing.assert_allclose(c, c_ref, atol=2)
if request.config.getoption("--do-plots"):
do_plots(distortion_determined_ad, ref_ad)
@pytest.mark.gmosls
@pytest.mark.preprocessed_data
@pytest.mark.regression
@pytest.mark.parametrize("ad", datasets, indirect=True)
def test_regression_for_determine_distortion_using_fitcoord_table(
ad, change_working_dir, ref_ad_factory):
"""
Runs the `determineDistortion` primitive on preprocessed data and compares
its model with the one in the reference file. The distortion model needs to
be reconstructed because different coefficients might return same results.
Parameters
----------
ad : pytest.fixture (AstroData)
Fixture that reads the filename and loads as an AstroData object.
change_working_dir : pytest.fixture
Fixture that changes the working directory
(see :mod:`astrodata.testing`).
reference_ad : pytest.fixture
Fixture that contains a function used to load the reference AstroData
object (see :mod:`recipe_system.testing`).
"""
with change_working_dir():
logutils.config(file_name='log_fitcoord_{:s}.txt'.format(ad.data_label()))
p = primitives_gmos_spect.GMOSSpect([ad])
p.viewer = geminidr.dormantViewer(p, None)
p.determineDistortion(**fixed_parameters_for_determine_distortion)
distortion_determined_ad = p.writeOutputs().pop()
ref_ad = ref_ad_factory(distortion_determined_ad.filename)
table = ad[0].FITCOORD
model_dict = dict(zip(table['name'], table['coefficients']))
model = astromodels.dict_to_chebyshev(model_dict)
ref_table = ref_ad[0].FITCOORD
ref_model_dict = dict(zip(ref_table['name'], ref_table['coefficients']))
ref_model = astromodels.dict_to_chebyshev(ref_model_dict)
X, Y = np.mgrid[:ad[0].shape[0], :ad[0].shape[1]]
np.testing.assert_allclose(model(X, Y), ref_model(X, Y), atol=1)
# Local Fixtures and Helper Functions ------------------------------------------
@pytest.fixture(scope='function')
def ad(path_to_inputs, request):
"""
Returns the pre-processed spectrum file.
Parameters
----------
path_to_inputs : pytest.fixture
Fixture defined in :mod:`astrodata.testing` with the path to the
pre-processed input file.
request : pytest.fixture
PyTest built-in fixture containing information about parent test.
Returns
-------
AstroData
Input spectrum processed up to right before the `distortionDetermine`
primitive.
"""
filename = request.param
path = os.path.join(path_to_inputs, filename)
if os.path.exists(path):
ad = astrodata.open(path)
else:
raise FileNotFoundError(path)
return ad
def do_plots(ad, ad_ref):
"""
Generate diagnostic plots.
Parameters
----------
ad : AstroData
ad_ref : AstroData
"""
n_hlines = 25
n_vlines = 25
output_dir = "./plots/geminidr/gmos/test_gmos_spect_ls_distortion_determine"
os.makedirs(output_dir, exist_ok=True)
name, _ = os.path.splitext(ad.filename)
grating = ad.disperser(pretty=True)
bin_x = ad.detector_x_bin()
bin_y = ad.detector_y_bin()
central_wavelength = ad.central_wavelength() * 1e9 # in nanometers
# -- Show distortion map ---
for ext_num, ext in enumerate(ad):
fname, _ = os.path.splitext(os.path.basename(ext.filename))
n_rows, n_cols = ext.shape
x = np.linspace(0, n_cols, n_vlines, dtype=int)
y = np.linspace(0, n_rows, n_hlines, dtype=int)
X, Y = np.meshgrid(x, y)
model = rebuild_distortion_model(ext)
U = X - model(X, Y)
V = np.zeros_like(U)
fig, ax = plt.subplots(
num="Distortion Map {:s} #{:d}".format(fname, ext_num))
vmin = U.min() if U.min() < 0 else -0.1 * U.ptp()
vmax = U.max() if U.max() > 0 else +0.1 * U.ptp()
vcen = 0
Q = ax.quiver(
X, Y, U, V, U, cmap="coolwarm",
norm=colors.DivergingNorm(vcenter=vcen, vmin=vmin, vmax=vmax))
ax.set_xlabel("X [px]")
ax.set_ylabel("Y [px]")
ax.set_title(
"Distortion Map\n{:s} #{:d}- Bin {:d}x{:d}".format(
fname, ext_num, bin_x, bin_y))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(Q, extend="max", cax=cax, orientation="vertical")
cbar.set_label("Distortion [px]")
fig.tight_layout()
fig_name = os.path.join(
output_dir, "{:s}_{:d}_{:s}_{:.0f}_distMap.png".format(
fname, ext_num, grating, central_wavelength))
fig.savefig(fig_name)
del fig, ax
# -- Show distortion model difference ---
for num, (ext, ext_ref) in enumerate(zip(ad, ad_ref)):
name, _ = os.path.splitext(ext.filename)
shape = ext.shape
data = generate_fake_data(shape, ext.dispersion_axis() - 1)
model_out = remap_distortion_model(
rebuild_distortion_model(ext), ext.dispersion_axis() - 1)
model_ref = remap_distortion_model(
rebuild_distortion_model(ext_ref), ext_ref.dispersion_axis() - 1)
transform_out = transform.Transform(model_out)
transform_ref = transform.Transform(model_ref)
data_out = transform_out.apply(data, output_shape=ext.shape)
data_ref = transform_ref.apply(data, output_shape=ext.shape)
data_out = np.ma.masked_invalid(data_out)
data_ref = np.ma.masked_invalid(data_ref)
fig, ax = plt.subplots(
dpi=150, num="Distortion Comparison: {:s} #{:d}".format(name, num))
im = ax.imshow(data_ref - data_out)
ax.set_xlabel("X [px]")
ax.set_ylabel("Y [px]")
ax.set_title(
"Difference between output and reference: \n {:s} #{:d} ".format(
name, num))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, extend="max", cax=cax, orientation="vertical")
cbar.set_label("Distortion [px]")
fig_name = os.path.join(
output_dir, "{:s}_{:d}_{:s}_{:.0f}_distDiff.png".format(
name, num, grating, central_wavelength))
fig.savefig(fig_name)
def generate_fake_data(shape, dispersion_axis, n_lines=100):
"""
Helper function that generates fake arc data.
Parameters
----------
shape : tuple of ints
Shape of the output data with (nrows, ncols)
dispersion_axis : {0, 1}
Dispersion axis along rows (0) or along columns (1)
n_lines : int
Number of random lines to be added (default: 100)
Returns
-------
numpy.ndarray
2D array containing the synthetic arc-like data.
"""
np.random.seed(0)
nrows, ncols = shape
data = np.zeros((nrows, ncols))
line_positions = np.random.randint(0, ncols, size=n_lines)  # randint keeps indices strictly below ncols (random_integers is inclusive and deprecated)
line_intensities = 100 * np.random.random_sample(n_lines)
if dispersion_axis == 0:
data[:, line_positions] = line_intensities
data = ndimage.gaussian_filter(data, [5, 1])
else:
data[line_positions, :] = line_intensities
data = ndimage.gaussian_filter(data, [1, 5])
data = data + (np.random.random_sample(data.shape) - 0.5) * 10
return data
def rebuild_distortion_model(ext):
"""
Helper function to recover the distortion model from the coefficients stored
in the `ext.FITCOORD` attribute.
Parameters
----------
ext : astrodata extension
Input astrodata extension which contains a `.FITCOORD` with the
coefficients that can be used to reconstruct the distortion model.
Returns
-------
:class:`~astropy.modeling.models.Model`
Model that receives 2D data and returns a 1D array.
"""
model = astromodels.dict_to_chebyshev(
dict(zip(ext.FITCOORD["name"], ext.FITCOORD["coefficients"]))
)
return model
def remap_distortion_model(model, dispersion_axis):
"""
Remaps the distortion model so it can return a 2D array.
Parameters
----------
model : :class:`~astropy.modeling.models.Model`
A model that receives 2D data and returns 1D data.
dispersion_axis : {0 or 1}
Define distortion model along the rows (0) or along the columns (1).
Returns
-------
:class:`~astropy.modeling.models.Model`
A model that receives and returns 2D data.
See also
--------
- https://docs.astropy.org/en/stable/modeling/compound-models.html#advanced-mappings
"""
m = models.Identity(2)
if dispersion_axis == 0:
m.inverse = models.Mapping((0, 1, 1)) | (model & models.Identity(1))
else:
m.inverse = models.Mapping((0, 0, 1)) | (models.Identity(1) & model)
return m
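# Tiny self-contained demonstration (not part of the test module) of the
# Mapping trick used in remap_distortion_model above: Mapping((0, 0, 1))
# duplicates the first coordinate so that two sub-models can each receive
# the inputs they expect.
def _example_mapping_duplication():
    duplicate_x = models.Mapping((0, 0, 1))
    return duplicate_x(2.0, 5.0)   # -> (2.0, 2.0, 5.0)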
# -- Recipe to create pre-processed data ---------------------------------------
def create_inputs_recipe():
"""
Creates input data for tests using pre-processed standard star and its
calibration files.
The raw files will be downloaded and saved inside the path stored in the
`$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored inside
a new folder called "dragons_test_inputs". The sub-directory structure
should reflect the one returned by the `path_to_inputs` fixture.
"""
import os
from astrodata.testing import download_from_archive
from geminidr.gmos.tests.spect import CREATED_INPUTS_PATH_FOR_TESTS
module_name, _ = os.path.splitext(os.path.basename(__file__))
path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)
os.makedirs(path, exist_ok=True)
os.chdir(path)
os.makedirs("inputs/", exist_ok=True)
print('Current working directory:\n {:s}'.format(os.getcwd()))
for filename in datasets:
print('Downloading files...')
basename = filename.split("_")[0] + ".fits"
sci_path = download_from_archive(basename)
sci_ad = astrodata.open(sci_path)
data_label = sci_ad.data_label()
print('Reducing pre-processed data:')
logutils.config(file_name='log_{}.txt'.format(data_label))
p = primitives_gmos_spect.GMOSSpect([sci_ad])
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.mosaicDetectors()
p.makeIRAFCompatible()
os.chdir("inputs/")
processed_ad = p.writeOutputs().pop()
os.chdir("..")
print("Wrote pre-processed file to:\n"
" {:s}".format(processed_ad.filename))
if __name__ == '__main__':
import sys
if "--create-inputs" in sys.argv[1:]:
create_inputs_recipe()
else:
pytest.main()
| StarcoderdataPython |
149676 | import pathlib
from collections import namedtuple
from PIL import Image
import warnings
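# Raise Pillow's decompression-bomb pixel limit so that very large whole-slide
# images can be opened without triggering a DecompressionBombError.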
Image.MAX_IMAGE_PIXELS = 100000000000
class Slide:
'''
A slide object
'''
def __init__(self, path, img_requirements=None, stain_type='Unknown'):
'''
Creates a slide object with all possible data of the slide extracted
:param path:
:param img_requirements: dictionary of required svs configurations
'''
self.path = path
self.name = pathlib.Path(path).stem
self.image_type = pathlib.Path(path).suffix
self.stain_type = stain_type
i = Image.open(path)
self.width, self.height = i.width, i.height
self.image = i
Coordinate = namedtuple('Coordinate', 'x y')
self.start_coordinate = Coordinate(0, 0)
# get svs data if its an svs path
curr_slide_data = self._extract_data(path)
self.date_scanned = curr_slide_data['date_scanned']
self.time_scanned = curr_slide_data['time_scanned']
self.compression = curr_slide_data['compression']
self.mpp = curr_slide_data['mpp']
self.apparent_magnification = curr_slide_data['apparent_magnification'] # only here while in process of removal
def crop(self, coordinates):
'''
Updates internal slide properties so that we will only use a section of the slide
:param coordinates: use only a section of the slide (top_left_x, top_left_y, bot_right_x, bot_right_y)
:return:
'''
Coordinate = namedtuple('Coordinate', 'x y')
self.start_coordinate = Coordinate(coordinates[0], coordinates[1])
self.width = coordinates[2] - coordinates[0]
self.height = coordinates[3] - coordinates[1]
def get_thumbnail(self, wh_dims):
'''
:param dims: dimensions of returned thumbnail (width, height)
:return:
'''
warnings.warn('Please use thumbnail generated from heatmap for faster results')
return Image.open(self.path).resize(wh_dims)
def is_valid_img_file(self, img_requirements):
'''
Returns True if the slide is an svs that satisfies the image requirements.
Image requirements can specify None if a specific property is unrestricted.
If the image is a jpg, this trivially returns True.
:param img_requirements: dictionary of required svs configurations
:return: boolean
'''
return True
def _extract_data(self, slide_path):
'''
Extracts useful metadata from the svs
:param slide_path:
:return:
'''
return {
'date_scanned': None,
'time_scanned': None,
'compression': None,
'mpp': None,
'apparent_magnification': None
}
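# Hypothetical usage sketch (file name and coordinates are invented): crop()
# keeps only the region between the given top-left and bottom-right corners,
# so width and height shrink accordingly.
def _example_crop_usage(path="some_slide.jpg"):
    slide = Slide(path)
    slide.crop((100, 200, 600, 500))   # (top_left_x, top_left_y, bot_right_x, bot_right_y)
    return slide.width, slide.height   # -> (500, 300)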
| StarcoderdataPython |
1660783 | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields.simple import TextAreaField
from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo
from wtforms import StringField, PasswordField, SubmitField
from app.models import Category, User
def enabled_categories():
return Category.query.all()
class SignupForm(FlaskForm):
email = StringField(
render_kw={'placeholder': 'Email'},
validators=[DataRequired(), Email(check_deliverability=True)]
)
username = StringField(
render_kw={'placeholder': 'username'},
validators=[DataRequired()]
)
password = PasswordField(
render_kw={'placeholder': 'password'},
validators=[DataRequired(),
Length(min=6,message='password is too short!'),
EqualTo('confirm_password', message='Passwords must match')]
)
confirm_password = PasswordField(
render_kw={'placeholder': 'repeat password'},
validators=[DataRequired()]
)
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('the email is taken!')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('the username is taken!')
class AddPostForm(FlaskForm):
featuring_image = FileField(
render_kw={'accept': 'image/*'},
validators=[DataRequired(), FileAllowed(['jpg', 'png', 'jpeg'])]
)
title = StringField(
render_kw={'placeholder': 'Example(How to create a flask app!)'},
validators=[DataRequired()]
)
category = QuerySelectField(
query_factory=enabled_categories,
allow_blank=False,
validators=[DataRequired()]
)
tags = StringField(
render_kw={'placeholder': 'flask,python,blog'},
validators=[DataRequired()]
)
class ProfileForm(FlaskForm):
avatar = FileField(
render_kw={'accept': 'image/*'},
validators=[FileAllowed(['jpg', 'png', 'jpeg'])]
)
fname = StringField(
render_kw={'placeholder': 'name'},
validators=[DataRequired()]
)
lname = StringField(
render_kw={'placeholder': 'lastname'},
validators=[DataRequired()]
)
about = TextAreaField(
render_kw={
'placeholder': 'Tell something about yourself...',
'cols':'30', 'rows':'5'
},
validators=[DataRequired()]
)
class CommentForm(FlaskForm):
username = StringField(
render_kw={'placeholder': 'username'},
validators=[DataRequired()]
)
comment = TextAreaField(
render_kw={'placeholder': 'write comment','cols':'30', 'rows':'5'},
validators=[DataRequired()]
)
class EmptyForm(FlaskForm):
submit = SubmitField('Submit',render_kw={'class': 'btn btn-primary'} )
class ResetPasswordRequestForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
| StarcoderdataPython |
1750050 | import os
import time
import subprocess
import signal
from .Testcase import Testcase
def execute(case: Testcase, judge_conf: dict, str_cmd: str) -> (int, str):  # (elapsed, kses)
tmp_path = judge_conf["tmp_path"]
if not os.path.exists(tmp_path):
os.system(f"mkdir {tmp_path}")
try:
in_path = judge_conf["in_path"]
out_path = judge_conf["out_path"]
err_path = judge_conf["err_path"]
mem_limit_kb = case.memory_limit * 1024
except:
return 0, "JUDGEER"
with open("Meow","w") as F:
F.write(str_cmd)
run_command = f"ulimit -v {mem_limit_kb}; {str_cmd} 0<{in_path} 1>{out_path} 2>{err_path}; exit;"
start_time = time.time()
proc = subprocess.Popen([run_command], shell=True, preexec_fn=os.setsid)
try:
proc.communicate(timeout=case.time_limit / 1000)
t = proc.returncode
except subprocess.TimeoutExpired:
t = 124 # TLE
elapsed = time.time() - start_time
if os.path.exists("/proc/" + str(proc.pid)):
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
ts = [0, 124, 134, 136, 139]
KSES = ["OK", "TIMELXC", "ABORT", "FPEXCPT", "SEGMFLT"]
try:
kses = KSES[ts.index(t)]
except ValueError:
kses = "UNSPECI"
return int(elapsed * 1000), kses
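# Worked illustration of the exit-status-to-verdict mapping above (hypothetical
# call, not part of the original module): status 124 comes from a timeout,
# 139 from a segmentation fault, and anything unlisted falls back to "UNSPECI".
def _example_verdict_lookup(exit_status=139):
    ts = [0, 124, 134, 136, 139]
    kses = ["OK", "TIMELXC", "ABORT", "FPEXCPT", "SEGMFLT"]
    try:
        return kses[ts.index(exit_status)]
    except ValueError:
        return "UNSPECI"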
| StarcoderdataPython |
90296 | <reponame>anglebinbin/Barista-tool
'''
Created on 08.11.2016
@author: k.hartz
'''
import os
import time
import sys
# Logfile to pass through stdout
logFile = "test/logfiles/test_log_1.log"
# delay per line in sec, 1=1sec
delayPerLine = 0.01
# Separator to separate the logfile output from the prints
seperator = "------------------"
# directory where this file is contained
dir_path = os.path.dirname(os.path.realpath(__file__))
# working directory
cwd = os.getcwd()
print("Start Output:\n\n")
print("working directory:", cwd)
print("directory where this file is contained:", dir_path)
print(seperator)
f = open(logFile)
for line in f:
sys.stdout.write(line)
sys.stdout.flush()
time.sleep(delayPerLine)
print(seperator)
print("Finished log")
| StarcoderdataPython |
1616733 | <filename>samples/python/houghcircles.py
#!/usr/bin/python
'''
This example illustrates how to use the cv.HoughCircles() function.
Usage:
houghcircles.py [<image_name>]
image argument defaults to board.jpg
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def main():
try:
fn = sys.argv[1]
except IndexError:
fn = 'board.jpg'
src = cv.imread(cv.samples.findFile(fn))
img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
img = cv.medianBlur(img, 5)
cimg = src.copy() # numpy function
circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
if circles is not None: # Check if circles have been found and only then iterate over these and add them to the image
circles = np.uint16(np.around(circles))
_a, b, _c = circles.shape
for i in range(b):
cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv.LINE_AA)
cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv.LINE_AA) # draw center of circle
cv.imshow("detected circles", cimg)
cv.imshow("source", src)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
| StarcoderdataPython |
57203 | <gh_stars>0
class Edge:
def __init__(self, start, end):
self.start = start
self.end = end
def __str__(self):
return "<" + str(self.start) + " " + str(self.end) + ">"
| StarcoderdataPython |
128338 | <gh_stars>1-10
"""
``permute`` provides permutation tests and confidence intervals for a variety
of nonparametric testing and estimation problems, for a variety of
randomization designs.
* Stratified and unstratified tests
* Test statistics in each stratum
* Methods of combining tests across strata
* Nonparametric combinations of tests
Problems/Methods:
-----------------
1. The 2-sample problem
2. The *n*-sample problem
3. Tests for the slope in linear regression
4. Tests for quantiles
5. Tests of independence and association: runs tests, permutation association
6. Tests of exchangeability
7. Tests of symmetry: reflection, spherical
8. Permutation ANOVA
9. Goodness of fit tests
Confidence sets
---------------
1. Constant shifts
2. Proportional shifts
3. Monotone shifts
Links
-----
UC Berkeley's Statistics 240: Nonparametric and Robust Methods.
* `2015 course
website <http://www.stat.berkeley.edu/~johann/240spring15/index.html>`_
* `<NAME>'s lecture
notes <http://www.stat.berkeley.edu/~stark/Teach/S240/Notes/index.htm>`_
"Permutation Tests for Complex Data: Theory, Applications and Software"
by <NAME>, <NAME>
* `Publisher's
website <http://www.wiley.com/WileyCDA/WileyTitle/productCd-0470516410.html>`_
* `Supplementary Material (i.e., code and
data) <http://www.wiley.com/legacy/wileychi/pesarin/material.html>`_
* `NPC test code <http://static.gest.unipd.it/~salmaso/NPC_TEST.htm>`_
"Stochastic Ordering and ANOVA: Theory and Applications with R"
by <NAME>., <NAME>., <NAME>., <NAME>.
* `R code <http://static.gest.unipd.it/~salmaso/web/springerbook.htm>`_
"""
import os.path as _osp
import importlib as _imp
import functools as _functools
import warnings as _warnings
pkg_dir = _osp.abspath(_osp.dirname(__file__))
data_dir = _osp.join(pkg_dir, 'data')
try:
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
else:
del version
try:
_imp.import_module('nose')
except ImportError:
def _test(verbose=False):
"""This would run all unit tests, but nose couldn't be
imported so the test suite can not run.
"""
raise ImportError("Could not load nose. Unit tests not available.")
def _doctest(verbose=False):
"""This would run all doc tests, but nose couldn't be
imported so the test suite can not run.
"""
raise ImportError("Could not load nose. Doctests not available.")
else:
def _test(doctest=False, verbose=False, dry_run=False, run_all=True):
"""Run all unit tests."""
import nose
args = ['', pkg_dir, '--exe', '--ignore-files=^_test']
if verbose:
args.extend(['-v', '-s'])
if dry_run:
args.extend(['--collect-only'])
if not run_all:
args.extend(['-A', 'not slow'])
if doctest:
args.extend(['--with-doctest', '--ignore-files=^\.',
'--ignore-files=^setup\.py$$', '--ignore-files=test'])
# Make sure warnings do not break the doc tests
with _warnings.catch_warnings():
_warnings.simplefilter("ignore")
success = nose.run('permute', argv=args)
else:
success = nose.run('permute', argv=args)
# Return sys.exit code
if success:
return 0
else:
return 1
# do not use `test` as function name as this leads to a recursion problem with
# the nose test suite
test = _test
test_verbose = _functools.partial(test, verbose=True)
test_verbose.__doc__ = test.__doc__
doctest = _functools.partial(test, doctest=True)
doctest.__doc__ = doctest.__doc__
doctest_verbose = _functools.partial(test, doctest=True, verbose=True)
doctest_verbose.__doc__ = doctest.__doc__
| StarcoderdataPython |
44029 | import ipaddress
from collections import defaultdict
from autonetkit.design.utils import filters
from autonetkit.design.utils.general import group_by
from autonetkit.network_model.types import LAYER3_DEVICES
def assign_loopbacks(topology):
"""
@param topology:
"""
layer3_nodes = [n for n in topology.nodes()
if n.type in LAYER3_DEVICES]
loopback_block = ipaddress.IPv4Network("172.16.0.0/16")
loopback_subnets = loopback_block.subnets(new_prefix=24)
grouped_l3 = group_by(layer3_nodes, "asn")
allocated_loopbacks = defaultdict(list)
for asn, nodes in grouped_l3.items():
# can repeat the loopbacks in each asn
subnet = next(loopback_subnets)
allocated_loopbacks[asn].append(subnet)
host_ips = subnet.hosts()
for node in nodes:
host_ip = next(host_ips)
lo0 = node.loopback_zero()
lo0.set("ip", host_ip)
# also map onto node for debugging/vis
topology.set("loopbacks_by_asn", allocated_loopbacks)
def assign_bc_subnets(topology):
"""
@param topology:
"""
# the network to use to address end hosts and for inter-domain connections
allocated_blocks = defaultdict(list)
global_advertise_network = ipaddress.IPv4Network("10.0.0.0/8")
global_subnets = global_advertise_network.subnets(new_prefix=16)
bc_nodes = filters.broadcast_domains(topology)
grouped_bc = group_by(bc_nodes, "asn")
for asn, nodes in grouped_bc.items():
asn_block = next(global_subnets)
allocated_blocks[asn].append(asn_block)
# quick method: allocate a /24 to each broadcast domain
# Note: this could be significantly optimised in the future
# Note: could allocate different block to internal infrastructure too
external_blocks = asn_block.subnets(new_prefix=24)
for bc in nodes:
bc_block = next(external_blocks)
bc.set("network", bc_block)
topology.set("infrastructure_by_asn", allocated_blocks)
| StarcoderdataPython |
1603702 | import os
from flask import redirect, url_for, render_template, request, jsonify, abort
from .Room import Room
from .Permissions import Permissions
from .Task import Task
from .Token import Token
from .Layout import Layout
from . import main
from .forms import LoginForm, TokenGenerationForm
from .User import User
from flask_login import login_required, current_user, logout_user
from .. import config
@main.route('/token', methods=['GET', 'POST'])
def token():
source = request.args.get("source", None) if request.method == 'GET' else None
room = request.args.get("room", None) if request.method == 'GET' else None
if room:
room = int(room)
task = request.args.get("task", None) if request.method == 'GET' else None
if task:
task = int(task)
count = request.args.get("count", None) if request.method == 'GET' else None
if count:
count = int(count)
key = request.args.get("key", None) if request.method == 'GET' else None
form = TokenGenerationForm(task=task or 0, count=count or 1, source=source or "", key=key or "", room=room or 1)
if form.is_submitted():
source = form.source.data or ""
room = form.room.data
task = form.task.data
count = form.count.data
key = form.key.data
elif not (room and task and key):
form.room.choices = [(room.id(), room.name()) for room in Room.list()]
form.task.choices = [(task.id(), task.name()) for task in Task.list()]
return render_template('token.html',
form=form,
title=config['templates']['token-title'],
)
if key != config["server"]["secret-key"]:
return "Invalid password"
output = ""
for i in range(0, count or 1):
output += Token.create(source or "", Room.from_id(room), Task.from_id(task)).uuid()
output += "<br />"
return output
@main.route('/', methods=['GET', 'POST'])
def index():
login_token = request.args.get(
"token", None) if request.method == 'GET' else None
name = request.args.get("name", None) if request.method == 'GET' else None
token_invalid = False
if name and login_token:
login = User.login(name, Token.from_uuid(login_token))
if login:
return redirect(url_for('.chat'))
token_invalid = True
form = LoginForm()
if form.validate_on_submit():
name = form.name.data
login_token = form.token.data
user = User.login(form.name.data, Token.from_uuid(form.token.data))
if user:
return redirect(url_for('.chat'))
else:
form.token.errors.append(
"The token is either expired, was already used or isn't correct at all.")
if name:
form.name.data = name
return render_template('index.html', form=form, token=login_token, token_invalid=token_invalid,
title=config['templates']['login-title'])
@main.route('/chat')
@login_required
def chat():
room = current_user.latest_room()
token = current_user.token()
if not room:
room = token.room()
name = current_user.name()
permissions = Permissions(token, room)
print(permissions)
if name == '' or room == '':
return redirect(url_for('.index'))
return render_template('chat.html',
name=name,
room=room.label(),
title=config['templates']['chat-title'],
heading=config['templates']['chat-header'],
refresh_threshold=config['client']['refresh-threshold'],
refresh_start=config['client']['refresh-start'],
refresh_max=config['client']['refresh-max'],
ping_pong_latency_checks=config['client']['ping-pong-latency-checks'],
)
@main.route('/test', methods=['GET', 'POST'])
def test():
name = request.args.get(
"layout", None) if request.method == 'GET' else None
layout = Layout.from_json_file(name)
if not name:
return ""
return render_template('layout.html',
title=name,
html=layout.html(indent=8),
css=layout.css(indent=12),
script=layout.script(),
)
# @login_required
@main.route('/logout', methods=['GET', 'POST'])
def logout():
logout_user()
message = request.form.get('message', None)
if message:
return message
else:
return "Logout successful"
| StarcoderdataPython |
152214 | <filename>core/iam_utils.py
import boto3
import json
import random
def generate_policy_prefix(user_group_name):
'''policy prefix for user group'''
# add rangom tag to avoid attempting to overwrite a previously created and deleted policy and silently failing.
random_tag = str(int(random.random() * 10000))
tibanna_policy_prefix = 'tibanna_' + user_group_name + '_' + random_tag
return tibanna_policy_prefix
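# Hypothetical example of the naming scheme implemented above: the numeric tag
# is random, so the exact suffix shown in the comment is only illustrative.
def _example_policy_prefix():
    return generate_policy_prefix("default")   # e.g. "tibanna_default_1234"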
def generate_policy_list_instanceprofiles():
policy_list_instanceprofiles = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1478801433000",
"Effect": "Allow",
"Action": [
"iam:ListInstanceProfiles"
],
"Resource": [
"*"
]
}
]
}
return policy_list_instanceprofiles
def generate_policy_cloudwatchlogs():
policy_cloudwatchlogs = {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*",
"Effect": "Allow"
}
]
}
return policy_cloudwatchlogs
def generate_policy_bucket_access(bucket_names):
resource_list_buckets = ["arn:aws:s3:::" + bn for bn in bucket_names]
resource_list_objects = ["arn:aws:s3:::" + bn + "/*" for bn in bucket_names]
policy_bucket_access = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": resource_list_buckets
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
],
"Resource": resource_list_objects
}
]
}
return policy_bucket_access
def generate_policy_iam_passrole_s3(account_id, tibanna_policy_prefix):
role_resource = ['arn:aws:iam::' + account_id + ':role/' + tibanna_policy_prefix + '_s3']
policy_iam_passrole_s3 = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1478801396000",
"Effect": "Allow",
"Action": [
"iam:PassRole"
],
"Resource": role_resource
}
]
}
return policy_iam_passrole_s3
def generate_lambdainvoke_policy(account_id, region, tibanna_policy_prefix):
function_arn_prefix = 'arn:aws:lambda:' + region + ':' + account_id + ':function/'
resource = [function_arn_prefix + 'run_task_awsem' + '_' + tibanna_policy_prefix,
function_arn_prefix + 'check_task_awsem' + '_' + tibanna_policy_prefix]
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": resource
}
]
}
return policy
def generate_desc_stepfunction_policy(account_id, region, tibanna_policy_prefix):
execution_arn_prefix = 'arn:aws:states:' + region + ':' + account_id + ':execution:'
usergroup = tibanna_policy_prefix.replace('tibanna_', '')
resource = execution_arn_prefix + '*' + usergroup + ':*'
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"states:DescribeExecution"
],
"Resource": resource
}
]
}
return policy
def generate_assume_role_policy_document(service):
'''service: 'ec2', 'lambda' or 'states' '''
AssumeRolePolicyDocument = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": service + ".amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
return AssumeRolePolicyDocument
def get_bucket_role_name(tibanna_policy_prefix):
return tibanna_policy_prefix + '_s3'
def get_lambda_role_name(tibanna_policy_prefix, lambda_name):
return tibanna_policy_prefix + '_' + lambda_name
def get_stepfunction_role_name(tibanna_policy_prefix):
return tibanna_policy_prefix + '_states'
def create_empty_role_for_lambda(iam, verbose=False):
client = iam.meta.client
role_policy_doc_lambda = generate_assume_role_policy_document('lambda')
empty_role_name = 'tibanna_lambda_init_role'
try:
client.get_role(RoleName=empty_role_name)
except Exception:
print("creating %s", empty_role_name)
response = client.create_role(
RoleName=empty_role_name,
AssumeRolePolicyDocument=json.dumps(role_policy_doc_lambda)
)
if verbose:
print(response)
def create_role_for_bucket(iam, tibanna_policy_prefix, account_id,
bucket_policy_name, verbose=False):
client = iam.meta.client
bucket_role_name = get_bucket_role_name(tibanna_policy_prefix)
role_policy_doc_ec2 = generate_assume_role_policy_document('ec2')
response = client.create_role(
RoleName=bucket_role_name,
AssumeRolePolicyDocument=json.dumps(role_policy_doc_ec2)
)
if verbose:
print(response)
role_bucket = iam.Role(bucket_role_name)
response = role_bucket.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + bucket_policy_name
)
if verbose:
print(response)
def create_role_for_run_task_awsem(iam, tibanna_policy_prefix, account_id,
cloudwatch_policy_name, bucket_policy_name,
list_policy_name, passrole_policy_name,
desc_stepfunction_policy_name,
verbose=False):
client = iam.meta.client
lambda_run_role_name = get_lambda_role_name(tibanna_policy_prefix, 'run_task_awsem')
role_policy_doc_lambda = generate_assume_role_policy_document('lambda')
response = client.create_role(
RoleName=lambda_run_role_name,
AssumeRolePolicyDocument=json.dumps(role_policy_doc_lambda)
)
role_lambda_run = iam.Role(lambda_run_role_name)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + list_policy_name
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + cloudwatch_policy_name
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + passrole_policy_name
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + bucket_policy_name
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/AmazonEC2FullAccess'
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + desc_stepfunction_policy_name
)
if verbose:
print(response)
def create_role_for_check_task_awsem(iam, tibanna_policy_prefix, account_id,
cloudwatch_policy_name, bucket_policy_name,
verbose=False):
client = iam.meta.client
lambda_check_role_name = get_lambda_role_name(tibanna_policy_prefix, 'check_task_awsem')
role_policy_doc_lambda = generate_assume_role_policy_document('lambda')
response = client.create_role(
RoleName=lambda_check_role_name,
AssumeRolePolicyDocument=json.dumps(role_policy_doc_lambda)
)
if verbose:
print(response)
role_lambda_run = iam.Role(lambda_check_role_name)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + cloudwatch_policy_name
)
if verbose:
print(response)
response = role_lambda_run.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + bucket_policy_name
)
if verbose:
print(response)
def create_role_for_stepfunction(iam, tibanna_policy_prefix, account_id,
lambdainvoke_policy_name, verbose=False):
client = iam.meta.client
stepfunction_role_name = get_stepfunction_role_name(tibanna_policy_prefix)
role_policy_doc = generate_assume_role_policy_document('states')
response = client.create_role(
RoleName=stepfunction_role_name,
AssumeRolePolicyDocument=json.dumps(role_policy_doc)
)
if verbose:
print(response)
role_stepfunction = iam.Role(stepfunction_role_name)
response = role_stepfunction.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/service-role/AWSLambdaRole'
# PolicyArn='arn:aws:iam::' + account_id + ':policy/' + lambdainvoke_policy_name
)
if verbose:
print(response)
def create_user_group(iam, group_name, bucket_policy_name, account_id, verbose=False):
client = iam.meta.client
response = client.create_group(
GroupName=group_name
)
if verbose:
print(response)
group = iam.Group(group_name)
response = group.attach_policy(
PolicyArn='arn:aws:iam::' + account_id + ':policy/' + bucket_policy_name
)
if verbose:
print(response)
response = group.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/AWSStepFunctionsFullAccess'
)
if verbose:
print(response)
response = group.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/AWSStepFunctionsConsoleFullAccess'
)
if verbose:
print(response)
response = group.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
)
if verbose:
print(response)
def create_tibanna_iam(account_id, bucket_names, user_group_name, region, verbose=False):
"""creates IAM policies and roles and a user group for tibanna
returns prefix of all IAM policies, roles and group.
Total 4 policies, 3 roles and 1 group is generated that is associated with a single user group
A user group shares permission for buckets, tibanna execution and logs
"""
# create prefix that represent a single user group
tibanna_policy_prefix = generate_policy_prefix(user_group_name)
iam = boto3.resource('iam')
client = iam.meta.client
# bucket policy
bucket_policy_name = tibanna_policy_prefix + '_bucket_access'
policy_ba = generate_policy_bucket_access(bucket_names)
response = client.create_policy(
PolicyName=bucket_policy_name,
PolicyDocument=json.dumps(policy_ba),
)
# lambda policies
# list_instanceprofiles : by default not user-dependent,
# but create per user group to allow future modification per user-group
list_policy_name = tibanna_policy_prefix + '_list_instanceprofiles'
response = client.create_policy(
PolicyName=list_policy_name,
PolicyDocument=json.dumps(generate_policy_list_instanceprofiles()),
)
if verbose:
print(response)
# cloudwatchlogs: by default not user-dependent,
# but create per user group to allow future modification per user-group
cloudwatch_policy_name = tibanna_policy_prefix + '_cloudwatchlogs'
response = client.create_policy(
PolicyName=cloudwatch_policy_name,
PolicyDocument=json.dumps(generate_policy_cloudwatchlogs()),
)
if verbose:
print(response)
# iam_passrole_s3: passrole policy per user group
passrole_policy_name = tibanna_policy_prefix + '_iam_passrole_s3'
policy_iam_ps3 = generate_policy_iam_passrole_s3(account_id, tibanna_policy_prefix)
response = client.create_policy(
PolicyName=passrole_policy_name,
PolicyDocument=json.dumps(policy_iam_ps3),
)
if verbose:
print(response)
# lambdainvoke policy for step function
lambdainvoke_policy_name = tibanna_policy_prefix + '_lambdainvoke'
policy_lambdainvoke = generate_lambdainvoke_policy(account_id, region, tibanna_policy_prefix)
response = client.create_policy(
PolicyName=lambdainvoke_policy_name,
PolicyDocument=json.dumps(policy_lambdainvoke),
)
if verbose:
print(response)
desc_stepfunction_policy_name = tibanna_policy_prefix + '_desc_sts'
policy_desc_stepfunction = generate_desc_stepfunction_policy(account_id, region, tibanna_policy_prefix)
response = client.create_policy(
PolicyName=desc_stepfunction_policy_name,
PolicyDocument=json.dumps(policy_desc_stepfunction),
)
if verbose:
print(response)
# roles
# role for bucket
create_role_for_bucket(iam, tibanna_policy_prefix, account_id, bucket_policy_name)
# role for lambda
create_role_for_run_task_awsem(iam, tibanna_policy_prefix, account_id,
cloudwatch_policy_name, bucket_policy_name,
list_policy_name, passrole_policy_name,
desc_stepfunction_policy_name)
create_role_for_check_task_awsem(iam, tibanna_policy_prefix, account_id,
cloudwatch_policy_name, bucket_policy_name)
create_empty_role_for_lambda(iam)
# role for step function
create_role_for_stepfunction(iam, tibanna_policy_prefix, account_id, lambdainvoke_policy_name)
# instance profile
instance_profile_name = get_bucket_role_name(tibanna_policy_prefix)
client.create_instance_profile(
InstanceProfileName=instance_profile_name
)
ip = iam.InstanceProfile(instance_profile_name)
ip.add_role(
RoleName=instance_profile_name
)
# create IAM group for users who share permission
create_user_group(iam, tibanna_policy_prefix, bucket_policy_name, account_id)
return tibanna_policy_prefix
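# A minimal usage sketch (the account id, bucket and group names below are
# placeholders, not real values):
# prefix = create_tibanna_iam(account_id='123456789012',
#                             bucket_names=['my-tibanna-bucket'],
#                             user_group_name='default',
#                             region='us-east-1')
# This would create the policies, roles, instance profile and user group defined
# above and return a prefix such as 'tibanna_default_<random tag>'.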
| StarcoderdataPython |
1771750 | <reponame>javierlorenzod/HigherHRNet-Human-Pose-Estimation
from torch.utils.data import Dataset, DataLoader
import torch
from PIL import Image
import glob
import os.path as osp
import os
import random
"""
Posibles mejoras:
- Comprobar image format es válido --> buscar en internet
- Comprobar parámetros de entrada
- get_image_name_wo_ext podría convertirse en una lambda
- Comprobar que hay imágenes en la carpeta xD
"""
class BaseDataset(Dataset):
def __init__(self,
input_img_dir: str,
output_json_dir: str,
img_format: str = "png",
create_output_dir: bool = True,
transform = None) -> None:
if not osp.isdir(input_img_dir):
raise NotADirectoryError(f"{input_img_dir} is not a valid input image directory")
self.input_img_dir = input_img_dir
if not osp.isdir(output_json_dir) and not create_output_dir:
raise NotADirectoryError(f"{output_json_dir} directory does not exist")
elif not osp.isdir(output_json_dir) and create_output_dir:
os.makedirs(output_json_dir)
self.output_json_dir = output_json_dir
self.img_format = img_format
self.input_img_list = []
self.output_json_files_list = []
self.transform = transform
@staticmethod
def get_image_name_without_extension(img_filename):
"""
Extracts image name without extension from file name
:param img_filename:
:return:
"""
return osp.basename(img_filename).split('.')[0]
@staticmethod
def get_json_filename_for_image(image_name):
return image_name + '.json'
def generate_io_samples_pairs(self):
self.input_img_list = glob.glob(osp.join(self.input_img_dir, "*." + self.img_format))
for image_file in self.input_img_list:
image_name = self.get_image_name_without_extension(image_file)
self.output_json_files_list.append(osp.join(self.output_json_dir,
self.get_json_filename_for_image(image_name)))
def __len__(self):
return len(self.input_img_list)
def show_image_and_corresponding_json(self, idx):
print(f"Image file: {self.input_img_list[idx]}")
print(f"JSON file: {self.output_json_files_list[idx]}")
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_filename = self.input_img_list[idx]
img = Image.open(img_filename)
if self.transform:
img = self.transform(img)
return img, idx
if __name__ == "__main__":
dataset = BaseDataset("/media/jld/DATOS_JLD/datasets/cityscapes/train/", "/media/jld/DATOS_JLD/gitrepos/paper-keypoints/train/")
dataset.generate_io_samples_pairs()
for _ in range(100):
        idx = random.randint(0, len(dataset) - 1)
print(f"Showing id {idx}")
dataset.show_image_and_corresponding_json(idx)
| StarcoderdataPython |
3242123 | <gh_stars>0
import sys
import pandas as pd
if __name__ == "__main__":
path = sys.argv[1]
ser = pd.read_csv(path, header=None).squeeze()
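    # sum each sliding window of three measurements, then count how many
    # window sums increased compared with the previous window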
ser = ser.rolling(3).sum()
print((ser > ser.shift(1)).sum())
| StarcoderdataPython |
1644909 | """Internal representation of network name"""
from typing import List, Union
import binascii
import json
import os
class Name(object):
"""
Internal representation of network name
"""
def __init__(self, name: Union[str, List[bytes]] = None, suite='ndn2013'):
self.suite = suite
self.digest = None
if name:
if isinstance(name, str):
self.from_string(name)
else:
self._components = name
else:
self._components = []
def from_string(self, name: str):
"""Set the name from a string, components separated by /"""
# FIXME: handle '/' as part of a component, UTF etc
comps = name.split("/")[1:]
self._components = [c.encode('ascii') for c in comps]
def components_to_string(self) -> str:
# FIXME: handle '/' as part of a component, and binary components
if len(self._components) == 0:
return '/'
if type(self._components[0]) is str:
s = '/' + '/'.join([c for c in self._components])
return s
s = '/' + '/'.join([c.decode('ascii', 'replace') for c in self._components])
return s
def to_string(self) -> str:
"""Transform name to string, components separated by /"""
s = self.components_to_string()
if self.digest:
s += "[hashId=%s]" % binascii.hexlify(self.digest).decode('ascii', 'replace')
return s
def to_json(self) -> str:
"""encoded name as JSON"""
n = {}
n['suite'] = self.suite
n['comps'] = [binascii.hexlify(c).decode('ascii', 'replace') for c in self._components]
if self.digest:
n['dgest'] = binascii.hexlify(self.digest).decode('ascii', 'replace')
return json.dumps(n)
    def from_json(self, s: str) -> 'Name':
        n = json.loads(s)
        self.suite = n['suite']
        self._components = [binascii.unhexlify(c) for c in n['comps']]
        self.digest = binascii.unhexlify(n['dgest']) if 'dgest' in n else None
        return self
    def setDigest(self, digest: bytes = None):
self.digest = digest
return self
def __str__(self) -> str:
return self.to_string()
def __repr__(self) -> str:
return f'<PiCN.Packets.Name.Name {str(self)} at {hex(id(self))}>'
def __eq__(self, other) -> bool:
if type(other) is not Name:
return False
if self.suite != other.suite:
return False
return self.to_string() == other.to_string()
def __add__(self, other) -> 'Name':
components: List[bytes] = []
for c in self._components:
components.append(c)
if type(other) is list:
for comp in other:
if type(comp) is str:
components.append(comp.encode('ascii'))
elif type(comp) is bytes:
components.append(comp)
else:
raise TypeError('Not a Name, str, List[str] or List[bytes]')
elif type(other) is str:
components.append(other.encode('ascii'))
elif isinstance(other, Name):
for comp in other._components:
components.append(comp)
else:
raise TypeError('Not a Name, str, List[str] or List[bytes]')
return Name(components)
def __hash__(self) -> int:
return self._components.__str__().__hash__()
def __len__(self):
return len(self._components)
def is_prefix_of(self, name):
"""
Checks if self is prefix of a given name
:param name: name
:return: true if self is prefix of given name, false otherwise
"""
pfx = os.path.commonprefix([self._components, name._components])
return len(pfx) == len(self._components)
def has_prefix(self, name):
"""
Checks if self has a certain prefix
:param name: prefix
:return: true if self has given prefix, false otherwise
"""
return name.is_prefix_of(self)
@property
def components(self):
"""Name components"""
return self._components
@components.setter
def components(self, components):
self._components = components
@property
def string_components(self):
"""Name components"""
return [c.decode('ascii', 'replace') for c in self._components]
@string_components.setter
def string_components(self, string_components):
self._components = [c.encode('ascii') for c in string_components]
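# Illustrative usage sketch (the name below is arbitrary, not part of the library):
# n = Name("/ndn/some/content")
# n.components -> [b'ndn', b'some', b'content']
# Name("/ndn").is_prefix_of(n) -> True
# n + "chunk0" -> Name("/ndn/some/content/chunk0")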
| StarcoderdataPython |
1762325 | <reponame>hikaru-sysu/taichi<gh_stars>0
from taichi.lang.simt import warp
__all__ = ['warp']
| StarcoderdataPython |
3261587 | <gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.image_classification.abstract import ImageClassifier
from models.nn_utils import conv_layer, fc_layer
class ResBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(ResBlock, self).__init__()
self.layer1 = conv_layer(
in_channels, out_channels, kernel_size, stride, padding, use_batchnorm=True
)
self.layer2 = conv_layer(
out_channels,
out_channels,
kernel_size,
1,
padding,
use_batchnorm=True,
activation="none",
)
self.downsample = conv_layer(
in_channels, out_channels, kernel_size, 2, use_batchnorm=True, auto_pad=True
)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
residual = self.downsample(x) if self.stride != 1 else x
out = self.layer1(x)
out = self.layer2(out)
out += residual
return self.relu(out)
class ResNet(ImageClassifier):
def __init__(self, data_handler):
self.loss_function = F.cross_entropy
self.optimizer = torch.optim.Adam
ImageClassifier.__init__(self, data_handler, self.loss_function, self.optimizer)
self.create_conv_layers()
self.model = nn.Sequential(
self.conv1,
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
self.conv2,
self.conv3,
self.conv4,
self.conv5,
)
        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            fc_layer(512, self.num_classes),
            # no Softmax here: F.cross_entropy expects raw logits and applies
            # log-softmax internally, so an extra Softmax would hurt training
        )
def forward(self, x):
out = self.model(x)
return self.head(out)
def create_conv_layers(self):
self.conv1 = conv_layer(3, 64, 7, 2, use_batchnorm=True)
self.conv2 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=2),
ResBlock(64, 64, 3),
ResBlock(64, 64, 3),
)
self.conv3 = nn.Sequential(ResBlock(64, 128, 3, 2), ResBlock(128, 128, 3))
self.conv4 = nn.Sequential(ResBlock(128, 256, 3, 2), ResBlock(256, 256, 3))
self.conv5 = nn.Sequential(ResBlock(256, 512, 3, 2), ResBlock(512, 512, 3))
| StarcoderdataPython |
3351975 | <reponame>uri-yanover/sharik
from setuptools import setup, find_packages
setup(
name="sharik",
version="0.4.1",
packages=find_packages(),
description='A shar(1)-like utility with a programmatic fine-tuned API',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/uri-yanover/sharik',
entry_points={
'console_scripts': [
'sharik = sharik.cli:cli_main'
]
},
install_requires=['click', "dataclasses ; python_version<='3.6'", 'pydantic']
) | StarcoderdataPython |
73081 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2020-11-10 08:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
dependencies = [
('clims', '0020_transition'),
]
operations = [
migrations.AlterField(
model_name='transition',
name='user',
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transitions', to=settings.AUTH_USER_MODEL),
),
]
| StarcoderdataPython |
96999 | <reponame>cclauss/CommunityCellularManager<filename>smspdu/smspdu/__main__.py
#!/usr/bin/env python3
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from .pdu import dump
if __name__ == '__main__':
import sys
dump(sys.argv[1])
# Copyright (c) 2011 eKit.com Inc (http://www.ekit.com/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| StarcoderdataPython |
1782244 | """Test that various serving options work"""
import json
import subprocess
import time
import pytest
from tornado import httpclient
from .conftest import CI, DARWIN, LINUX, PYPY
if CI and DARWIN: # pragma: no cover
pytest.skip("skipping flaky MacOS tests", allow_module_level=True)
if CI and LINUX and PYPY: # pragma: no cover
pytest.skip("skipping flaky Linux/PyPy tests", allow_module_level=True)
@pytest.mark.parametrize("base_url", [None, "/@foo/"])
def test_serve(
an_empty_lite_dir, script_runner, base_url, an_unused_port
): # pragma: no cover
"""verify that serving kinda works"""
args = ["jupyter", "lite", "serve", "--port", f"{an_unused_port}"]
http_headers = {"x-foo": "bar"}
extra_http_headers = {"x-baz": "boo"}
all_headers = {**http_headers, **extra_http_headers}
config = {
"LiteBuildConfig": {
"http_headers": http_headers,
"extra_http_headers": extra_http_headers,
}
}
config_path = an_empty_lite_dir / "jupyter_lite_config.json"
config_path.write_text(json.dumps(config), encoding="utf-8")
if base_url:
args += ["--base-url", base_url]
else:
base_url = "/"
url = f"http://127.0.0.1:{an_unused_port}{base_url}"
server = subprocess.Popen(args, cwd=str(an_empty_lite_dir))
time.sleep(2)
app_urls = [""]
for app in ["lab", "retro", "repl"]:
app_urls += [
f"{app}/",
f"{app}/index.html",
]
if app == "retro":
app_urls += [f"{app}/tree/", f"{app}/tree/index.html"]
maybe_errors = [
_fetch_without_errors(f"{url}{frag}", expect_headers=all_headers)
for frag in app_urls
]
errors = [e for e in maybe_errors if e]
try:
assert not errors
finally:
_fetch_without_errors(f"{url}shutdown")
server.wait(timeout=10)
def _fetch_without_errors(url, retries=10, expect_headers=None):  # pragma: no cover
    errors = []
while retries:
retries -= 1
response = None
try:
client = httpclient.HTTPClient()
response = client.fetch(url)
assert b"jupyter-config-data" in response.body
# it worked, eventually: clear errors
errors = []
break
except Exception as err: # pragma: no cover
print(f"{err}: {retries} retries left...")
time.sleep(0.5)
errors = [err]
if response and expect_headers:
errors = []
for header, value in expect_headers.items():
try:
assert response.headers[header] == value
except Exception as err: # pragma: no cover
errors += [err]
return errors
| StarcoderdataPython |
3348287 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import enum
from pyuvm import *
import random
import time
@enum.unique
class Ops(enum.IntEnum):
"""Legal ops for the TinyALU"""
ADD = 1
AND = 2
XOR = 3
MUL = 4
class PythonProxy(uvm_component):
@staticmethod
def alu_op(A, B, op):
result = None # Make the linter happy
assert op in list(Ops), "The tinyalu op must be of type ops"
if op == Ops.ADD:
result = A + B
elif op == Ops.AND:
result = A & B
elif op == Ops.XOR:
result = A ^ B
elif op == Ops.MUL:
result = A * B
time.sleep(0.1) # Takes time as a simulation would.
return result
def build_phase(self):
# The FIFOs
self.stim_f = uvm_tlm_fifo("stim_f", self)
self.cmd_f = uvm_tlm_analysis_fifo("cmd_f", self)
self.result_f = uvm_tlm_analysis_fifo("result_f", self)
ConfigDB().set(None, "*", "PROXY", self)
# The Stimulus Ports (for send_op())
self.stim_put = uvm_put_port("stim_put", self)
self.stim_get = uvm_get_port("stim_get", self)
# The Command Ports (for get_cmd())
self.cmd_put = uvm_put_port("cmd_put", self)
self.cmd_get = uvm_get_port("cmd_get", self)
# The Result Ports (for get_result())
self.result_put = uvm_put_port("result_put",self)
self.result_get = uvm_get_port("result_get", self)
def connect_phase(self):
self.stim_put.connect(self.stim_f.put_export)
self.stim_get.connect(self.stim_f.get_export)
self.cmd_put.connect(self.cmd_f.put_export)
self.cmd_get.connect(self.cmd_f.get_export)
self.result_put.connect(self.result_f.put_export)
self.result_get.connect(self.result_f.get_export)
def send_op(self, A, B, op):
self.stim_put.put((A, B, op))
def get_cmd(self):
cmd = self.cmd_get.get()
return cmd
def get_result(self):
result = self.result_get.get()
return result
def run_phase(self):
while not ObjectionHandler().run_phase_complete():
(A, B, op) = self.stim_get.get()
result = self.alu_op(A, B, op)
self.cmd_put.put((A, B, op))
self.result_put.put(result)
| StarcoderdataPython |
3358035 | import logging
import os
import time
from flask import Flask, request, jsonify
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from deeppavlov import build_model
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[FlaskIntegration()])
try:
kgdg = build_model("kg_dial_generator.json", download=True)
test_res = kgdg(["What is the capital of Russia?"], [["Q159"]])
logger.info("model loaded, test query processed")
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(e)
raise e
app = Flask(__name__)
@app.route("/model", methods=["POST"])
def respond():
tm_st = time.time()
sentences = request.json["sentences"]
entities = request.json["entities"]
if sentences:
out_uttr = ["" for _ in sentences]
out_conf = [0.0 for _ in sentences]
else:
out_uttr = [""]
out_conf = [0.0]
f_sentences = []
f_entities = []
nf_numbers = []
for n, (sentence, entities_list) in enumerate(zip(sentences, entities)):
if len(sentence.split()) == 1 and not entities_list:
nf_numbers.append(n)
else:
f_sentences.append(sentence)
f_entities.append(entities_list)
try:
generated_utterances, confidences = kgdg(f_sentences, f_entities)
out_uttr = []
out_conf = []
cnt_fnd = 0
for i in range(len(sentences)):
if i in nf_numbers:
out_uttr.append("")
out_conf.append(0.0)
else:
out_uttr.append(generated_utterances[cnt_fnd])
out_conf.append(confidences[cnt_fnd])
cnt_fnd += 1
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(e)
logger.info(f"wikidata_dial_service exec time: {time.time() - tm_st}")
return jsonify([out_uttr, out_conf])
if __name__ == "__main__":
app.run(debug=False, host="0.0.0.0", port=3000)
| StarcoderdataPython |
3366507 | <reponame>jwestraadt/GrainSizeTools
# ============================================================================ #
# #
# This is part of the "GrainSizeTools Script" #
# A Python script for characterizing grain size from thin sections #
# #
# Copyright (c) 2014-present <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# Version 3.0.2 #
# For details see: http://marcoalopez.github.io/GrainSizeTools/ #
# download at https://github.com/marcoalopez/GrainSizeTools/releases #
# #
# ============================================================================ #
# ============================================================================ #
# Functions to generate the plots using the Python matplotlib library. #
# It uses hex color codes to set colors. #
# Save this file in the same directory as GrainSizeTools #
# ============================================================================ #
# import Python scientific modules
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm, gaussian_kde, shapiro, iqr
# plotting funtions
def distribution(data,
plot=('hist', 'kde'),
avg=('amean', 'gmean', 'median', 'mode'),
binsize='auto',
bandwidth='silverman',
**fig_kw):
""" Return a plot with the ditribution of (apparent or actual) grain sizes
in a dataset.
Parameters
----------
data : array_like
the size of the grains
plot : string, tuple or list; optional
the type of plot, either histogram ('hist'), kernel density estimate
('kde') or both ('hist', 'kde'). Default is both.
avg : string, tuple or list; optional
        the central tendency measures to show, either the arithmetic ('amean')
or geometric ('gmean') means, the median ('median'), and/or the
KDE-based mode ('mode'). Default all averages.
binsize : string or positive scalar; optional
If 'auto', it defines the plug-in method to calculate the bin size.
When integer or float, it directly specifies the bin size.
Default: the 'auto' method.
| Available plug-in methods:
| 'auto' (fd if sample_size > 1000 or Sturges otherwise)
| 'doane' (Doane's rule)
| 'fd' (Freedman-Diaconis rule)
| 'rice' (Rice's rule)
| 'scott' (Scott rule)
| 'sqrt' (square-root rule)
| 'sturges' (Sturge's rule)
bandwidth : string {'silverman' or 'scott'} or positive scalar; optional
the method to estimate the bandwidth or a scalar directly defining the
bandwidth. It uses the Silverman plug-in method by default.
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
Call functions
--------------
- gaussian_kde (from Scipy stats)
Examples
--------
>>> distribution(data['diameters'])
>>> distribution(data['diameters'], figsize=(6.4, 4.8))
Returns
-------
A plot showing the distribution of (apparent) grain sizes and
the location of the averages defined.
"""
fig, ax = plt.subplots(**fig_kw)
if 'hist' in plot:
if isinstance(binsize, (int, float)):
binsize = int(np.ceil((data.max() - data.min()) / binsize))
y_values, bins, __ = ax.hist(data,
bins=binsize,
range=(data.min(), data.max()),
density=True,
color='#80419d',
edgecolor='#C59fd7',
alpha=0.7)
print('=======================================')
print('Number of classes = ', len(bins) - 1)
print('binsize = ', round(bins[1] - bins[0], 2))
print('=======================================')
if 'kde' in plot:
# estimate kde first
if isinstance(bandwidth, (int, float)):
fixed_bw = bandwidth / np.std(data, ddof=1)
kde = gaussian_kde(data, bw_method=fixed_bw)
elif isinstance(bandwidth, str):
kde = gaussian_kde(data, bw_method=bandwidth)
bandwidth = round(kde.covariance_factor() * data.std(ddof=1), 2)
else:
raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")
x_values = np.linspace(data.min(), data.max(), num=1000)
y_values = kde(x_values)
print('=======================================')
print('KDE bandwidth = ', round(bandwidth, 2))
print('=======================================')
if 'hist' in plot:
ax.plot(x_values, y_values,
color='#2F4858')
else:
ax.plot(x_values, y_values,
color='#2F4858')
ax.fill_between(x_values, y_values,
color='#80419d',
alpha=0.65)
# plot the location of the averages
if 'amean' in avg:
amean = np.mean(data)
ax.vlines(amean, 0, np.max(y_values),
linestyle='solid',
color='#2F4858',
label='arith. mean',
linewidth=2.5)
if 'gmean' in avg:
gmean = np.exp(np.mean(np.log(data)))
ax.vlines(gmean, 0, np.max(y_values),
linestyle='solid',
color='#fec44f',
label='geo. mean')
if 'median' in avg:
median = np.median(data)
ax.vlines(median, 0, np.max(y_values),
linestyle='dashed',
color='#2F4858',
label='median',
linewidth=2.5)
if 'mode' in avg and 'kde' in plot:
mode = x_values[np.argmax(y_values)]
ax.vlines(mode, 0, np.max(y_values),
linestyle='dotted',
color='#2F4858',
label='mode',
linewidth=2.5)
ax.set_ylabel('density', color='#252525')
ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
ax.legend(loc='best', fontsize=16)
# ax.set_ylim(bottom=-0.001)
fig.tight_layout()
return fig, ax
def area_weighted(diameters, areas, binsize='auto', **fig_kw):
""" Generate an area-weighted histogram and returns different
area-weighted statistics.
Parameters
----------
diameters : array_like
the size of the grains
areas : array_like
the sectional areas of the grains
binsize : string or positive scalar, optional
If 'auto', it defines the plug-in method to calculate the bin size.
When integer or float, it directly specifies the bin size.
Default: the 'auto' method.
| Available plug-in methods:
| 'auto' (fd if sample_size > 1000 or Sturges otherwise)
| 'doane' (Doane's rule)
| 'fd' (Freedman-Diaconis rule)
| 'rice' (Rice's rule)
| 'scott' (Scott rule)
| 'sqrt' (square-root rule)
| 'sturges' (Sturge's rule)
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
Examples
--------
>>> area_weighted(data['diameters'], data['Areas'])
>>> area_weighted(data['diameters'], data['Areas'], binsize='doane', dpi=300)
"""
# estimate weighted mean
area_total = np.sum(areas)
weighted_areas = areas / area_total
weighted_mean = np.sum(diameters * weighted_areas)
# estimate mode interval
if type(binsize) is str:
histogram, bin_edges = np.histogram(diameters, bins=binsize, range=(0.0, diameters.max()))
h = bin_edges[1]
else:
bin_edges = np.arange(0.0, diameters.max() + binsize, binsize)
h = binsize
# estimate the cumulative areas of each grain size interval
cumulativeAreas = np.zeros(len(bin_edges))
for index, values in enumerate(bin_edges):
mask = np.logical_and(diameters >= values, diameters < (values + h))
area_sum = np.sum(areas[mask])
cumulativeAreas[index] = round(area_sum, 1)
# get the index of the modal interval
getIndex = np.argmax(cumulativeAreas)
print('=======================================')
print('DESCRIPTIVE STATISTICS')
print(f'Area-weighted mean grain size = {weighted_mean:0.2f} microns')
print('=======================================')
print('HISTOGRAM FEATURES')
print(f'The modal interval is {bin_edges[getIndex]:0.2f} - {bin_edges[getIndex] + h:0.2f} microns')
if type(binsize) is str:
print(f'The number of classes are {len(histogram)}')
print(f'The bin size is {h:0.2f} according to the {binsize} rule')
print('=======================================')
# normalize the y-axis values to percentage of the total area
totalArea = sum(cumulativeAreas)
cumulativeAreasNorm = [(x / float(totalArea)) * 100 for x in cumulativeAreas]
maxValue = max(cumulativeAreasNorm)
#make plot
fig, ax = plt.subplots(**fig_kw)
# figure aesthetics
ax.bar(bin_edges, cumulativeAreasNorm, width=h,
color='#55A868',
edgecolor='#FEFFFF',
align='edge',
alpha=1)
ax.vlines(weighted_mean, ymin=0, ymax=maxValue,
linestyle='--',
color='#1F1F1F',
label='area weighted mean',
linewidth=2)
ax.set_ylabel('normalized area fraction (%)', color='#252525')
ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
ax.legend(loc='best', fontsize=15)
fig.tight_layout()
return fig, ax
def normalized(data, avg='amean', bandwidth='silverman', **fig_kw):
"""Return a log-transformed normalized ditribution of the grain
population. This is useful to compare grain size distributions
beween samples with different average values.
Parameters
----------
data : array-like
the dataset
avg : str, optional
the normalization factor, either 'amean' or 'median'.
Default: 'amean'
bandwidth : str or scalar, optional
the bandwidth of the KDE, by default 'silverman'
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
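    Examples
    --------
    >>> normalized(data['diameters'])
    >>> normalized(data['diameters'], avg='median')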
"""
data = np.log(data)
amean = np.mean(data)
median = np.median(data)
# normalize the data
if avg == 'amean':
norm_factor = amean
norm_data = data / norm_factor
elif avg == 'median':
norm_factor = median
norm_data = data / median
else:
raise ValueError("Normalization factor has to be defined as 'amean' or 'median'")
# estimate KDE
if isinstance(bandwidth, (int, float)):
fixed_bw = bandwidth / np.std(norm_data, ddof=1)
kde = gaussian_kde(norm_data, bw_method=fixed_bw)
elif isinstance(bandwidth, str):
kde = gaussian_kde(norm_data, bw_method=bandwidth)
bandwidth = round(kde.covariance_factor() * norm_data.std(ddof=1), 2)
else:
raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")
x_values = np.linspace(norm_data.min(), norm_data.max(), num=1000)
y_values = kde(x_values)
# Provide details
print('=======================================')
if avg == 'amean':
print(f'Normalized SD = {np.std(norm_data):0.3f}')
if avg == 'median':
print(f'Normalized IQR = {iqr(norm_data):0.3f}')
print('KDE bandwidth = ', round(bandwidth, 2))
print('=======================================')
#make plot
fig, ax = plt.subplots(**fig_kw)
ax.plot(x_values, y_values,
color='#2F4858')
ax.fill_between(x_values, y_values,
color='#d1346b',
alpha=0.5)
ax.vlines(amean / norm_factor, 0, np.max(y_values),
linestyle='solid',
color='#2F4858',
label='arith. mean',
linewidth=2.5)
ax.vlines(median / norm_factor, 0, np.max(y_values),
linestyle='dashed',
color='#2F4858',
label='median',
linewidth=2.5)
ax.set_ylabel('density', color='#252525')
if avg == 'amean':
ax.set_xlabel(r'normalized log grain size ($y / \bar{y}$)', color='#252525')
else:
ax.set_xlabel(r'normalized log grain size ($y / med_{y}$)', color='#252525')
ax.legend(loc='best', fontsize=15)
fig.tight_layout()
return fig, ax
def qq_plot(data, percent=2, **fig_kw):
""" Test whether the underlying distribution follows a lognormal
distribution using a quantile–quantile (q-q) plot and a Shapiro-
Wilk test.
Parameters
----------
data : array-like
the apparent diameters or any other type of data
percent : scalar between 0 and 100
the percentil interval to estimate, default is 2 %
Call functions
--------------
shapiro from scipy's stats
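    Examples
    --------
    >>> qq_plot(data['diameters'])
    >>> qq_plot(data['diameters'], percent=5)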
"""
data = np.sort(np.log(data))
# estimate percentiles in the actual data
percentil = np.arange(1, 100, percent)
actual_data = np.percentile(data, percentil)
# estimate percentiles for theoretical data
mean, std = np.mean(data), np.std(data)
theoretical_data = norm.ppf(percentil / 100, loc=mean, scale=std)
min_val, max_val = theoretical_data.min(), theoretical_data.max()
# make the plot
fig, ax = plt.subplots(**fig_kw)
ax.plot([min_val, max_val], [min_val, max_val],
'-',
color='#2F4858',
label='perfect lognormal')
ax.plot(theoretical_data, actual_data,
'o',
color='C0',
alpha=0.5)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel('theoretical', color='#252525')
ax.set_ylabel('observed', color='#252525')
ax.legend(loc='best', fontsize=18)
# ax.set_aspect('equal')
fig.tight_layout()
# Shapiro-Wilk test
if len(data) > 250:
W, p_value = shapiro(np.random.choice(data, size=250))
else:
W, p_value = shapiro(data)
print('=======================================')
print('Shapiro-Wilk test (lognormal):')
print(f'{W:0.2f}, {p_value:0.2f} (test statistic, p-value)')
if p_value >= 0.05:
print('It looks like a lognormal distribution')
print('(⌐■_■)')
else:
        print("It doesn't look like a lognormal distribution (p-value < 0.05)")
print('(╯°□°)╯︵ ┻━┻')
print('=======================================')
return fig, ax
if __name__ == '__main__':
pass
else:
print('module plot imported')
| StarcoderdataPython |
1785702 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-07-01 02:55
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bruv', '0004_auto_20160518_1404'),
]
operations = [
migrations.AlterField(
model_name='set',
name='depth',
field=models.DecimalField(decimal_places=2, help_text='m', max_digits=12, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))]),
),
]
| StarcoderdataPython |
1670868 | <filename>src/githyperlink/cli.py
import sys
import subprocess
from click import command, argument
from .main import get_hyperlink
@command()
@argument("path", required=False)
def get_and_print_hyperlink(path=None):
if path is None:
path = "."
try:
url = get_hyperlink(path)
except subprocess.CalledProcessError:
# `git` error is already printed to stderr. (Most likely error: not a git repo).
sys.exit(1) # So, just quit.
else:
print(url)
| StarcoderdataPython |
67345 | <filename>test_crf_layer.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Use CRF as a neural network layer built with Gluon to conduct training and prediction tests.
# @author:kenjewu
# @date:2018/10/05
import time
import numpy as np
import mxnet as mx
from mxnet import nd, gluon, autograd
from crf import CRF
ctx = mx.gpu()
START_TAG = "<bos>"
STOP_TAG = "<eos>"
# generate pseudo data
tag2idx = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
x = nd.random.normal(shape=(10, 5), ctx=ctx)
y = nd.array([[0, 1, 0, 2, 1, 0, 1],
[0, 2, 0, 0, 2, 0, 1],
[1, 1, 1, 0, 1, 0, 2],
[0, 0, 2, 2, 0, 1, 0],
[1, 1, 1, 1, 2, 2, 1],
[0, 1, 2, 2, 0, 0, 1],
[2, 2, 0, 2, 0, 1, 1],
[1, 1, 2, 0, 1, 0, 0],
[0, 2, 1, 2, 1, 2, 0],
[0, 1, 2, 0, 1, 1, 2]], ctx=ctx)
dataset_train = gluon.data.ArrayDataset(x, y)
iter_train = gluon.data.DataLoader(dataset_train, batch_size=5, shuffle=True)
class CRF_MODEL(gluon.nn.Block):
    '''A small Gluon network wrapping a single CRF layer.
    Although there is only one CRF layer in the model,
    this lets us test whether CRF can be reused as a custom layer.
    '''
def __init__(self, tag2idx, ctx=mx.gpu(), ** kwargs):
super(CRF_MODEL, self).__init__(** kwargs)
with self.name_scope():
self.crf = CRF(tag2idx, ctx=ctx)
def forward(self, x):
return self.crf(x)
# build a model
model = CRF_MODEL(tag2idx, ctx=ctx)
model.initialize(ctx=ctx)
# print params of the model
print(model.collect_params())
print(model.collect_params()['crf_model0_crf0_transitions'].data())
optimizer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': 0.01, 'wd': 1e-4})
# train
start_time = time.time()
for epoch in range(100):
for batch_x, batch_y in iter_train:
batch_x = nd.broadcast_axis(nd.expand_dims(batch_x, axis=0), axis=0, size=7)
with autograd.record():
# loss
neg_log_likelihood = model.crf.neg_log_likelihood(batch_x, batch_y)
# backward and update params
neg_log_likelihood.backward()
optimizer.step(5)
print(model.collect_params()['crf_model0_crf0_transitions'].data())
# predict
print(model(nd.broadcast_axis(nd.expand_dims(x, axis=0), axis=0, size=7)))
print('use {0} secs!'.format(time.time()-start_time))
| StarcoderdataPython |
120642 | """Support for showing the time in a different time zone."""
from __future__ import annotations
from datetime import tzinfo
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_TIME_ZONE
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
CONF_TIME_FORMAT = "time_format"
DEFAULT_NAME = "Worldclock Sensor"
DEFAULT_TIME_STR_FORMAT = "%H:%M"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TIME_ZONE): cv.time_zone,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIME_FORMAT, default=DEFAULT_TIME_STR_FORMAT): cv.string,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the World clock sensor."""
time_zone = dt_util.get_time_zone(config[CONF_TIME_ZONE])
async_add_entities(
[
WorldClockSensor(
time_zone,
config[CONF_NAME],
config[CONF_TIME_FORMAT],
)
],
True,
)
class WorldClockSensor(SensorEntity):
"""Representation of a World clock sensor."""
_attr_icon = "mdi:clock"
def __init__(self, time_zone: tzinfo | None, name: str, time_format: str) -> None:
"""Initialize the sensor."""
self._attr_name = name
self._time_zone = time_zone
self._time_format = time_format
async def async_update(self) -> None:
"""Get the time and updates the states."""
self._attr_native_value = dt_util.now(time_zone=self._time_zone).strftime(
self._time_format
)
| StarcoderdataPython |
71872 | # coding=utf-8
from .abstract import ContactActionAbstract
from .contants import *
from .create import ContactCreateAction
from .delete import ContactDeleteAction
from .read import ContactReadAction
from .update import ContactUpdateAction
from .write import ContactWriteAction
| StarcoderdataPython |
1630990 | <reponame>thejohnfreeman/picard
"""Picard combines the idea of Ansible with the execution of Make."""
# I'm only slightly worried about the rebindings of ``file`` and ``rule``...
from picard.api import make, sync
from picard.context import Context
from picard.file import (
FileRecipePostConditionError, FileTarget, file, file_target
)
from picard.pattern import pattern
from picard.rule import rule
from picard.shell import sh
from picard.typing import Target
| StarcoderdataPython |
3398110 | import argparse
import os
import os.path as osp
from shutil import copyfile
import mmcv
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--annotation_path", default="/data/coco_train.json")
parser.add_argument("--image_root", default="/data/train")
parser.add_argument("--output_root", default="/data/sources")
return parser.parse_args()
def main():
args = parse_args()
annotations = mmcv.load(args.annotation_path)
for sample in tqdm(annotations["images"]):
source_root = osp.join(args.output_root, sample["source"])
os.makedirs(source_root, exist_ok=True)
copyfile(osp.join(args.image_root, sample["file_name"]), osp.join(source_root, sample["file_name"]))
if __name__ == "__main__":
main()
| StarcoderdataPython |
173984 | from django.db import models
# Create your models here.
class Stocks(models.Model):
ticker = models.CharField(max_length=5, default='STOCK',)
date = models.DateField()
open = models.IntegerField()
high = models.IntegerField()
low = models.IntegerField()
close = models.IntegerField()
volume = models.IntegerField()
after1 = models.IntegerField()
after5 = models.IntegerField()
after20 = models.IntegerField()
plain1 = models.IntegerField()
plain5 = models.IntegerField()
plain20 = models.IntegerField()
grouped1 = models.IntegerField()
grouped5 = models.IntegerField()
grouped20 = models.IntegerField()
onehot1 = models.IntegerField()
onehot5 = models.IntegerField()
onehot20 = models.IntegerField()
class PlainStocks(models.Model):
ticker = models.CharField(max_length=5, default='STOCK',)
date = models.DateField()
open = models.IntegerField()
high = models.IntegerField()
low = models.IntegerField()
close = models.IntegerField()
volume = models.IntegerField()
obv = models.FloatField()
co = models.FloatField()
macd = models.FloatField()
signal = models.FloatField()
histogram = models.FloatField()
bollinger_high = models.FloatField()
bollinger_low = models.FloatField()
class GroupedStocks(models.Model):
ticker = models.CharField(max_length=5, default='STOCK',)
date = models.DateField()
obv_comparison = models.IntegerField()
obv_position = models.IntegerField()
co_comparison = models.IntegerField()
co_position = models.IntegerField()
macd_comparison = models.IntegerField()
macd_position = models.IntegerField()
signal_comparison = models.IntegerField()
signal_position = models.IntegerField()
histogram_comparison = models.IntegerField()
histogram_position = models.IntegerField()
bb_condition = models.IntegerField()
class OneHotStocks(models.Model):
ticker = models.CharField(max_length=5, default='STOCK',)
date = models.DateField()
obv_comparison_naik = models.IntegerField()
obv_comparison_tetap = models.IntegerField()
obv_comparison_turun = models.IntegerField()
obv_position_positif = models.IntegerField()
obv_position_negatif = models.IntegerField()
obv_position_nol = models.IntegerField()
co_position_positif = models.IntegerField()
co_position_negatif = models.IntegerField()
co_position_nol = models.IntegerField()
co_comparison_naik = models.IntegerField()
co_comparison_turun = models.IntegerField()
co_comparison_tetap = models.IntegerField()
macd_position_positif = models.IntegerField()
macd_position_negatif = models.IntegerField()
macd_position_nol = models.IntegerField()
macd_comparison_naik = models.IntegerField()
macd_comparison_turun = models.IntegerField()
macd_comparison_tetap = models.IntegerField()
signal_position_positif = models.IntegerField()
signal_position_negatif = models.IntegerField()
signal_position_nol = models.IntegerField()
signal_comparison_naik = models.IntegerField()
signal_comparison_turun = models.IntegerField()
signal_comparison_tetap = models.IntegerField()
histogram_position_positif = models.IntegerField()
histogram_position_negatif = models.IntegerField()
histogram_position_nol = models.IntegerField()
histogram_comparison_naik = models.IntegerField()
histogram_comparison_turun = models.IntegerField()
histogram_comparison_tetap = models.IntegerField()
bb_condition_overbought = models.IntegerField()
bb_condition_oversold = models.IntegerField()
bb_condition_normal = models.IntegerField() | StarcoderdataPython |
4814575 | <reponame>slp-ntua/slp-labs<filename>lab2/dnn/torch_dataset.py
import os
import kaldi_io
import numpy as np
from torch.utils.data import Dataset
class TorchSpeechDataset(Dataset):
def __init__(self, recipe_dir, ali_dir, dset, feature_context=2):
self.recipe_dir = recipe_dir
self.ali_dir = ali_dir
self.feature_context = feature_context
self.dset = dset
self.feats, self.labels = self.read_data()
self.feats, self.labels, self.uttids, self.end_indices = self.unify_data(self.feats, self.labels)
def read_data(self):
feat_path = os.path.join(self.recipe_dir, 'data', self.dset, 'feats.scp')
label_path = os.path.join(self.recipe_dir, self.ali_dir)
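        # Build the Kaldi feature pipeline: per-speaker cepstral mean/variance
        # normalisation, delta and delta-delta features, and optional frame
        # splicing to add left/right temporal context.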
feat_opts = "apply-cmvn --utt2spk=ark:{0} ark:{1} ark:- ark:- |". \
format(
os.path.join(self.recipe_dir, 'data', self.dset, 'utt2spk'),
os.path.join(self.recipe_dir, 'data', self.dset, self.dset + '_cmvn_speaker.ark')
)
feat_opts += " add-deltas --delta-order=2 ark:- ark:- |"
if self.feature_context:
feat_opts += " splice-feats --left-context={0} --right-context={0} ark:- ark:- |". \
format(str(self.feature_context))
label_opts = 'ali-to-pdf'
feats = {k: m for k, m in kaldi_io.read_mat_ark(
'ark:copy-feats scp:{} ark:- | {}'.format(feat_path, feat_opts))}
lab = {k: v for k, v in kaldi_io.read_vec_int_ark(
'gunzip -c {0}/ali*.gz | {1} {0}/final.mdl ark:- ark:-|'.format(label_path, label_opts))
if k in feats}
feats = {k: v for k, v in feats.items() if k in lab}
return feats, lab
def unify_data(self, feats, lab, optional_array=None):
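        # Concatenate the per-utterance feature matrices and label vectors
        # (sorted by utterance id) into single arrays, and record the cumulative
        # end index of every utterance so sentence boundaries can be recovered.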
fea_conc = np.concatenate([v for k, v in sorted(feats.items())])
lab_conc = np.concatenate([v for k, v in sorted(lab.items())])
if optional_array:
opt_conc = np.concatenate([v for k, v in sorted(optional_array.items())])
names = [k for k, v in sorted(lab.items())]
end_snt = 0
end_indices = []
for k, v in sorted(lab.items()):
end_snt += v.shape[0]
end_indices.append(end_snt)
lab = lab_conc.astype('int64')
if optional_array:
opt = opt_conc.astype('int64')
return fea_conc, lab, opt, names, end_indices
return fea_conc, lab, names, end_indices
def __getitem__(self, idx):
return self.feats[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
if __name__ == "__main__":
data = TorchSpeechDataset("./", "./exp_tri1_ali_train", "train")
import ipdb; ipdb.set_trace()
| StarcoderdataPython |
142321 | <filename>recipes/xkbcommon/all/conanfile.py<gh_stars>0
from conans import ConanFile, Meson, tools
from conans.errors import ConanInvalidConfiguration
import os
class XkbcommonConan(ConanFile):
name = "xkbcommon"
description = "keymap handling library for toolkits and window systems"
topics = ("conan", "xkbcommon", "keyboard")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/xkbcommon/libxkbcommon"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_x11": [True, False],
"with_wayland": [True, False],
"xkbregistry": [True, False],
"docs": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"with_x11": True,
"with_wayland": False,
"xkbregistry": True,
"docs": False
}
generators = "pkg_config"
_meson = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
@property
def _has_xkbregistry_option(self):
return tools.Version(self.version) >= "1.0.0"
def config_options(self):
if not self._has_xkbregistry_option:
del self.options.xkbregistry
def configure(self):
if self.settings.os != "Linux":
raise ConanInvalidConfiguration("This library is only compatible with Linux")
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def requirements(self):
self.requires("xorg/system")
if self.options.get_safe("xkbregistry"):
self.requires("libxml2/2.9.10")
def build_requirements(self):
self.build_requires("meson/0.56.0")
self.build_requires("bison/3.7.1")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "libxkbcommon-" + self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_meson(self):
if self._meson:
return self._meson
defs={
"enable-wayland": self.options.with_wayland,
"enable-docs": self.options.docs,
"enable-x11": self.options.with_x11,
"libdir": os.path.join(self.package_folder, "lib"),
"default_library": ("shared" if self.options.shared else "static")}
if self._has_xkbregistry_option:
defs["enable-xkbregistry"] = self.options.xkbregistry
# workaround for https://github.com/conan-io/conan-center-index/issues/3377
# FIXME: do not remove this pkg-config file once xorg recipe fixed
xeyboard_config_pkgfile = os.path.join(self.build_folder, "xkeyboard-config.pc")
if os.path.isfile(xeyboard_config_pkgfile):
os.remove(xeyboard_config_pkgfile)
self._meson = Meson(self)
self._meson.configure(
defs=defs,
source_folder=self._source_subfolder,
build_folder=self._build_subfolder,
pkg_config_paths=self.build_folder)
return self._meson
def build(self):
with tools.run_environment(self):
meson = self._configure_meson()
meson.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
meson = self._configure_meson()
meson.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.components["libxkbcommon"].libs = ["xkbcommon"]
self.cpp_info.components["libxkbcommon"].name = "xkbcommon"
self.cpp_info.components["libxkbcommon"].requires = ["xorg::xkeyboard-config"]
if self.options.with_x11:
self.cpp_info.components["libxkbcommon-x11"].libs = ["xkbcommon-x11"]
self.cpp_info.components["libxkbcommon-x11"].name = "xkbcommon-x11"
self.cpp_info.components["libxkbcommon-x11"].requires = ["libxkbcommon", "xorg::xcb", "xorg::xcb-xkb"]
if self.options.get_safe("xkbregistry"):
self.cpp_info.components["libxkbregistry"].libs = ["xkbregistry"]
self.cpp_info.components["libxkbregistry"].name = "xkbregistry"
self.cpp_info.components["libxkbregistry"].requires = ["libxml2::libxml2"]
if tools.Version(self.version) >= "1.0.0":
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
| StarcoderdataPython |
3286827 | <gh_stars>0
from credstuffer.proxy.grabber import ProxyGrabber
from credstuffer.proxy.provider import ProxyProvider
| StarcoderdataPython |
23872 | #!/usr/bin/env python
import random
import argparse
def generate_passwords(password_file_path):
password_file = open(password_file_path, 'w')
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789_-!*'
secret_key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
password_file.write("SECRET_KEY = '%s'\n" % secret_key)
password_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('password_file_path', help='Where password file will be placed')
args = parser.parse_args()
generate_passwords(args.password_file_path)
| StarcoderdataPython |
1642064 | from .revenues import Revenues
import pandas as pd
class ForwardPlanning:
def __init__(self, worksite):
"""Gestion prévisionnelle ( à la fin de la synthèse chantier )."""
self.worksite = worksite
for name in worksite.category_names:
if name != "PRODUITS" and name != "DIVERS":
if worksite.category_names.index(name) == 0:
self.forward_planning = pd.DataFrame(
columns=worksite.categories[name].columns.copy()
)
                line = worksite.categories[name].iloc[-1]  # total row
line.name = name
self.forward_planning = self.forward_planning.append(
line, ignore_index=False
)
self.add_total()
self.format_forward_planning()
def calculate_margins(
self, month, year, with_year=True, with_cumul=False, with_month=False
):
        # Compute and build the margin-at-progress table
revenues = Revenues(self.worksite.expenses.data)
year_revenues = revenues.calculate_year_revenues(year)
anterior_revenues = revenues.calculate_cumulative_with_year_limit(
year - 1
)
cumulative_revenues = year_revenues + anterior_revenues
month_revenues = revenues.calculate_month_revenues(month, year)
year_expenses = self.worksite.calculate_year_expenses(month, year)
cumulative_expenses = self.worksite.calculate_cumul_expenses(
month, year
)
anterior_expenses = cumulative_expenses - year_expenses
month_expenses = self.worksite.calculate_month_expenses(month, year)
margin_year = year_revenues - year_expenses
margin_anterior = anterior_revenues - anterior_expenses
margin_total = cumulative_revenues - cumulative_expenses
margin_month = month_revenues - month_expenses
percent_margin_year = (
(margin_year / year_revenues) * 100 if year_revenues != 0 else 0
)
percent_margin_anterior = (
(margin_anterior / anterior_revenues) * 100
if anterior_revenues != 0
else 0
)
percent_margin_total = (
(margin_total / cumulative_revenues) * 100
if cumulative_revenues != 0
else 0
)
percent_margin_month = (
(margin_month / month_revenues) * 100 if month_revenues != 0 else 0
)
row_indexes = []
data = []
column_indexes = ["CA", "Dépenses", "Marge brute", "Marge brute %"]
if with_year:
for i in ["Année courante", "Années antérieures"]:
row_indexes.append(i)
for i in [
[
year_revenues,
year_expenses,
margin_year,
percent_margin_year,
],
[
anterior_revenues,
anterior_expenses,
margin_anterior,
percent_margin_anterior,
],
]:
data.append(i)
if with_cumul:
for i in ["Cumulé"]:
row_indexes.append(i)
for i in [
[
cumulative_revenues,
cumulative_expenses,
margin_total,
percent_margin_total,
]
]:
data.append(i)
if with_month:
for i in ["Mois"]:
row_indexes.append(i)
for i in [
[
month_revenues,
month_expenses,
margin_month,
percent_margin_month,
]
]:
data.append(i)
        out = pd.DataFrame(  # Build the table
data=data, index=row_indexes, columns=column_indexes
)
out["CA"] = out["CA"].apply("{:0,.2f}€".format)
out["Dépenses"] = out["Dépenses"].apply("{:0,.2f}€".format)
out["Marge brute"] = out["Marge brute"].apply("{:0,.2f}€".format)
out["Marge brute %"] = out["Marge brute %"].apply("{:0,.2f}%".format)
return out
def calculate_pfdc_tab(self, budget):
        # Compute and build the end-of-worksite margin table
column_indexes = ["PFDC"]
row_indexes = ["CA Chantier", "Marge brute", "Marge brute %"]
try:
sell_price = (
budget.loc[
budget["POSTE"] == "PRIX DE VENTE",
self.worksite.worksite_name,
].sum()
if budget is not None
else 0
)
except Exception:
sell_price = 0
total_sell = sell_price - self.worksite.get_pfdc_total()
percent = total_sell / (sell_price) if (sell_price) != 0 else 0
data = [sell_price, total_sell, percent * 100]
        out = pd.DataFrame(  # Build the table
data=data, index=row_indexes, columns=column_indexes
)
out.loc["CA Chantier"] = out.loc["CA Chantier"].apply(
"{:0,.2f}€".format
)
out.loc["Marge brute"] = out.loc["Marge brute"].apply(
"{:0,.2f}€".format
)
out.loc["Marge brute %"] = out.loc["Marge brute %"].apply(
"{:0,.2f}%".format
)
return out
def add_total(self):
columns = ["Dépenses du mois","Dépenses cumulées",\
"Budget","RAD","PFDC","Ecart PFDC/Budget"]
data = [
self.forward_planning["Dépenses du mois"].sum(),
self.forward_planning["Dépenses cumulées"].sum(),
self.forward_planning["Budget"].sum(),
self.forward_planning["RAD"].sum(),
self.forward_planning["PFDC"].sum(),
self.forward_planning["Ecart PFDC/Budget"].sum()
]
line = pd.Series(data=data,index=columns)
line.name = "TOTAL"
self.forward_planning = self.forward_planning.append(
line, ignore_index=False
)
def format_forward_planning(self):
self.forward_planning = self.forward_planning.drop('Marché',errors='ignore',axis=1)
self.forward_planning = self.forward_planning.drop('Avenants',errors='ignore',axis=1)
self.forward_planning["Budget"] = self.forward_planning["Budget"].apply("{:0,.2f}€".format)
self.forward_planning["Dépenses du mois"] = self.forward_planning["Dépenses du mois"].apply("{:0,.2f}€".format)
self.forward_planning["Dépenses cumulées"] = self.forward_planning["Dépenses cumulées"].apply("{:0,.2f}€".format)
self.forward_planning["RAD"] = self.forward_planning["RAD"].apply("{:0,.2f}€".format)
self.forward_planning["PFDC"] = self.forward_planning["PFDC"].apply("{:0,.2f}€".format)
self.forward_planning["Ecart PFDC/Budget"] = self.forward_planning["Ecart PFDC/Budget"].apply("{:0,.2f}€".format)
| StarcoderdataPython |
1680200 | <filename>setup.py
from distutils.core import setup
setup(
name='wallet-utils',
version='0.0.0',
packages=['wallet_utils'],
url='',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Utilities for dealing with crypto wallets (support for HD wallets). Makes use of the excellent '
'bip32utils library, which does the EC crypto key derivation heavy lifting.',
requires=['bip32utils', 'pysha3']
)
| StarcoderdataPython |
3233294 | <reponame>sandeep-krishna/100DaysOfCode
'''
Life the universe and Everything
Your program is to use the brute-force approach in order to find the Answer to Life, the Universe, and Everything. More precisely... rewrite small numbers from input to output.
Stop processing input after reading in the number 42.
All numbers at input are integers of one or two digits.
SAMPLE INPUT
1
2
88
42
99
SAMPLE OUTPUT
1
2
88
'''
num = int(input())
while num != 42:
print(num)
num = int(input())
| StarcoderdataPython |
# This script starts the salt-master service and the salt-minion service
# For each device it starts a salt-proxy daemon
import os
import yaml
print ('starting salt master')
os.system('docker exec -it salt service salt-master restart')
print ('starting salt minion')
os.system('docker exec -it salt service salt-minion restart')
with open('pillar/top.sls', 'r') as f:
    top_file = yaml.safe_load(f.read())
for item in top_file['base']:
if item != '*':
print ('starting salt proxy for device ' + item)
shell_cmd = 'docker exec -it salt salt-proxy -d --proxyid=' + item
os.system(shell_cmd) | StarcoderdataPython |
1648031 | # Copyright (c) 2019 <NAME>
import numpy as np
from PokerRL.cfr._CFRBase import CFRBase as _CFRBase
class CFRPlus(_CFRBase):
def __init__(self,
name,
chief_handle,
game_cls,
agent_bet_set,
other_agent_bet_set=None,
starting_stack_sizes=None,
delay=0, ):
"""
        delay (int): Linear Averaging delay of CFR+ (only applicable if
            "cfr_plus" is True)
"""
super().__init__(name=name,
chief_handle=chief_handle,
game_cls=game_cls,
starting_stack_sizes=starting_stack_sizes,
agent_bet_set=agent_bet_set,
other_agent_bet_set=other_agent_bet_set,
algo_name="CFRp_delay" + str(delay)
)
self.delay = delay
self.reset()
def _evaluate_avg_strats(self):
if self._iter_counter > self.delay:
return super()._evaluate_avg_strats()
def _regret_formula_after_first_it(self, ev_all_actions, strat_ev, last_regrets):
return np.maximum(ev_all_actions - strat_ev + last_regrets, 0)
def _regret_formula_first_it(self, ev_all_actions, strat_ev):
return np.maximum(ev_all_actions - strat_ev, 0) # not max of axis; this is like relu
def _compute_new_strategy(self, p_id):
for t_idx in range(len(self._trees)):
def _fill(_node):
if _node.p_id_acting_next == p_id:
N = len(_node.children)
_reg = _node.data["regret"]
_reg_sum = np.expand_dims(np.sum(_reg, axis=1), axis=1).repeat(N, axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
_node.strategy = np.where(
_reg_sum > 0.0,
_reg / _reg_sum,
np.full(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N,), fill_value=1.0 / N,
dtype=np.float32)
)
for c in _node.children:
_fill(c)
_fill(self._trees[t_idx].root)
def _add_strategy_to_average(self, p_id):
def _fill(_node):
if _node.p_id_acting_next == p_id:
# if self._iter_counter > self.delay:
# current_weight = np.sum(np.arange(self.delay + 1, self._iter_counter + 1))
# new_weight = self._iter_counter - self.delay + 1
# m_old = current_weight / (current_weight + new_weight)
# m_new = new_weight / (current_weight + new_weight)
# _node.data["avg_strat"] = m_old * _node.data["avg_strat"] + m_new * _node.strategy
# assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
# elif self._iter_counter == self.delay:
# _node.data["avg_strat"] = np.copy(_node.strategy)
# assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
contrib = _node.strategy * np.expand_dims(_node.reach_probs[p_id], axis=1) * (self._iter_counter + 1)
if self._iter_counter > 0:
_node.data["avg_strat_sum"] += contrib
else:
_node.data["avg_strat_sum"] = contrib
_s = np.expand_dims(np.sum(_node.data["avg_strat_sum"], axis=1), axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
_node.data["avg_strat"] = np.where(_s == 0,
np.full(shape=len(_node.allowed_actions),
fill_value=1.0 / len(_node.allowed_actions)),
_node.data["avg_strat_sum"] / _s
)
assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
for c in _node.children:
_fill(c)
for t_idx in range(len(self._trees)):
_fill(self._trees[t_idx].root)
| StarcoderdataPython |
3266211 | <filename>tests/test_bump_create_commit_message.py<gh_stars>10-100
import sys
from pathlib import Path
from textwrap import dedent
import pytest
from packaging.version import Version
from commitizen import bump, cli, cmd, exceptions
conversion = [
(
("1.2.3", "1.3.0", "bump: $current_version -> $new_version [skip ci]"),
"bump: 1.2.3 -> 1.3.0 [skip ci]",
),
(("1.2.3", "1.3.0", None), "bump: version 1.2.3 → 1.3.0"),
(("1.2.3", "1.3.0", "release $new_version"), "release 1.3.0"),
]
@pytest.mark.parametrize("test_input,expected", conversion)
def test_create_tag(test_input, expected):
current_version, new_version, message_template = test_input
new_tag = bump.create_commit_message(
Version(current_version), Version(new_version), message_template
)
assert new_tag == expected
@pytest.mark.parametrize("retry", (True, False))
def test_bump_pre_commit_changelog(tmp_commitizen_project, mocker, freezer, retry):
freezer.move_to("2022-04-01")
testargs = ["cz", "bump", "--changelog", "--yes"]
if retry:
testargs.append("--retry")
else:
pytest.xfail("it will fail because pre-commit will reformat CHANGELOG.md")
mocker.patch.object(sys, "argv", testargs)
with tmp_commitizen_project.as_cwd():
# Configure prettier as a pre-commit hook
Path(".pre-commit-config.yaml").write_text(
"""
repos:
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v2.6.2
hooks:
- id: prettier
stages: [commit]
"""
)
# Prettier inherits editorconfig
Path(".editorconfig").write_text(
"""
[*]
indent_size = 4
"""
)
cmd.run("git add -A")
cmd.run("git commit -m 'fix: _test'")
cmd.run("pre-commit install")
cli.main()
# Pre-commit fixed last line adding extra indent and "\" char
assert Path("CHANGELOG.md").read_text() == dedent(
"""\
## 0.1.1 (2022-04-01)
### Fix
- \\_test
"""
)
@pytest.mark.parametrize("retry", (True, False))
def test_bump_pre_commit_changelog_fails_always(
tmp_commitizen_project, mocker, freezer, retry
):
freezer.move_to("2022-04-01")
testargs = ["cz", "bump", "--changelog", "--yes"]
if retry:
testargs.append("--retry")
mocker.patch.object(sys, "argv", testargs)
with tmp_commitizen_project.as_cwd():
Path(".pre-commit-config.yaml").write_text(
"""
repos:
- repo: local
hooks:
- id: forbid-changelog
name: changelogs are forbidden
entry: changelogs are forbidden
language: fail
files: CHANGELOG.md
"""
)
cmd.run("git add -A")
cmd.run("git commit -m 'feat: forbid changelogs'")
cmd.run("pre-commit install")
with pytest.raises(exceptions.BumpCommitFailedError):
cli.main()
| StarcoderdataPython |
1671516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
More or less a Python port of the Stewart method from the R SpatialPosition package
(https://github.com/Groupe-ElementR/SpatialPosition/).
@author: mthh
"""
import numpy as np
from matplotlib.pyplot import contourf
from shapely import speedups
from shapely.ops import unary_union, transform
from shapely.geometry import Polygon, MultiPolygon
from geopandas import GeoDataFrame
try:
from jenkspy import jenks_breaks
except ImportError:
    jenks_breaks = None
from .helpers_classif import get_opt_nb_class, maximal_breaks, head_tail_breaks
if speedups.available and not speedups.enabled: speedups.enable()
def quick_idw(input_geojson_points, variable_name, power, nb_class,
nb_pts=10000, resolution=None, disc_func=None,
mask=None, user_defined_breaks=None,
variable_name2=None, output='GeoJSON', **kwargs):
"""
Function acting as a one-shot wrapper around SmoothIdw object.
Read a file of point values and optionnaly a mask file,
return the smoothed representation as GeoJSON or GeoDataFrame.
Parameters
----------
input_geojson_points : str
Path to file to use as input (Points/Polygons) or GeoDataFrame object,
must contains a relevant numerical field.
variable_name : str
The name of the variable to use (numerical field only).
power : int or float
The power of the function.
nb_class : int, optionnal
The number of class, if unset will most likely be 8.
(default: None)
nb_pts: int, optionnal
The number of points to use for the underlying grid.
(default: 10000)
resolution : int, optionnal
The resolution to use (in meters), if not set a default
resolution will be used in order to make a grid containing around
10000 pts (default: None).
disc_func: str, optionnal
The name of the classification function to be used to decide on which
break values to use to create the contour layer.
(default: None)
mask : str, optionnal
Path to the file (Polygons only) to use as clipping mask,
can also be a GeoDataFrame (default: None).
user_defined_breaks : list or tuple, optionnal
A list of ordered break to use to construct the contours
(overrides `nb_class` and `disc_func` values if any, default: None).
variable_name2 : str, optionnal
The name of the 2nd variable to use (numerical field only); values
computed from this variable will be will be used as to divide
values computed from the first variable (default: None)
output : string, optionnal
The type of output expected (not case-sensitive)
in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").
Returns
-------
smoothed_result : bytes or GeoDataFrame,
The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
Examples
--------
Basic usage, output to raw geojson (bytes):
>>> result = quick_idw("some_file.geojson", "some_variable", power=2)
More options, returning a GeoDataFrame:
>>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable",
nb_class=8, disc_func="percentiles",
output="GeoDataFrame")
"""
return SmoothIdw(input_geojson_points,
variable_name,
power,
nb_pts,
resolution,
variable_name2,
mask,
**kwargs
).render(nb_class=nb_class,
disc_func=disc_func,
user_defined_breaks=user_defined_breaks,
output=output)
def quick_stewart(input_geojson_points, variable_name, span,
beta=2, typefct='exponential',nb_class=None,
nb_pts=10000, resolution=None, mask=None,
user_defined_breaks=None, variable_name2=None,
output="GeoJSON", **kwargs):
"""
Function acting as a one-shot wrapper around SmoothStewart object.
    Read a file of point values and optionally a mask file,
return the smoothed representation as GeoJSON or GeoDataFrame.
Parameters
----------
input_geojson_points : str
Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
variable_name : str
The name of the variable to use (numerical field only).
span : int
        The span, in meters: the distance at which the interaction function
        falls to half of its value.
    beta : float
        The exponent applied to distance in the interaction function.
    typefct : str, optional
        The type of function in {"exponential", "pareto"} (default: "exponential").
    nb_class : int, optional
        The number of classes; if unset it will most likely be 8
        (default: None)
    nb_pts: int, optional
        The number of points to use for the underlying grid.
        (default: 10000)
    resolution : int, optional
        The resolution to use (in meters); if not set, a default
        resolution will be used in order to make a grid containing around
        10000 pts (default: None).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask,
        can also be a GeoDataFrame (default: None).
    user_defined_breaks : list or tuple, optional
        A list of ordered breaks to use to construct the contours
        (overrides `nb_class` value if any, default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None).
    output : string, optional
The type of output expected (not case-sensitive)
in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").
Returns
-------
smoothed_result : bytes or GeoDataFrame,
The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
Examples
--------
Basic usage, output to raw geojson (bytes):
>>> result = quick_stewart("some_file.geojson", "some_variable",
span=12500, beta=3, typefct="exponential")
More options, returning a GeoDataFrame:
>>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable",
span=12500, beta=3, typefct="pareto",
output="GeoDataFrame")
"""
return SmoothStewart(
input_geojson_points,
variable_name,
span,
beta,
typefct,
nb_pts,
resolution,
variable_name2,
mask,
**kwargs
).render(
nb_class=nb_class,
user_defined_breaks=user_defined_breaks,
output=output)
def make_regular_points_with_no_res(bounds, nb_points=10000):
"""
Return a regular grid of points within `bounds` with the specified
number of points (or a close approximate value).
Parameters
----------
bounds : 4-floats tuple
The bbox of the grid, as xmin, ymin, xmax, ymax.
    nb_points : int, optional
The desired number of points (default: 10000)
Returns
-------
points : numpy.array
An array of coordinates
shape : 2-floats tuple
The number of points on each dimension (width, height)
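    Examples
    --------
    A minimal sketch (the bounds used here are purely illustrative):
    >>> xs, ys, shape = make_regular_points_with_no_res((0., 0., 10., 10.),
    ...                                                 nb_points=2500)
    >>> shape
    (50, 50)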
"""
    minlon, minlat, maxlon, maxlat = bounds
offset_lon = (maxlon - minlon) / 8
offset_lat = (maxlat - minlat) / 8
minlon -= offset_lon
maxlon += offset_lon
minlat -= offset_lat
maxlat += offset_lat
nb_x = int(nb_points**0.5)
nb_y = int(nb_points**0.5)
return (
np.linspace(minlon, maxlon, nb_x),
np.linspace(minlat, maxlat, nb_y),
(nb_y, nb_x)
)
def make_regular_points(bounds, resolution, longlat=True):
"""
Return a regular grid of points within `bounds` with the specified
resolution.
Parameters
----------
bounds : 4-floats tuple
The bbox of the grid, as xmin, ymin, xmax, ymax.
resolution : int
The resolution to use, in the same unit as `bounds`
Returns
-------
points : numpy.array
An array of coordinates
shape : 2-floats tuple
The number of points on each dimension (width, height)
"""
# xmin, ymin, xmax, ymax = bounds
minlon, minlat, maxlon, maxlat = bounds
offset_lon = (maxlon - minlon) / 8
offset_lat = (maxlat - minlat) / 8
minlon -= offset_lon
maxlon += offset_lon
minlat -= offset_lat
maxlat += offset_lat
if longlat:
height = hav_dist(
np.array([(maxlon + minlon) / 2, minlat]),
np.array([(maxlon + minlon) / 2, maxlat])
)
width = hav_dist(
np.array([minlon, (maxlat + minlat) / 2]),
np.array([maxlon, (maxlat + minlat) / 2])
)
else:
height = np.linalg.norm(
np.array([(maxlon + minlon) / 2, minlat])
- np.array([(maxlon + minlon) / 2, maxlat]))
width = np.linalg.norm(
np.array([minlon, (maxlat + minlat) / 2])
- np.array([maxlon, (maxlat + minlat) / 2]))
nb_x = int(round(width / resolution))
nb_y = int(round(height / resolution))
if nb_y * 0.6 > nb_x:
nb_x = int(nb_x + nb_x / 3)
elif nb_x * 0.6 > nb_y:
nb_y = int(nb_y + nb_y / 3)
return (
np.linspace(minlon, maxlon, nb_x),
np.linspace(minlat, maxlat, nb_y),
(nb_y, nb_x)
)
def _compute_centroids(geometries):
res = []
for geom in geometries:
if hasattr(geom, '__len__'):
ix_biggest = np.argmax([g.area for g in geom])
res.append(geom[ix_biggest].centroid)
else:
res.append(geom.centroid)
return res
def make_dist_mat(xy1, xy2, longlat=True):
"""
    Return a distance matrix between two sets of coordinates.
    Use planar (Euclidean) distance, or haversine distance if `longlat` is True (the default).
Parameters
----------
xy1 : numpy.array
The first set of coordinates as [(x, y), (x, y), (x, y)].
xy2 : numpy.array
The second set of coordinates as [(x, y), (x, y), (x, y)].
    longlat : boolean, optional
        Whether the coordinates are in geographic (longitude/latitude) format
        or not (default: True).
Returns
-------
mat_dist : numpy.array
The distance matrix between xy1 and xy2
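    Examples
    --------
    A minimal sketch with planar coordinates (longlat=False); the second point
    of `xy1` lies 5 units away from the single point of `xy2`:
    >>> make_dist_mat(np.array([[0., 0.], [3., 4.]]),
    ...               np.array([[0., 0.]]), longlat=False)
    array([[0.],
           [5.]])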
"""
if longlat:
return hav_dist(xy1[:, None], xy2)
else:
d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0])
d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1])
return np.hypot(d0, d1)
def hav_dist(locs1, locs2):
"""
    Return a distance matrix between two sets of coordinates,
    using a spherical (great-circle) distance; coordinates are expected in radians.
Parameters
----------
locs1 : numpy.array
The first set of coordinates as [(long, lat), (long, lat)].
locs2 : numpy.array
The second set of coordinates as [(long, lat), (long, lat)].
Returns
-------
mat_dist : numpy.array
The distance matrix between locs1 and locs2
"""
# locs1 = np.radians(locs1)
# locs2 = np.radians(locs2)
cos_lat1 = np.cos(locs1[..., 0])
cos_lat2 = np.cos(locs2[..., 0])
cos_lat_d = np.cos(locs1[..., 0] - locs2[..., 0])
cos_lon_d = np.cos(locs1[..., 1] - locs2[..., 1])
return 6367000 * np.arccos(
cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
def isopoly_to_gdf(collec_poly, levels, field_name="levels"):
"""
Convert a collection of matplotlib.contour.QuadContourSet to a GeoDataFrame
Set an attribute `field_name` on each feature, according to `levels` values
(`levels` must have the same number of features as the collection of contours)
Parameters
----------
    collec_poly : matplotlib.contour.QuadContourSet
The result of a grid interpolation from matplotlib.
levels : array-like
The value to use as attributes for the constructed GeoDataFrame.
field_name : str
        The name of the field to be filled with the values contained in
        the `levels` variable (default: "levels").
Returns
-------
gdf_contours : GeoDataFrame
The result as a GeoDataFrame.
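    Examples
    --------
    A minimal sketch, assuming `xi`, `yi`, `zi` and `levels` have already been
    built (this mirrors the way `BaseSmooth.render` uses this function):
    >>> collec_poly = contourf(xi, yi, zi, levels)
    >>> gdf_contours = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max")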
"""
polygons, data = [], []
for i, polygon in enumerate(collec_poly.collections):
mpoly = []
for path in polygon.get_paths():
path.should_simplify = False
poly = path.to_polygons()
exterior, holes = [], []
if len(poly) > 0 and len(poly[0]) > 3:
exterior = poly[0]
if len(poly) > 1:
holes = [h for h in poly[1:] if len(h) > 3]
mpoly.append(Polygon(exterior, holes))
if len(mpoly) > 1:
mpoly = MultiPolygon(mpoly)
polygons.append(mpoly)
data.append(levels[i])
elif len(mpoly) == 1:
polygons.append(mpoly[0])
data.append(levels[i])
return GeoDataFrame(geometry=polygons,
data=data,
columns=[field_name])
class BaseSmooth:
def __repr__(self):
return "\n".join([self.info, self.info2, self.info3])
def __str__(self):
return "\n".join([self.info, self.info2, self.info3])
@property
def properties(self):
print("\n".join([self.info, self.info2, self.info3]))
def open_mask(self, mask, input_layer):
# Read the mask according to its format:
if isinstance(mask, GeoDataFrame):
self.mask = mask
elif isinstance(mask, str) and isinstance(input_layer, str) \
and mask == input_layer:
self.mask = self.gdf.copy()
else:
self.mask = GeoDataFrame.from_file(mask)
self.check_mask()
def check_mask(self):
# Ensure the mask is made of Polygon/MultiPolygon:
if len(set(self.mask.type)
.intersection({"Polygon", "MultiPolygon"})) > 0:
# Use the same projection for the mask as for the input layer:
if self.mask.crs and self.mask.crs is not self.proj_to_use:
self.use_mask = True
self.mask.to_crs(self.proj_to_use, inplace=True)
else:
self.use_mask = True
self.mask.crs = self.proj_to_use
else:
self.mask = None
self.use_mask = False
def filter_missing_values(self, variable_name, variable_name2):
# Convert the first value field to a numeric field if not already,
# and dont take into account features with no value / NaN value
if not self.gdf[variable_name].dtype in (float, int):
self.gdf.loc[:, variable_name] = \
self.gdf[variable_name].replace('', np.NaN)
self.gdf.loc[:, variable_name] = self.gdf[variable_name].astype(float)
self.gdf = self.gdf[self.gdf[variable_name].notnull()]
# Convert the second value field to a numeric field if not already,
# and dont take into account features with no value / NaN value
if variable_name2:
if not self.gdf[variable_name2].dtype in (float, int):
self.gdf.loc[:, variable_name2] = \
self.gdf[variable_name2].replace('', np.NaN)
self.gdf.loc[:, variable_name2] = \
self.gdf[variable_name2].astype(float)
self.gdf = self.gdf[self.gdf[variable_name2].notnull()]
# Provide a new index if entries have been removed :
self.gdf.index = range(len(self.gdf))
def define_levels(self, nb_class, disc_func):
zi = self.zi
_min = np.nanmin(zi)
if not nb_class:
# nb_class = int(get_opt_nb_class(len(zi)) - 2)
nb_class = 8
if not disc_func or "prog_geom" in disc_func:
levels = [_min] + [
np.nanmax(zi) / i for i in range(1, nb_class + 1)][::-1]
elif "equal_interval" in disc_func:
_bin = np.nanmax(zi) / nb_class
levels = [_min] + [_bin * i for i in range(1, nb_class+1)]
elif "percentiles" in disc_func:
levels = np.percentile(
np.concatenate((zi[zi.nonzero()], np.array([_min]))),
np.linspace(0.0, 100.0, nb_class+1))
elif "jenks" in disc_func:
levels = list(jenks_breaks(np.concatenate(
([_min], zi[zi.nonzero()])), nb_class))
levels[0] = levels[0] - _min * 0.01
elif "head_tail" in disc_func:
levels = head_tail_breaks(np.concatenate(
([_min], zi[zi.nonzero()])))
elif "maximal_breaks" in disc_func:
levels = maximal_breaks(np.concatenate(
([_min], zi[zi.nonzero()])), nb_class)
else:
raise ValueError
return levels
def render(self, nb_class=8, disc_func=None, user_defined_breaks=None,
output="GeoJSON", new_mask=False):
"""
Parameters
----------
        nb_class : int, optional
            The number of classes (default: 8).
        disc_func : str, optional
            The kind of data classification to be used (to be chosen from
            "equal_interval", "jenks", "percentiles", "head_tail_breaks"
            and "prog_geom"), default: None.
        user_defined_breaks : list or tuple, optional
            A list of ordered breaks to use to construct the contours
            (overrides `nb_class` and `disc_func` values if any)
            (default: None).
        output : string, optional
            The type of output expected (not case-sensitive)
            in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").
        new_mask : str, optional
Use a new mask by giving the path to the file (Polygons only)
to use as clipping mask, can also be directly a GeoDataFrame
(default: False).
Returns
-------
smoothed_result : bytes or GeoDataFrame
The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
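        Examples
        --------
        A minimal sketch (`smooth` stands for an already-built SmoothStewart
        or SmoothIdw instance; the arguments are only illustrative):
        >>> gdf = smooth.render(nb_class=6, disc_func="jenks",
        ...                     output="GeoDataFrame")
        >>> raw_geojson = smooth.render(user_defined_breaks=[0, 10, 100, 1000])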
"""
if disc_func and 'jenks' in disc_func and not jenks_breaks:
raise ValueError(
"Missing jenkspy package - could not use jenks breaks")
zi = self.zi
if isinstance(new_mask, (type(False), type(None))):
if not self.use_mask:
self.use_mask = False
self.mask = None
else:
self.open_mask(new_mask, None)
# We want levels with the first break value as the minimum of the
        # interpolated values and the last break value as the maximum of these
# values:
if user_defined_breaks:
levels = user_defined_breaks
if levels[len(levels) - 1] < np.nanmax(zi):
levels = levels + [np.nanmax(zi)]
if levels[0] > np.nanmin(zi):
levels = [np.nanmin(zi)] + levels
else:
levels = self.define_levels(nb_class, disc_func)
# Ensure that the levels are unique/increasing
# to avoid error from `contourf` :
s_levels = set(levels)
if len(s_levels) != len(levels):
levels = list(s_levels)
levels.sort()
try:
collec_poly = contourf(
self.XI, self.YI,
zi.reshape(tuple(reversed(self.shape))).T,
levels,
vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
# Retry without setting the levels :
except ValueError:
collec_poly = contourf(
self.XI, self.YI,
zi.reshape(tuple(reversed(self.shape))).T,
vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
# Fetch the levels returned by contourf:
levels = collec_poly.levels
# Set the maximum value at the maximum value of the interpolated values:
levels[-1] = np.nanmax(zi)
# Transform contourf contours into a GeoDataFrame of (Multi)Polygons:
res = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max")
if self.longlat:
def f(x, y, z=None):
return (x / 0.017453292519943295,
y / 0.017453292519943295)
res.geometry = [transform(f, g) for g in res.geometry]
res.crs = self.proj_to_use
# Set the min/max/center values of each class as properties
# if this contour layer:
res["min"] = [np.nanmin(zi)] + res["max"][0:len(res)-1].tolist()
res["center"] = (res["min"] + res["max"]) / 2
# Compute the intersection between the contour layer and the mask layer:
ix_max_ft = len(res) - 1
if self.use_mask:
res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
0).intersection(unary_union(self.mask.geometry.buffer(0)))
# res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
# 0).intersection(self.poly_max_extend.buffer(-0.1))
# Repair geometries if necessary :
if not all(t in ("MultiPolygon", "Polygon") for t in res.geom_type):
res.loc[0:ix_max_ft, "geometry"] = \
[geom if geom.type in ("Polygon", "MultiPolygon")
else MultiPolygon(
[j for j in geom if j.type in ('Polygon', 'MultiPolygon')]
)
for geom in res.geometry]
if "geojson" in output.lower():
return res.to_crs({"init": "epsg:4326"}).to_json().encode()
else:
return res
class SmoothStewart(BaseSmooth):
"""
Main object, allowing to create an instance with some required parameters
(span, beta, etc.) then render the contour polygons according to various
parameters (data classification, number of bins, output format, etc.)
Parameters
----------
input_layer : str
Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
variable_name : str
The name of the variable to use (numerical field only).
    span : int
        The span, in meters: the distance at which the interaction function
        falls to half of its value.
    beta : float
        The exponent applied to distance in the interaction function.
    typefct : str, optional
        The type of function in {"exponential", "pareto"} (default: "exponential").
    resolution_pts: int, optional
        The resolution to use (in number of points). Can be overridden by the
        'resolution' parameter if set.
    resolution : int, optional
        The resolution to use (in unit of the input file).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask (default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None).
Attributes
----------
zi : numpy.ndarray
The computed potential values for each `unknownpts`.
Methods
-------
render(nb_class=8, disc_func=None, user_defined_breaks=None,
output="GeoJSON", new_mask=False)
        Render the contour polygons according to the chosen number of classes and
        the chosen classification method (or according to
        `user_defined_breaks`, which overrides these parameters).
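    Examples
    --------
    A minimal sketch ("some_file.geojson" and "some_variable" are illustrative
    names for a point layer and one of its numerical fields):
    >>> smooth = SmoothStewart("some_file.geojson", "some_variable",
    ...                        span=12500, beta=2)
    >>> res = smooth.render(nb_class=8, disc_func="jenks",
    ...                     output="GeoDataFrame")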
"""
def __init__(self, input_layer, variable_name, span, beta,
typefct='exponential', nb_pts=10000,
resolution=None, variable_name2=None, mask=None, **kwargs):
self.sizelimit = kwargs.get('sizelimit', float('infinity'))
self.longlat = kwargs.get("distGeo", kwargs.get("longlat", True))
self.proj_to_use = {'init': 'epsg:4326'} if self.longlat \
else kwargs.get("projDistance", None) \
or ("""+proj=robin +lon_0=0 +x_0=0 +y_0=0 """
"""+ellps=WGS84 +datum=WGS84 +units=m +no_defs""")
self.gdf = input_layer.copy() if isinstance(input_layer, GeoDataFrame) \
else GeoDataFrame.from_file(input_layer)
if self.gdf.crs and self.gdf.crs is not self.proj_to_use:
self.gdf.to_crs(self.proj_to_use, inplace=True)
else:
self.gdf.crs = self.proj_to_use
self.info = (
'SmoothStewart - variable : {}{} ({} features)\n'
'beta : {} - span : {} - function : {}'
).format(variable_name,
" / {}".format(variable_name2) if variable_name2 else "",
len(self.gdf), beta, span, typefct)
if mask is not None:
self.open_mask(mask, input_layer)
else:
self.use_mask = False
self.info2 = ""
self.info3 = "Clipping mask: {}".format(self.use_mask)
# Don't use features with missing values:
self.filter_missing_values(variable_name, variable_name2)
# Calculate the value for each unknown points of the grid:
self.compute_zi(variable_name, span, beta,
variable_name2=variable_name2,
nb_pts=nb_pts,
resolution=resolution,
typefct=typefct)
@staticmethod
def _compute_interact_density(matdist, typefun, beta, span):
if 'pareto' in typefun:
alpha = (2.0 ** (1.0 / beta) - 1.0) / span
return (1 + alpha * matdist) ** (-beta)
elif 'exponential' in typefun:
alpha = np.log(2) / span ** beta
return np.exp(- alpha * matdist ** beta)
else:
raise ValueError('Bad interaction function argument: {}'
.format(typefun))
def compute_zi(self, variable_name, span, beta,
nb_pts, resolution=None, typefct="exponential",
variable_name2=None):
knownpts = self.gdf
if self.use_mask:
bounds = self.mask.total_bounds
else:
bounds = knownpts.total_bounds
if self.longlat:
bounds = list(map(lambda x : x * np.pi / 180, bounds))
# Get the x and y axis of the grid:
self.XI, self.YI, self.shape = make_regular_points(bounds, resolution) \
if resolution else make_regular_points_with_no_res(bounds, nb_pts)
# Verify that the size of the matrix doesn't exceed the sizelimit value if any:
if len(knownpts) * self.shape[0] * self.shape[1] > self.sizelimit:
            raise ValueError('Too high a resolution or too many input points')
# Compute the coordinates of each point of the grid :
unknownpts = np.array([(x, y) for x in self.XI for y in self.YI])
# Use the centroid if the feature is a Polygon
# or use the centroid of the largest Polygon for a MultiPolygon:
if all(i in ("Polygon", "Point") for i in knownpts.geom_type.values):
centroids = knownpts.geometry.centroid
else:
centroids = _compute_centroids(knownpts.geometry)
# Coordinates of every known point:
knwpts_coords = np.array([
(g.coords.xy[0][0], g.coords.xy[1][0])
for g in centroids])
if self.longlat:
knwpts_coords *= np.pi / 180
# Compute the interaction matrix:
mat_dens = self._compute_interact_density(
make_dist_mat(knwpts_coords, unknownpts, longlat=self.longlat),
typefct, beta, span)
if not variable_name2:
self.zi = (
knownpts[variable_name].values[:, np.newaxis] * mat_dens
).sum(axis=0).round(8)
else:
self.zi1 = (
knownpts[variable_name].values[:, np.newaxis] * mat_dens
).sum(axis=0)
self.zi2 = (
knownpts[variable_name2].values[:, np.newaxis] * mat_dens
).sum(axis=0)
self.zi = (np.true_divide(self.zi1, self.zi2)).round(8)
# Replace NaN values by -1.0 :
self.zi[np.argwhere(np.isnan(self.zi)).reshape(-1)] = -1.0
# Replace inf values by -1.0 :
self.zi[np.argwhere(np.isinf(self.zi)).reshape(-1)] = -1.0
self.info2 = ("unknown points : {} - interpolation grid shape : {}"
).format(len(unknownpts), self.shape)
class SmoothIdw(BaseSmooth):
"""
Main object, allowing to create an instance with the appropriate power
parameter then render the contour polygons according to various parameters
(data classification, number of bins, output format, etc.)
Parameters
----------
input_layer : str
Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
variable_name : str
The name of the variable to use (numerical field only).
power : float
The power parameter of the IDW weighting function, as defined by Shepard.
    resolution_pts: int, optional
        The resolution to use (in number of points). Can be overridden by the
        'resolution' parameter if set.
    resolution : int, optional
        The resolution to use (in unit of the input file).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask (default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None).
Attributes
----------
zi : numpy.ndarray
The interpolated values (for each `unknownpts`).
Methods
-------
render(nb_class=8, disc_func=None, user_defined_breaks=None,
output="GeoJSON", new_mask=False)
        Render the contour polygons according to the chosen number of classes and
        the chosen classification method (or according to
        `user_defined_breaks`, which overrides these parameters).
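    Examples
    --------
    A minimal sketch ("some_file.geojson" and "some_variable" are illustrative
    names for a point layer and one of its numerical fields):
    >>> smooth = SmoothIdw("some_file.geojson", "some_variable", power=2)
    >>> res = smooth.render(nb_class=8, output="GeoDataFrame")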
"""
def __init__(self, input_layer, variable_name, power, nb_pts=10000,
resolution=None, variable_name2=None, mask=None, **kwargs):
self.sizelimit = kwargs.get('sizelimit', float('infinity'))
self.longlat = kwargs.get("distGeo", kwargs.get("longlat", True))
self.proj_to_use = {'init': 'epsg:4326'} if self.longlat \
else kwargs.get("projDistance", None) \
or ("""+proj=robin +lon_0=0 +x_0=0 +y_0=0 """
"""+ellps=WGS84 +datum=WGS84 +units=m +no_defs""")
self.gdf = input_layer.copy() if isinstance(input_layer, GeoDataFrame) \
else GeoDataFrame.from_file(input_layer)
if self.gdf.crs and self.gdf.crs is not self.proj_to_use:
self.gdf.to_crs(self.proj_to_use, inplace=True)
else:
self.gdf.crs = self.proj_to_use
self.info = (
'SmoothIdw - variable : {}{} ({} features)\n'
).format(variable_name,
" / {}".format(variable_name2) if variable_name2 else "",
len(self.gdf))
if mask is not None:
self.open_mask(mask, input_layer)
else:
self.use_mask = False
self.info2 = ""
self.info3 = "Clipping mask: {}".format(self.use_mask)
# Don't use features with missing values:
self.filter_missing_values(variable_name, variable_name2)
# Calculate the value for each unknown points of the grid:
self.compute_zi(variable_name,
power,
nb_pts=nb_pts,
resolution=resolution,
variable_name2=variable_name2)
def compute_zi(self, variable_name, power,
nb_pts, resolution=None, variable_name2=None):
knownpts = self.gdf
if self.use_mask:
bounds = self.mask.total_bounds
else:
bounds = knownpts.total_bounds
if self.longlat:
bounds = list(map(lambda x : x * np.pi / 180, bounds))
# Get the x and y axis of the grid:
self.XI, self.YI, self.shape = make_regular_points(bounds, resolution) \
if resolution else make_regular_points_with_no_res(bounds, nb_pts)
# Verify that the size of the matrix doesn't exceed the sizelimit value if any:
if len(knownpts) * self.shape[0] * self.shape[1] > self.sizelimit:
            raise ValueError('Too high a resolution or too many input points')
# Compute the coordinates of each point of the grid :
unknownpts = np.array([(x, y) for x in self.XI for y in self.YI])
# Use the centroid if the feature is a Polygon
# or use the centroid of the largest Polygon for a MultiPolygon:
if all(i in ("Polygon", "Point") for i in knownpts.geom_type.values):
centroids = knownpts.geometry.centroid
else:
centroids = _compute_centroids(knownpts.geometry)
# Coordinates of every known point:
knwpts_coords = np.array([
(g.coords.xy[0][0], g.coords.xy[1][0])
for g in centroids])
if self.longlat:
knwpts_coords *= np.pi / 180
mat_weights = 1 / np.power(
make_dist_mat(knwpts_coords, unknownpts, longlat=self.longlat),
power)
# Make weights sum to one
mat_weights /= mat_weights.sum(axis=0)
# Multiply the weights for each interpolated point by all observed Z-values
self.zi = np.dot(mat_weights.T, knownpts[variable_name].values[:, np.newaxis])
# Replace NaN values by -1.0 :
self.zi[np.argwhere(np.isnan(self.zi)).reshape(-1)] = -1.0
# Replace inf values by -1.0 :
self.zi[np.argwhere(np.isinf(self.zi)).reshape(-1)] = -1.0
self.info2 = ("unknown points : {} - interpolation grid shape : {}"
).format(len(unknownpts), self.shape)
| StarcoderdataPython |
7770 | <reponame>mederrata/probability
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
- `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
  distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
The default `safe_value_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
Besides the output of `sample`, results from `safe_value_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
        the `distribution` argument) and returns a deterministic, safe sample
value. This helps to avoid `nan` gradients and allows downstream usage
        of samples from a `Masked` distribution to assume a "safe" value, even if
        it is invalid. (Be careful to ensure that such downstream usages are
themselves masked!) Note that the result of this function will be
wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls)
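# --- Editor's note: hedged usage sketch, not part of the original module. ---
# It illustrates the masked-KL semantics registered above, assuming this file
# backs the public `tfp.distributions.Masked` wrapper; the tensors below are
# invented for the example.
#
# import tensorflow as tf
# import tensorflow_probability as tfp
# tfd = tfp.distributions
#
# mask = tf.constant([True, False])
# a = tfd.Masked(tfd.Normal(loc=[0., 0.], scale=1.), validity_mask=mask)
# b = tfd.Masked(tfd.Normal(loc=[1., 1.], scale=1.), validity_mask=mask)
# kl = tfd.kl_divergence(a, b)  # underlying KL where valid, 0. where both invalid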
| StarcoderdataPython |
130125 | #!/usr/bin/env python3
from collections import OrderedDict
import re
import sys
try:
filename = sys.argv[1]
if '.' not in sys.argv[2]:
raise ValueError
except:
print('Usage: python3 {} filename (entity_id [attribute...])...'.format(sys.argv[0]))
sys.exit(1)
attrs = {}
entity_id = None
for arg in sys.argv[2:]:
if '.' in arg:
if entity_id is not None:
attrs[entity_id] = entity_attrs
entity_id = arg
entity_attrs = []
else:
entity_attrs.append(arg)
attrs[entity_id] = entity_attrs
haevent = re.compile(
r'([0-9-]+ [0-9:]+).*homeassistant_(start|started|stop|final_write|close)\[.*'
)
new_state_none = re.compile(r'([0-9-]+ [0-9:]+)(.*)new_state=None(.*)')
ent_id = re.compile(r'.*entity_id=([^,>]+).*')
new_state = re.compile(
r'([0-9-]+ [0-9:]+).*new_state=<state ([^=]+)=([^;]*); (.*) @ ([0-9+-:.T]+)>.*')
new_state2 = re.compile(
r'([0-9-]+ [0-9:]+).*new_state=<state ([^=]+)=([^@]*) @ ([0-9+-:.T]+)>.*')
ent_hdr = 'entity_id'
max_ent = len(ent_hdr)
ts_hdr = 'log time'
max_ts = len(ts_hdr)
lc_hdr = 'last_changed'
max_lc = len(lc_hdr)
state_hdr = 'state'
max_state = len(state_hdr)
if len(attrs) == 1:
max_attr = {}
for attr in entity_attrs:
max_attr[attr] = len(attr)
else:
attr_hdr = 'attributes'
HAEVENT = 'Home Assistant'
HAFMT = ' {} {{}} '.format(HAEVENT)
states = []
with open(filename) as f:
for line in f:
m = haevent.match(line)
if m:
ts = m.group(1)
max_ts = max(max_ts, len(ts))
last_changed = HAFMT.format(m.group(2).replace('_', ' ').title())
max_lc = max(max_lc, len(last_changed))
states.append((None, ts, last_changed, None, None))
continue
m = new_state_none.match(line)
if m:
n = ent_id.match(m.group(2)) or ent_id.match(m.group(3))
entity_id = n.group(1)
if entity_id in attrs:
max_ent = max(max_ent, len(entity_id))
ts = m.group(1)
max_ts = max(max_ts, len(ts))
state = '=== None ==='
max_state = max(max_state, len(state))
states.append((entity_id, ts, '', state, {}))
continue
m = new_state.match(line)
if m:
s = m.group(4)
last_changed = m.group(5)
else:
m = new_state2.match(line)
s = ''
last_changed = m.group(4) if m else ''
if m and m.group(2) in attrs:
entity_id = m.group(2)
max_ent = max(max_ent, len(entity_id))
ts = m.group(1)
max_ts = max(max_ts, len(ts))
max_lc = max(max_lc, len(last_changed))
state = m.group(3)
max_state = max(max_state, len(state))
_attrs = OrderedDict()
for attr in attrs[entity_id]:
try:
start = s.index(attr+'=')+len(attr)+1
_attr = s[start:s.rfind(', ', start, s.find('=', start))]
except:
_attr = '???'
_attrs[attr] = _attr
if len(attrs) == 1:
for attr in entity_attrs:
max_attr[attr] = max(max_attr[attr], len(_attrs[attr]))
states.append((entity_id, ts, last_changed, state, _attrs))
if len(attrs) > 1:
print('{:{}} | '.format(ent_hdr, max_ent), end='')
print('{:{}} | {:{}} | {:{}}'.format(ts_hdr, max_ts, lc_hdr, max_lc, state_hdr, max_state), end='')
if len(attrs) == 1:
for attr in entity_attrs:
print(' | {:{}}'.format(attr, max_attr[attr]), end='')
else:
print(' | {}'.format(attr_hdr), end='')
print('')
if len(attrs) > 1:
print('-'*max_ent, end='-|-')
print('-'*max_ts, '-'*max_lc, '-'*max_state, sep='-|-', end='')
if len(attrs) == 1:
for attr in entity_attrs:
print('', '-'*max_attr[attr], sep='-|-', end='')
else:
print('-|-', end='')
print('-'*len(attr_hdr), end='')
print('')
prev_entity_id = None
for entity_id, ts, last_changed, state, _attrs in states:
if HAEVENT in last_changed:
entity_id = '='*max_ent
last_changed = '{:=^{}}'.format(last_changed, max_lc)
state = '='*max_state
if len(attrs) == 1:
_attrs = OrderedDict()
for attr in entity_attrs:
_attrs[attr] = '='*max_attr[attr]
else:
_attrs = {'=': '='*(len(attr_hdr)-2)}
if len(attrs) > 1:
print('{:{}} | '.format('' if entity_id == prev_entity_id and HAEVENT not in last_changed else entity_id, max_ent), end='')
prev_entity_id = entity_id
print('{:{}} | {:{}} | {:{}}'.format(ts, max_ts, last_changed , max_lc, state , max_state), end='')
if len(attrs) == 1:
for k,v in _attrs.items():
print(' | {:{}}'.format(v if HAEVENT not in last_changed else '='*max_attr[k], max_attr[k]), end='')
else:
print(' |', end='')
for k,v in _attrs.items():
print(' {}={}'.format(k, v), end='')
print('')
| StarcoderdataPython |
1739474 | <reponame>black-shadows/LeetCode-Solutions<filename>Python/convex-polygon.py
# Time: O(n)
# Space: O(1)
class Solution(object):
def isConvex(self, points):
"""
:type points: List[List[int]]
:rtype: bool
"""
def det(A):
return A[0][0]*A[1][1] - A[0][1]*A[1][0]
n, prev, curr = len(points), 0, None
for i in xrange(len(points)):
A = [[points[(i+j) % n][0] - points[i][0], points[(i+j) % n][1] - points[i][1]] for j in (1, 2)]
curr = det(A)
if curr:
if curr * prev < 0:
return False
prev = curr
return True
| StarcoderdataPython |
1720290 | <reponame>saxobroko/node-launcher
from unittest.mock import MagicMock
from tempfile import NamedTemporaryFile
import pytest
from node_launcher.gui.menu.nodes_manage.manage_dialogs.configuration import ConfigurationDialog
from node_launcher.node_set.lib.configuration import Configuration
from node_launcher.node_set.lib.configuration_property import ConfigurationProperty
@pytest.fixture
def configuration_dialog() -> ConfigurationDialog:
with NamedTemporaryFile(suffix='node.conf', delete=False) as f:
path = f.name
configuration = Configuration(name='node', path=path)
configuration.load()
node = MagicMock()
node.configuration = configuration
dialog = ConfigurationDialog(node)
return dialog
class TestConfigurationDialog(object):
def test_init(self, configuration_dialog: ConfigurationDialog):
assert configuration_dialog
def test_append_key_value(
self, configuration_dialog: ConfigurationDialog):
configuration_dialog.add_row(
'test_key',
'test_new_value',
'1'
)
assert configuration_dialog.table.rowCount() == 1
assert configuration_dialog.table.item(0, 0).text() == '1'
assert configuration_dialog.table.item(0, 1).text() == 'test_key'
assert configuration_dialog.table.item(0, 2).text() == 'test_new_value'
def test_update_key(self, configuration_dialog: ConfigurationDialog):
configuration_dialog.node.configuration.configuration_changed.emit(
None, ConfigurationProperty('1', 'key', 'value')
)
configuration_dialog.node.configuration.configuration_changed.emit(
None, ConfigurationProperty('2', 'key2', 'value2')
)
assert configuration_dialog.table.rowCount() == 2
assert configuration_dialog.table.item(0, 0).text() == '1'
assert configuration_dialog.table.item(0, 1).text() == 'key'
assert configuration_dialog.table.item(0, 2).text() == 'value'
assert configuration_dialog.table.item(1, 0).text() == '2'
assert configuration_dialog.table.item(1, 1).text() == 'key2'
assert configuration_dialog.table.item(1, 2).text() == 'value2'
def test_handle_cell_change(self,
configuration_dialog: ConfigurationDialog):
configuration_dialog.add_row(
'test_key',
'test_new_value',
''
)
configuration_dialog.table.item(0, 2).setText('test_edit_value')
assert configuration_dialog.node.configuration['test_key'] == 'test_edit_value'
configuration_dialog.add_row(
'test_key',
'test_new_value2',
''
)
configuration_dialog.table.item(1, 2).setText('test_edit_multi_value')
all_config_values = [cp.value for cp in configuration_dialog.node.configuration.get_all_configurations()]
assert sorted(all_config_values) == sorted([
'test_edit_value',
'test_edit_multi_value'
])
| StarcoderdataPython |
62961 | from twisted.internet import defer
import config
import log
from color import colorize
from err import *
class Scanner(object):
def __init__(self, target, checks, title=None, verbose=False, runningResults=False):
self.target = target
self.checks = checks
self.title = title
self.scans = []
self.verbose = verbose
self.runningResults = runningResults
def __repr__(self):
return "<Scanner({0.target}, {0.title}, v={0.verbose})>".format(self)
def checkFinished(self, check):
if self.runningResults:
self.showResult(check.result)
def showResult(self, result):
# TODO: RESULT_SUB master results should be shown with warningsOnly on!
output = self.formatResult(result)
if output:
print(output)
for subresult in result.check.subresults:
output = self.formatResult(subresult, sub=True)
if output:
print(output)
def showResults(self):
if not self.runningResults:
if config.warningsOnly:
hasWarning = False
for scan in self.scans:
if not scan.result.status:
hasWarning = True
for subresult in scan.subresults:
if not subresult.status:
hasWarning = True
if not hasWarning:
return
print("")
print(colorize("@W{0}@x".format(self.title)))
print(config.bar)
for scan in self.scans:
self.showResult(scan.result)
print("")
def formatResult(self, result, sub=False):
if result.extra:
extra = colorize('@B--@x ') + result.extra
else:
extra = ''
if not result.status:
last = colorize('@B[@R!!!@B]@x')
elif config.warningsOnly:
return
elif result.status == CHECK_NOT_APPLICABLE:
            last = colorize('@B[@D - @B]@x')
elif result.status == CHECK_RESULT_HIDDEN:
last = ''
elif result.status == CHECK_RESULT_SUB:
last = colorize('@B[---]@x')
elif result.status == CHECK_RESULT_UNCERTAIN:
last = colorize('@B[@Y ? @B]@x')
else:
last = colorize('@B[ @G- @B]@x')
if sub:
output = colorize(" @y-@x {0:49} {1}{2}".format(result.text, last, extra))
else:
output = colorize(" @Y*@x {0:51} {1}{2}".format(result.text, last, extra))
return output
def run(self):
if self.runningResults:
print("")
print(colorize("@W{0}@x".format(self.title)))
print(config.bar)
for check in self.checks:
c = check(self.target)
# "lambda x: c, x" takes the (same) c in scope each loop; we have to do
# manual assignment to get the current c.
def checkFinishedTrigger(value, c=c):
self.checkFinished(c)
d = (c.run()
.addBoth(checkFinishedTrigger))
c.deferred = d
self.scans.append(c)
dl = (defer.DeferredList([c.deferred for c in self.scans])
.addCallback(lambda x: self.showResults())
.addErrback(log.err)) # Uncaught error
return dl
class DomainScanner(Scanner):
pass
class HostScanner(Scanner):
pass
class LocalScanner(Scanner):
pass
| StarcoderdataPython |
29248 | import rv.api
class Command(object):
args = ()
processed = False
def __init__(self, *args, **kw):
self._apply_args(*args, **kw)
def __repr__(self):
attrs = ' '.join(
'{}={!r}'.format(
arg,
getattr(self, arg),
)
for arg in self.args
if hasattr(self, arg)
)
return '<{}{}>'.format(
self.__class__.__name__,
' ' + attrs if attrs else '',
)
def _apply_args(self, *args, **kw):
for key, value in zip(self.args, args):
setattr(self, key, value)
for key, value in kw.items():
if key in self.args:
setattr(self, key, value)
def copy(self, *args, **kw):
c2 = self.__class__()
for key in self.args:
try:
value = getattr(self, key)
except AttributeError:
pass
else:
setattr(c2, key, value)
c2._apply_args(*args, **kw)
return c2
class ConnectModules(Command):
args = 'engine', 'src', 'dest'
class Engine(Command):
class Track(object):
def __init__(self, engine, index):
self.engine = engine
self.index = index
def __repr__(self):
return '<Track {}>'.format(self.index)
def __ror__(self, other):
if isinstance(other, NoteOn):
note = other.copy(engine=self.engine, track=self)
return note
def off(self):
return NoteOff(self.engine, self)
class Output(object):
index = 0
def __init__(self, engine):
self.engine = engine
def __repr__(self):
return '<Output>'
def __lshift__(self, other):
return ConnectModules(self.engine, other.module, self)
def __rshift__(self, other):
return ConnectModules(self.engine, self, other.module)
def __init__(self, *args, **kw):
super(Engine, self).__init__(*args, **kw)
self.output = self.Output(self)
def new_module(self, obj, *args, **kw):
if isinstance(obj, type) and issubclass(obj, rv.m.Module):
obj = obj(*args, **kw)
if isinstance(obj, rv.m.Module):
return Module(self, obj)
else:
raise ValueError()
def track(self, index):
return self.Track(self, index)
class Generator(Command):
args = 'fn', 'fn_args', 'fn_kw'
generator = None
def advance(self, cursor):
if self.generator is not None:
try:
self.generator.send(cursor)
except StopIteration:
self.stop() | cursor
@property
def started(self):
return self.generator is not None
def start(self):
if not self.started:
self.generator = self.fn(*self.fn_args, **self.fn_kw)
self.generator.send(None)
def stop(self):
return GeneratorStop(self)
@classmethod
def factory(cls, fn):
def factory_fn(*args, **kw):
return cls(fn, args, kw)
return factory_fn
class GeneratorStop(Command):
args = 'parent',
class Module(Command):
args = 'engine', 'module'
def __lshift__(self, other):
return ConnectModules(self, other.module, self.module)
def __rshift__(self, other):
return ConnectModules(self, self.module, other.module)
def on(self, note, vel=None):
return NoteOn(note, vel, self.engine, self.module)
class NoteOff(Command):
args = 'engine', 'track'
class NoteOn(Command):
args = 'note', 'vel', 'engine', 'module', 'track'
def off(self):
return NoteOff(self.engine, self.track)
| StarcoderdataPython |
4803319 | from __future__ import absolute_import
from version_information.version import version as __version__
from version_information.version_information import *
| StarcoderdataPython |
3321969 | # -*- coding: UTF-8 -*-
#
from os import environ
DEBUG = True
YDB_ENDPOINT = environ.get("YDB_ENDPOINT")
YDB_DATABASE = environ.get("YDB_DATABASE", "")
YDB_PATH = environ.get("YDB_PATH", "")
YDB_TOKEN = environ.get("YDB_TOKEN")
| StarcoderdataPython |
4817660 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#from pylab import rcParams
#import matplotlib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
train = pd.read_csv('train.csv')
print('Shape of the train data with all features:', train.shape)
train = train.select_dtypes(exclude=['object'])
print("")
print('Shape of the train data with numerical features:', train.shape)
train.drop('Id',axis = 1, inplace = True)
train.fillna(0,inplace=True)
test = pd.read_csv('test.csv')
test = test.select_dtypes(exclude=['object'])
ID = test.Id
test.fillna(0,inplace=True)
test.drop('Id',axis = 1, inplace = True)
print("")
print("List of features contained our dataset:",list(train.columns)) | StarcoderdataPython |
4824472 | import gym
import numpy as np
from gym.envs import register
from joblib import Parallel, delayed
import pickle
import matplotlib.pyplot as plt
from q_learning import Qlearning
from tile_coding_action_value_function import TileCodingActionValueFunction
from true_online_sarsa_lambda import TrueOnlineSarsaLambda
register(
id='MountainCar-v3',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=10000,
reward_threshold=-110.0,
)
MAX_EPISODES = 500
NUM_RUNS = 100
TEST_ENV = 'CartPole-v0'
NUM_TILINGS = 16
NUM_PARLL_JOBS = 4
WINDOW_SIZE = 50
# ALPHAS = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
ALPHAS = [0.5, 0.6, 0.7]
# ALPHAS = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# ALPHAS = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# ALPHAS = [0.5]
EPSILONS = [1.0]
EPSILONS_DECAY = [0.5, 0.8, 0.9]
def tau_to_lambda(tau):
return 1.0 - (1.0/tau)
# LAMBDA_PARAMS = tau_to_lambda(np.asarray([1, 2, 4, 8, 16, 32]))
# LAMBDA_PARAMS = tau_to_lambda(np.asarray([2, 4, 8]))
LAMBDA_PARAMS = [0.0]
# LAMBDA_PARAMS = [0, 0.5, 0.75, 0.8, 0.9, 0.95, 0.99, 0.999]
# LAMBDA_PARAMS = [0.4, 0.5, 0.6]
def make_q_learner(env, alpha, epsilon, gamma, action_value_function, epsilon_decay_factor, lambda_param=None):
return Qlearning(env,
alpha / NUM_TILINGS,
epsilon,
gamma,
action_value_function,
epsilon_decay_factor)
def make_sarsa_learner(env, alpha, epsilon, gamma, action_value_function, epsilon_decay_factor, lambda_param):
return TrueOnlineSarsaLambda(env,
alpha / NUM_TILINGS,
epsilon,
gamma,
action_value_function,
epsilon_decay_factor,
lambda_param)
class RewardsInfo:
def __init__(self, algorithm_name, alpha, epsilon, lambda_param, mean_return, episodes_window, total_episodes,
total_time_steps, epsilon_decay):
self.algorithm_name = algorithm_name
self.alpha = alpha
self.epsilon = epsilon
self.lambda_param = lambda_param
self.epsilon_decay = epsilon_decay
self.episodes_window = episodes_window
self.total_episodes = total_episodes
self.total_time_steps = total_time_steps
self.mean_return = mean_return
def do_experiment(parameters):
alpha, epsilon, lambda_param, epsilon_decay = parameters
env = gym.make(TEST_ENV)
dim_ranges = [env.observation_space.high[i] - env.observation_space.low[i] for i in
range(0, env.observation_space.high.size)]
episodes_completed = []
episode_returns = []
episodes_windows = []
total_time_steps_list = []
print("Running alpha: ", alpha, ", epsilon: ", epsilon, ", lambda: ", lambda_param, ", eps_decay: ", epsilon_decay)
for i in range(NUM_RUNS):
env.reset()
action_value_function = TileCodingActionValueFunction(env.observation_space.shape[0],
dim_ranges,
env.action_space.n,
num_tiles=2**11,
num_tilings=NUM_TILINGS,
scale_inputs=False)
# algorithm = TrueOnlineSarsaLambda(env,
# alpha / NUM_TILINGS,
# epsilon,
# 1.0,
# action_value_function,
# epsilon_decay_factor=epsilon_decay,
# lambda_param=lambda_param)
algorithm = Qlearning(env,
alpha / NUM_TILINGS,
epsilon,
1,
action_value_function,
epsilon_decay_factor=epsilon_decay)
algorithm.do_learning(MAX_EPISODES, show_env=False)
episodes_completed.append(np.size(algorithm.episode_return))
episode_returns.append(np.mean(algorithm.episode_return))
episodes_windows.append(
algorithm.episode_return[np.size(algorithm.episode_return) - WINDOW_SIZE:np.size(algorithm.episode_return)])
total_time_steps_list.append(np.abs(np.sum(algorithm.episode_return)))
return RewardsInfo(algorithm_name="Q Learning",
alpha=alpha,
epsilon=epsilon,
lambda_param=lambda_param,
mean_return=episode_returns,
episodes_window=episodes_windows,
total_episodes=episodes_completed,
total_time_steps=total_time_steps_list,
epsilon_decay=epsilon_decay)
def run_experiment():
rewards_list = []
parameters_list = [(alpha, epsilon, lambda_param, epsilon_decay) for alpha in ALPHAS for epsilon in EPSILONS for lambda_param in
LAMBDA_PARAMS for epsilon_decay in EPSILONS_DECAY]
for reward_info in Parallel(n_jobs=NUM_PARLL_JOBS)(
delayed(do_experiment)(parameters) for parameters in parameters_list):
rewards_list.append(reward_info)
pickle.dump(rewards_list, open("cart_pole_qlearn_rewards_list_100_runs_rerun.p", "wb"))
return rewards_list
def print_best_results(rewards_list):
best_reward = None
for reward_obj in rewards_list:
# if best_reward is None or np.mean(reward_obj.episodes_window) > np.mean(best_reward.episodes_window):
# best_reward = reward_obj
if best_reward is None or np.mean(reward_obj.total_time_steps) < np.mean(best_reward.total_time_steps) and np.mean(reward_obj.episodes_window) > -110.0:
best_reward = reward_obj
print("Best algorithm: alpha: ", best_reward.alpha, ", epsilon: ", best_reward.epsilon, ", lambda: ",
best_reward.lambda_param, ", eps_decay: ", best_reward.epsilon_decay)
# print("Best algorithm: alpha: ", best_reward.alpha, ", epsilon: ", best_reward.epsilon, ", lambda: ",
# best_reward.lambda_param)
print("Mean return: ", np.mean(best_reward.mean_return))
print("Last 50 Episodes window: ", np.mean(best_reward.episodes_window))
print("Total episodes before solve: ", np.mean(best_reward.total_episodes), "+-", np.std(best_reward.total_episodes) / np.sqrt(len(best_reward.total_episodes)))
print("Max episodes before solve: ", np.max(best_reward.total_episodes))
print("Total time steps: ", np.mean(best_reward.total_time_steps), "+-", np.std(best_reward.total_time_steps) / np.sqrt(len(best_reward.total_time_steps)))
def print_all_results(reward_file):
rewards_list = pickle.load(open(reward_file, "rb"))
for reward in rewards_list:
# if reward.lambda_param == 0.875:
print("Algorithm: alpha: ", reward.alpha, ", epsilon: ", reward.epsilon, ", lambda: ",
reward.lambda_param, ", eps_decay: ", reward.epsilon_decay)
print("Mean return: ", np.mean(reward.mean_return))
print("Last 10 Episodes window: ", np.mean(reward.episodes_window))
print("Total episodes before solve: ", np.mean(reward.total_episodes), "+-", np.std(reward.total_episodes) / np.sqrt(len(reward.total_episodes)))
print("Max episodes before solve: ", np.max(reward.total_episodes))
print("Total time steps: ", np.mean(reward.total_time_steps), "+-", np.std(reward.total_time_steps) / np.sqrt(len(reward.total_time_steps)))
if __name__ == "__main__":
# rewards_list = run_experiment()
# print_best_results(rewards_list)
# print_all_results("/home/zach/PycharmProjects/CMPUT690Project/mc_rewards_list_100_runs.p")
print_best_results(rewards_list=pickle.load(open("/Users/zach/PycharmProjects/CMPUT690Project/cart_pole_sarsa_rewards_list_100_runs_rerun.p", "rb")))
| StarcoderdataPython |
43131 | import json
from service.tests.functional import MonitoringTestCase
class TestAlarm(MonitoringTestCase):
ENDPOINT = '/nbi/monitoring/api/alarms/'
def test_get_alarms(self):
"""
Test that validates getting all alarms
It asserts the response code 200, the default alarm limit 5 and the fields in an alarm:
metrics
state
alarm_definition
updated_timestamp
created_timestamp
state_updated_timestamp
id
:return:
"""
result = self.app.get(TestAlarm.ENDPOINT, headers={'X-Auth-Token': self.cloud_admin})
self.assertEqual(result.status, 200)
data = json.loads(result.body.decode('utf-8'))
self.assertTrue('alarms' in data)
for alarm in data.get('alarms'):
for field in ['metrics', 'state', 'alarm_definition', 'updated_timestamp', 'created_timestamp',
'state_updated_timestamp', 'id']:
self.assertTrue(field in alarm)
self.assertTrue(len(data.get('alarms')) <= 5)
def test_get_alarms_filter(self):
"""
Test that validates getting an alarm using a filter
It asserts the response code 200, the filtered information and the limit
:return:
"""
result = self.app.get(TestAlarm.ENDPOINT, headers={'X-Auth-Token': self.cloud_admin},
params={'limit': 2, 'state': 'OK'})
        self.assertEqual(result.status, 200)
data = json.loads(result.body.decode('utf-8'))
self.assertTrue('alarms' in data)
for alarm in data.get('alarms'):
            self.assertEqual(alarm.get('state'), 'OK')
self.assertTrue(len(data.get('alarms')) <= 2)
def test_get_alarms_unauth(self):
"""
Test that validates an unauthenticated user can't access the alarms
It asserts the response code 401
:return:
"""
result = self.app.get(TestAlarm.ENDPOINT, params={'limit': 2, 'state': 'OK'}, status=401)
        self.assertEqual(result.status, 401)
def test_specific_alarm(self):
"""
Test that validates getting a specific alarm
It asserts the response code 200 and the fields in the alarm:
metrics
state
alarm_definition
updated_timestamp
created_timestamp
state_updated_timestamp
id
:return:
"""
result = self.app.get(TestAlarm.ENDPOINT + '000072da-53f7-434e-8d89-77c4b77f1636',
headers={'X-Auth-Token': self.cloud_admin})
        self.assertEqual(result.status, 200)
data = json.loads(result.body.decode('utf-8'))
for field in ['metrics', 'state', 'alarm_definition', 'updated_timestamp', 'created_timestamp',
'state_updated_timestamp', 'id']:
self.assertTrue(field in data)
def test_get_invalid_alarm(self):
"""
Test that validates a user can't collect an invalid alarm id
It asserts the response code 404
:return:
"""
result = self.app.get(TestAlarm.ENDPOINT + '000210ee', status=404,
headers={'X-Auth-Token': self.cloud_admin})
self.assertEqual(result.status, 404)
| StarcoderdataPython |
1678274 | <filename>src/pymor/operators/interfaces.py
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from pymor.core.interfaces import ImmutableInterface, abstractmethod, abstractstaticmethod
from pymor.parameters.base import Parametric
class OperatorInterface(ImmutableInterface, Parametric):
"""Interface for |Parameter| dependent discrete operators.
Every operator is viewed as a map ::
A(μ): R^s -> R^r
Note that there is no special distinction between functionals
and operators in pyMOR. A functional is simply an operator with
range dimension 1 and |NumpyVectorArray| as `range.type`.
Attributes
----------
invert_options
|OrderedDict| of possible options for :meth:`~OperatorInterface.apply_inverse`.
Each key is a type of inversion algorithm which can be used to invert the
operator. `invert_options[k]` is a dict containing all options along with
their default values which can be set for algorithm `k`. We always have
`invert_options[k]['type'] == k` such that `invert_options[k]` can be passed
directly to :meth:`~OperatorInterface.apply_inverse()`.
linear
`True` if the operator is linear.
source
The source |VectorSpace|.
range
The range |VectorSpace|.
"""
@abstractmethod
def apply(self, U, ind=None, mu=None):
"""Apply the operator.
Parameters
----------
U
|VectorArray| of vectors to which the operator is applied.
ind
The indices of the vectors in `U` to which the operator shall be
applied. (See the |VectorArray| documentation for further details.)
mu
The |Parameter| for which to evaluate the operator.
Returns
-------
|VectorArray| of the operator evaluations.
"""
pass
@abstractmethod
def apply2(self, V, U, pairwise, U_ind=None, V_ind=None, mu=None, product=None):
"""Treat the operator as a 2-form by calculating (V, A(U)).
In particular, if ( , ) is the Euclidean product and A is a linear operator
given by multiplication with a matrix M, then ::
A.apply2(V, U) = V^T*M*U
Parameters
----------
V
|VectorArray| of the left arguments V.
U
            |VectorArray| of the right arguments U.
pairwise
If `False`, the 2-form is applied to all combinations of vectors
in `V` and `U`, i.e. ::
L.apply2(V, U).shape = (len(V_ind), len(U_ind)).
If `True`, the vectors in `V` and `U` are applied in pairs, i.e.
`V` and `U` must be of the same length and we have ::
L.apply2(V, U).shape = (len(V_ind),) = (len(U_ind),).
V_ind
The indices of the vectors in `V` to which the operator shall be
applied. (See the |VectorArray| documentation for further details.)
U_ind
The indices of the vectors in `U` to which the operator shall be
applied. (See the |VectorArray| documentation for further details.)
mu
The |Parameter| for which to evaluate the operator.
product
The scalar product used in the expression `(V, A(U))` given as
an |Operator|. If `None`, the euclidean product is chosen.
Returns
-------
A |NumPy array| of all 2-form evaluations.
"""
pass
@abstractmethod
def apply_adjoint(self, U, ind=None, mu=None, source_product=None, range_product=None):
"""Apply the adjoint operator.
For a linear operator A the adjoint A^* of A is given by ::
(A^*v, u)_s = (v, Au)_r
where ( , )_s and ( , )_r denote the scalar products on the source
and range space of A. If A and the two products are given by the
matrices M, P_s and P_r, then::
A^*v = P_s^(-1) * M^T * P_r * v
with M^T denoting the transposed of M. Thus, if ( , )_s and ( , )_r
are the euclidean products, A^*v is simply given by multiplication of
the matrix of A with v from the left.
Parameters
----------
U
|VectorArray| of vectors to which the adjoint operator is applied.
ind
The indices of the vectors in `U` to which the operator shall be
applied. (See the |VectorArray| documentation for further details.)
mu
The |Parameter| for which to apply the adjoint operator.
source_product
The scalar product on the source space given as an |Operator|.
If `None`, the euclidean product is chosen.
range_product
The scalar product on the range space given as an |Operator|.
If `None`, the euclidean product is chosen.
Returns
-------
|VectorArray| of the adjoint operator evaluations.
"""
pass
@abstractmethod
def apply_inverse(self, U, ind=None, mu=None, options=None):
"""Apply the inverse operator.
Parameters
----------
U
|VectorArray| of vectors to which the inverse operator is applied.
ind
The indices of the vectors in `U` to which the operator shall be
applied. (See the |VectorArray| documentation for further details.)
mu
The |Parameter| for which to evaluate the inverse operator.
options
Dictionary of options for the inversion algorithm. The dictionary
has to contain the key `'type'` whose value determines which inversion
algorithm is to be used. All other items represent options specific to
this algorithm. `options` can also be given as a string, which is then
interpreted as the type of inversion algorithm. If `options` is `None`,
a default algorithm with default options is chosen. Available algorithms
and their default options are provided by
:attr:`~OperatorInterface.invert_options`.
Returns
-------
|VectorArray| of the inverse operator evaluations.
Raises
------
InversionError
The operator could not be inverted.
"""
pass
@abstractmethod
def jacobian(self, U, mu=None):
"""Return the operator's Jacobian.
Parameters
----------
U
Length 1 |VectorArray| containing the vector for which to compute
the jacobian.
mu
The |Parameter| for which to compute the Jacobian.
Returns
-------
|Operator| representing the Jacobian.
"""
pass
@abstractmethod
def as_vector(self, mu=None):
"""Return vector representation of linear functional or vector operator.
This method may only be called on linear functionals, i.e. linear operators
with `range.dim == 1` and |NumpyVectorArray| as :attr:`~OperatorInterface.range.type`,
or on operators describing vectors, i.e. linear operators with
        `source.dim == 1` and |NumpyVectorArray| as :attr:`~OperatorInterface.source.type`.
In the case of a functional, the identity ::
self.as_vector(mu).dot(U) == operator.apply(U, mu)
holds, whereas in the case of a vector like operator we have ::
operator.as_vector(mu) == operator.apply(NumpyVectorArray(1), mu).
Parameters
----------
mu
The |Parameter| for which to return a vector representation.
Returns
-------
V
|VectorArray| of length 1 containing the vector representation. We have
`V.dim == self.source.dim`, `type(V) == self.source.type` for functionals
            and `V.dim == self.range.dim`, `type(V) == self.range.type` for vector-like
operators.
"""
pass
@abstractmethod
def assemble(self, mu=None):
"""Assemble the operator for a given parameter.
What the result of the assembly is strongly depends on the given operator.
For instance, a matrix-based operator will assemble its matrix, a |LincombOperator|
will try to form the linear combination of its operators, whereas an arbitrary
operator might simply return a :class:`~pymor.operators.constructions.FixedParameterOperator`.
The only assured property of the assembled operator is that it is no longer
parametric.
Parameters
----------
mu
The |Parameter| for which to assemble the operator.
Returns
-------
Parameter-independent, assembled |Operator|.
"""
pass
@abstractstaticmethod
def lincomb(operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):
"""DEPRECATED! Use :func:`pymor.operators.constructions.LincombOperator` instead.
"""
pass
def assemble_lincomb(self, operators, coefficients, name=None):
"""Try to assemble a linear combination of the given operators.
This method is called in the `assemble` method of |LincombOperator|. If an
assembly of the given linear combination is possible, e.g. the linear
combination of the system matrices of the operators can be formed, then
the assembled operator is returned. Otherwise, the method returns
`None` to indicate that assembly is not possible.
Parameters
----------
operators
List of |Operators| whose linear combination is formed.
coefficients
List of the corresponding linear coefficients. (In contrast to
:meth:`~pymor.operators.constructions.lincomb`, these coefficients are
always numbers, not |ParameterFunctionals|.)
name
Name of the assembled operator.
Returns
-------
The assembled |Operator| if assembly is possible, otherwise `None`.
"""
return None
@abstractmethod
def projected(self, source_basis, range_basis, product=None, name=None):
"""Project operator to subspaces of the source and range space.
Denote `self` by A. Given a scalar product ( ⋅, ⋅), and vectors b_1, ..., b_N,
c_1, ..., c_M, the projected operator A_P is defined by ::
[ A_P(e_j) ]_i = ( c_i, A(b_j) )
for all i,j, where e_j denotes the j-th canonical basis vector of R^N.
In particular, if the c_i are orthonormal w.r.t. the given product,
then A_P is the coordinate representation w.r.t. the b_i/c_i bases
of the restriction of A to span(b_i) concatenated with the orthogonal
projection onto span(c_i).
From another point of view, if A is viewed as a bilinear form
(see :meth:`~OperatorInterface.apply2`) and ( ⋅, ⋅ ) is the Euclidean
        product, then A_P represents the matrix of the bilinear form restricted
        to span(b_i) / span(c_i) (w.r.t. the b_i/c_i bases).
How the projected operator is realized will depend on the implementation
of the operator to project. While a projected |NumpyMatrixOperator| will
again be a |NumpyMatrixOperator|, only a
:class:`pymor.operators.basic.ProjectedOperator` will be returned
in general. (Note that the latter will not be suitable to obtain an
efficient offline/online-decomposition for reduced basis schemes.)
A default implementation is provided in |OperatorBase|.
.. warning::
            No check is performed whether the b_i and c_j are orthonormal or linearly
            independent.
Parameters
----------
source_basis
The b_1, ..., b_N as a |VectorArray| or `None`. If `None`, no restriction of
the source space is performed.
range_basis
The c_1, ..., c_M as a |VectorArray|. If `None`, no projection in the range
space is performed.
product
An |Operator| representing the scalar product. If `None`, the
Euclidean product is chosen.
name
Name of the projected operator.
Returns
-------
The projected |Operator|.
"""
pass
@abstractmethod
def __add__(self, other):
"""Sum of two operators"""
pass
@abstractmethod
def __radd__(self, other):
"""Sum of two operators"""
pass
@abstractmethod
def __mul__(self, other):
"""Product of operator by a scalar"""
pass
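# --- Editor's note: hedged illustration, not part of the original module. ---
# It writes out the matrix identities quoted in the `apply2` and `apply_adjoint`
# docstrings with plain NumPy; M, P_s, P_r, u and v are invented for the example.
#
# import numpy as np
# M = np.array([[2., 0.], [1., 3.]])                     # matrix of a linear operator A
# u = np.array([1., 2.])
# v = np.array([3., 4.])
# two_form = v.dot(M.dot(u))                             # A.apply2(V, U) == V^T*M*U
# P_s, P_r = np.eye(2), np.eye(2)                        # source/range scalar products
# adjoint_v = np.linalg.solve(P_s, M.T.dot(P_r.dot(v)))  # A^*v = P_s^(-1) * M^T * P_r * v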
| StarcoderdataPython |
1634399 | from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class WxUserTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
token['openid'] = user.openid
token['unionid'] = user.unionid
return token
| StarcoderdataPython |
165659 | # -*- coding: utf-8 -*-
###
# © 2018 The Board of Trustees of the Leland Stanford Junior University
# <NAME>
# <EMAIL>
###
"""
The official Python client for Pulsar LIMS.
Required Environment Variables:
1) PULSAR_API_URL
2) PULSAR_TOKEN
"""
import logging
import os
import sys
from urllib.parse import urlparse
#####################
### MAIL SETTINGS ###
#####################
MAIL_DOMAIN = os.environ.get("MAILGUN_DOMAIN","")
# We don't want utils.send_mail trying to send mail when there isn't a domain specified.
# I tried this, and we strangly get a 200 back, so this could cause issues. The fix is to
# have MAIL_SERVER_URL empty in that case.
MAIL_SERVER_URL = ""
if MAIL_DOMAIN:
MAIL_SERVER_URL = os.path.join("https://api.mailgun.net/v3/{}/messages".format(MAIL_DOMAIN))
MAIL_AUTH = ("api", os.environ.get("MAILGUN_API_KEY",""))
DEFAULT_TO = [os.environ.get("SUPPORT_EMAIL_ADDR", "")]
#: The directory that contains the log files created by the `Model` class.
LOG_DIR = "Pulsarpy_Logs"
URL = os.environ.get("PULSAR_API_URL", "")
HOST = ""
if URL:
HOST = urlparse(URL).hostname
API_TOKEN = os.environ.get("PULSAR_TOKEN", "")
#: The name of the debug ``logging`` instance.
DEBUG_LOGGER_NAME = "ppy_debug"
#: The name of the error ``logging`` instance created in the ``pulsarpy.models.Model`` class.
#: and referenced elsewhere.
ERROR_LOGGER_NAME = "ppy_error"
#: The name of the POST ``logging`` instance created in the ``pulsarpy.models.Model`` class.
#: and referenced elsewhere.
POST_LOGGER_NAME = "ppy_post"
#: A ``logging`` instance that logs all messages sent to it to STDOUT.
debug_logger = logging.getLogger(DEBUG_LOGGER_NAME)
level = logging.DEBUG
debug_logger.setLevel(level)
f_formatter = logging.Formatter('%(asctime)s:%(name)s:\t%(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(level)
ch.setFormatter(f_formatter)
debug_logger.addHandler(ch)
| StarcoderdataPython |
4828435 | """Build extension to generate immutable JavaScript protocol buffers.
Usage:
immutable_js_proto_library: generates a immutable JavaScript implementation
for an existing proto_library.
Example usage:
proto_library(
name = "foo",
srcs = ["foo.proto"],
)
immutable_js_proto_library(
name = "foo_immutable_js_proto",
deps = [":foo"],
)
"""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(":immutable_js_common.bzl", "create_js_lib_struct", "create_js_provider", "js_attrs")
ImmutableJspbInfo = provider(
"Provider for the immutable_js_proto compilation.\n" +
"NOTE: Data under '_private_' is considered private internal data so do not use.\n" +
"This provider is exported for only particular use cases and you should talk to us" +
"to verify your use case.",
fields = ["js", "_private_"],
)
def _immutable_js_proto_library_aspect_impl(target, ctx):
srcs = target[ProtoInfo].direct_sources
transitive_srcs = target[ProtoInfo].transitive_sources
out_srcs = []
if srcs:
output = ctx.actions.declare_directory(ctx.label.name + "-improto")
out_srcs = [output]
command = """
set -e -o pipefail
rm -rf {output}
mkdir -p {output}
mkdir -p {genfiles}
{protoc} --plugin=protoc-gen-immutable_js_protobuf={protoc_plugin} \
--proto_path=. \
--proto_path={genfiles} \
--immutable_js_protobuf_out={output} \
{proto_sources}
js_files=$(find {output} -name '*.js')
chmod -R 664 $js_files
{clang_format} -style=Google -i $js_files
""".format(
clang_format = ctx.executable._clang_format.path,
output = output.path,
protoc = ctx.executable._protocol_compiler.path,
protoc_plugin = ctx.executable._protoc_gen_immutable_js.path,
genfiles = ctx.configuration.genfiles_dir.path,
proto_sources = " ".join([s.path for s in srcs]),
)
(resolved_inputs, resolved_command, input_manifest) = ctx.resolve_command(command = command)
ctx.actions.run_shell(
command = resolved_command,
inputs = depset(resolved_inputs, transitive = [transitive_srcs]),
outputs = [output],
tools = [
ctx.executable._protocol_compiler,
ctx.executable._protoc_gen_immutable_js,
ctx.executable._clang_format,
],
input_manifests = input_manifest,
progress_message = "Generating immutable jspb files",
)
transitive_runfiles = [dep[ImmutableJspbInfo]._private_.runfiles for dep in ctx.rule.attr.deps]
deps = [dep[ImmutableJspbInfo].js for dep in ctx.rule.attr.deps]
exports = [dep[ImmutableJspbInfo].js for dep in ctx.rule.attr.exports]
js_provider = create_js_provider(
ctx,
srcs = out_srcs,
deps = deps,
runtime_deps = ctx.attr._runtime_deps,
exports = (deps if not srcs else []) + exports,
# Use unique artifact suffix to avoid conflicts with other aspects on the same target.
artifact_suffix = "_immutable_js_proto_library_aspect",
)
return [ImmutableJspbInfo(
js = js_provider,
_private_ = struct(runfiles = depset(out_srcs, transitive = transitive_runfiles)),
)]
immutable_js_proto_library_aspect = aspect(
implementation = _immutable_js_proto_library_aspect_impl,
attr_aspects = ["deps", "exports"],
attrs = dicts.add(js_attrs(), {
"_protocol_compiler": attr.label(
executable = True,
cfg = "host",
default = Label("//third_party:protocol_compiler"),
),
"_protoc_gen_immutable_js": attr.label(
executable = True,
cfg = "host",
default = Label(
"//java/com/google/protobuf/contrib/immutablejs/internal_do_not_use:ImmutableJspbCompiler",
),
),
"_clang_format": attr.label(
executable = True,
allow_files = True,
cfg = "host",
default = Label("//third_party:clang-format"),
),
"_runtime_deps": attr.label_list(
default = [
Label("//java/com/google/protobuf/contrib/immutablejs/internal_do_not_use:runtime"),
],
),
"_jar": attr.label(
cfg = "host",
executable = True,
default = Label("@bazel_tools//tools/jdk:jar"),
),
}),
fragments = ["js"],
provides = [ImmutableJspbInfo],
)
def _immutable_js_proto_library_rule_impl(ctx):
if len(ctx.attr.deps) != 1:
fail("Only one deps entry allowed")
dep = ctx.attr.deps[0]
# Create a new js provider to create a blaze level ExtraAction, so
# that this rule gets indexed by Cymbals table.
js_provider = create_js_provider(
ctx,
exports = [dep[ImmutableJspbInfo].js],
)
runfiles = dep[ImmutableJspbInfo]._private_.runfiles
return create_js_lib_struct(
js_provider = js_provider,
extra_providers = [
DefaultInfo(runfiles = ctx.runfiles(transitive_files = runfiles)),
],
)
immutable_js_proto_library = rule(
implementation = _immutable_js_proto_library_rule_impl,
attrs = dicts.add(js_attrs(), {
"deps": attr.label_list(
providers = [ProtoInfo],
aspects = [immutable_js_proto_library_aspect],
),
}),
fragments = ["js"],
)
| StarcoderdataPython |
3261491 | from nose.tools import assert_equal
from pyecharts.charts import Line
def test_chart_append_color():
x_data = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
y_data1 = [140, 232, 101, 264, 90, 340, 250]
y_data2 = [120, 282, 111, 234, 220, 340, 310]
c = (
Line()
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="品类 1",
y_axis=y_data1,
color='#80FFA5')
.add_yaxis(
series_name="品类 2",
y_axis=y_data2,
color='#00DDFF')
)
c.render()
default_colors = (
"#c23531 #2f4554 #61a0a8 #d48265 #749f83 #ca8622 #bda29a #6e7074 "
"#546570 #c4ccd3 #f05b72 #ef5b9c #f47920 #905a3d #fab27b #2a5caa "
"#444693 #726930 #b2d235 #6d8346 #ac6767 #1d953f #6950a1 #918597"
).split()
expected_result = ['#80FFA5', '#00DDFF', *default_colors]
assert_equal(c.colors, expected_result)
| StarcoderdataPython |
1789456 | from typing import Any, List
import networkx as nx
from interface import Interface, implements
from .builders.graph_builders import GraphBuilderInterface, TextGCNGraphBuilder
from .model.document import Document
from .model.graph_matrix import GraphMatrix
from .nlp.pieplines import ProcessingPipeline, ProcessingPipelineInterface
from .nlp.processors import Lemmatizer
from .presenters.graph_presenter import GraphPresenter
from .readers.reading_controller import ReadingController
class GBTRInterface(Interface):
"""Main module."""
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
"""Transform given documents corpus to graph representation.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
List[GraphMatrix]
List of prepared graphs.
            If the method represents the whole corpus as a single graph,
            a one-element list is returned.
"""
class GBTR(implements(GBTRInterface)):
def __init__(
self,
reading_controller: ReadingController,
nlp_pipeline: ProcessingPipelineInterface,
graph_builder: GraphBuilderInterface
):
self._data: List[Document] = None
        self._reading_controller = reading_controller
        self._nlp_pipeline = nlp_pipeline
        self._graph_builder = graph_builder
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
self._data = self._reading_controller.read_data(source)
# TODO
# consider parallel processing
for document in self._data:
            document.text = self._nlp_pipeline.process(document.text)
return self._graph_builder.get_graph(self._data)
class TextGCN:
"""Implementation of graph representation for TextGCN."""
def __call__(
self,
source: Any
) -> nx.Graph:
"""Returns TextGCN based grapg representation for given corpus.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
nx.Graph
Graph representation as Networkx Graph object.
"""
gbtr = GBTR(
reading_controller=ReadingController(),
nlp_pipeline=ProcessingPipeline([
# TODO
Lemmatizer()
]),
graph_builder=TextGCNGraphBuilder()
)
graph_matrix = gbtr.get_graph(source)[0]
graph_presenter = GraphPresenter()
return graph_presenter.to_nx(graph_matrix)
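# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Based on the docstring above: the corpus is a list of {"text", "label"} dicts
# and the call returns a networkx.Graph. The two sample documents are invented.
#
# corpus = [
#     {"text": "graph neural networks for text", "label": "ml"},
#     {"text": "text classification with graphs", "label": "ml"},
# ]
# graph = TextGCN()(corpus)
# print(graph.number_of_nodes(), graph.number_of_edges())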
| StarcoderdataPython |
3256796 | '''
altimu10v5: Main module
Copyright 2017, <NAME>
Licensed under MIT.
'''
from .lsm6ds33 import LSM6DS33
from .lis3mdl import LIS3MDL
from .lps25h import LPS25H
class IMU(object):
""" Set up and control Pololu's AltIMU-10v5.
"""
def __init__(self):
super(IMU, self).__init__()
self.lsm6ds33 = LSM6DS33()
self.gyroAccelEnabled = False
self.lis3mdl = LIS3MDL()
self.barometerEnabled = False
self.lps25h = LPS25H()
self.magnetometerEnabled = False
def __del__(self):
del(self.lsm6ds33)
del(self.lis3mdl)
del(self.lps25h)
def enable(self, gyroAccel=True, barometer=True, magnetometer=True):
""" Enable the given devices. """
if gyroAccel:
self.lsm6ds33.enable()
self.gyroAccelEnabled = True
if barometer:
self.lps25h.enable()
self.barometerEnabled = True
if magnetometer:
self.lis3mdl.enable()
self.magnetometerEnabled = True
def get_complementary_angles(self, delta_t=0.05):
""" Calculate combined angles of accelerometer and gyroscope
using a complementary filter.
"""
if not self.gyroAccelEnabled:
raise(Exception('Gyroscope and accelerometer are not enabled!'))
self.complementary_angles = [0, 0]
complementary_filter_constant = 0.98
accel_angles = self.lsm6ds33.get_accelerometer_angles()
gyro_angular_velocity = self.lsm6ds33.get_gyro_angular_velocity()
self.complementary_angles[0] = complementary_filter_constant \
* (self.complementary_angles[0] + (gyro_angular_velocity[0] * delta_t)) \
+ (1 - complementary_filter_constant) \
* accel_angles[0]
self.complementary_angles[1] = complementary_filter_constant \
* (self.complementary_angles[1] + (gyro_angular_velocity[1] * delta_t)) \
+ (1 - complementary_filter_constant) \
* accel_angles[1]
return self.complementary_angles
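# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Minimal polling loop; assumes the AltIMU-10v5 is wired to the expected I2C bus
# and that indices 0/1 of the returned angles correspond to the two tilt axes.
#
# import time
# imu = IMU()
# imu.enable()
# while True:
#     angle_x, angle_y = imu.get_complementary_angles(delta_t=0.05)
#     print(angle_x, angle_y)
#     time.sleep(0.05)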
| StarcoderdataPython |
50761 | <reponame>DCCouncil/dc-law-tools<filename>build_xml/insert_tables.py
"""
The Lexis parser does not parse tables, so this script
inserts manually-created html tables defined in tables.xml.
tables.xml *must be* updated when a new version of the code
comes out.
"""
import os.path
import lxml.etree as etree, re
DIR = os.path.abspath(os.path.dirname(__file__))
src_file = DIR + '/../working_files/dccode-t1-ch15.xml'
tables_path = DIR + '/tables.xml'
dst_file = DIR + '/../working_files/dccode-tables.xml'
num_re = re.compile('<num>(?P<num>.+?)</num>')
table_re = re.compile(r'@@TABLE@@')
pict_re = re.compile(r'@@PICT@@')
def insert_tables():
with open(src_file) as f:
xml = f.read() # etree.parse(f).getroot()
with open(tables_path) as f:
Tables = etree.parse(f).getroot()
sections = xml.split('<section>\n')
print('inserting tables...')
out = []
for section in sections:
try:
num = num_re.search(section).group(1)
except:
import ipdb
ipdb.set_trace()
section_tables = Tables.find('section[@id="{}"]'.format(num))
if section_tables is not None:
tables = section_tables.getchildren()
i = 0
def replacement(match):
nonlocal i
if i >=len(tables):
import ipdb
ipdb.set_trace()
table = tables[i]
if table.getchildren():
out = etree.tostring(table, pretty_print=True, encoding='utf-8').decode('utf-8')
else:
out = ''
table.set('inserted', 'true')
i = i + 1
return out
section = table_re.sub(replacement, section)
elif '@@TABLE@@' in section:
print('missing tables for section', num)
# special case: picture converted to table
if num == '16-916.01':
table = Tables.find('section[@id="16-916.01"]/table'.format(num))
table.set('inserted', 'true')
table_str = etree.tostring(table, pretty_print=True, encoding='utf-8').decode('utf-8')
section = section.replace('@@PICT@@', table_str, 1)
# delete 11 subsequent @@PICT@@
section = section.replace('<text>@@PICT@@</text>', '', 11)
if '@@PICT@@' in section:
i = 0
def replacement(match):
nonlocal i
i += 1
return '<img src="./{}-{}.jpeg" />'.format(num, i)
section = pict_re.sub(replacement, section)
out.append(section)
if Tables.xpath('section/table[not(@inserted)]'):
import ipdb
ipdb.set_trace()
raise Exception('some tables not inserted')
out = '<section>\n'.join(out).encode('utf-8')
dom = etree.fromstring(out)
# remove any empty text elements
emptyTextNodes = dom.xpath('//text[not(text() or *)]')
for node in emptyTextNodes:
node.getparent().remove(node)
with open(dst_file, 'wb') as f:
f.write(etree.tostring(dom, pretty_print=True, encoding="utf-8"))
if __name__ == '__main__':
insert_tables()
| StarcoderdataPython |
1617858 | <filename>examples/diff_gpmp2_nonholonomic_example.py
#!/usr/bin/env python
import os, sys
sys.path.insert(0, "..")
import matplotlib.pyplot as plt
import numpy as np
import pprint
import time
import torch
from diff_gpmp2.env.env_2d import Env2D
from diff_gpmp2.robot_models import PointRobotXYH
from diff_gpmp2.gpmp2.diff_gpmp2_planner import DiffGPMP2Planner
from diff_gpmp2.utils.helpers import rgb2gray, load_params
from diff_gpmp2.utils.sdf_utils import sdf_2d
from diff_gpmp2.utils.planner_utils import straight_line_traj
from datasets import PlanningDataset
use_cuda = False
np.set_printoptions(threshold=np.nan, linewidth=np.inf)
pp = pprint.PrettyPrinter()
torch.set_default_tensor_type(torch.DoubleTensor)
use_cuda = torch.cuda.is_available() if use_cuda else False
device = torch.device('cuda') if use_cuda else torch.device('cpu')
env_file = os.path.abspath("../diff_gpmp2/env/simple_2d/4.png")
plan_param_file = os.path.abspath('gpmp2_xyh_params.yaml')
robot_param_file = os.path.abspath('robot_2d.yaml')
env_param_file = os.path.abspath('env_2d_params.yaml')
np.random.seed(0)
torch.manual_seed(0)
#Load parameters
env_data, planner_params, gp_params, obs_params, optim_params, robot_data = load_params(plan_param_file, robot_param_file, env_param_file, device)
env_params = {'x_lims': env_data['x_lims'], 'y_lims': env_data['y_lims']}
env_image = plt.imread(env_file)
if len(env_image.shape) > 2:
env_image = rgb2gray(env_image)
cell_size = (env_params['x_lims'][1] - env_params['x_lims'][0])/env_image.shape[0]
env_sdf = sdf_2d(env_image, res = cell_size)
#2D Point robot model
robot = PointRobotXYH(robot_data['sphere_radius'][0], use_cuda=use_cuda)
start_conf = torch.tensor([[env_params['x_lims'][0]+1., env_params['y_lims'][0]+ 1., 0.0]], device=device)
start_vel = torch.tensor([[0., 0., 0.]], device=device)
goal_conf = torch.tensor([[env_params['x_lims'][1]-1., env_params['y_lims'][1]-1., np.pi/2.0]], device=device)#[17, 14])
goal_vel = torch.tensor([[0., 0., 0.]], device=device)
start = torch.cat((start_conf, start_vel), dim=1)
goal = torch.cat((goal_conf, goal_vel), dim=1)
th_init = straight_line_traj(start_conf, goal_conf, planner_params['total_time_sec'], planner_params['total_time_step'], planner_params['dof'], device)
th_init.requires_grad_(True)
im = torch.tensor(env_image, device=device)
sdf = torch.tensor(env_sdf, device=device)
planner = DiffGPMP2Planner(gp_params, obs_params, planner_params, optim_params, env_params, robot, use_cuda=use_cuda)
start_t = time.time()
print th_init
th_final, _, err_init, err_final, err_per_iter, err_ext_per_iter, k, time_taken = planner.forward(th_init.unsqueeze(0), start.unsqueeze(0), goal.unsqueeze(0), im.unsqueeze(0).unsqueeze(0), sdf.unsqueeze(0).unsqueeze(0))
print th_final
pp.pprint('Initial cost = %f'%(err_init[0]))
pp.pprint('Final cost = %f'%(err_final[0]))
pp.pprint('Iterations taken = %d'%(k[0]))
pp.pprint('Time taken = %f (seconds)'%(time_taken[0]))
path_init = []
path_final = []
th_init_np = th_init.cpu().detach().numpy()
th_final_np = th_final[0].cpu().detach().numpy()
for i in range(planner_params['total_time_step']+1):
path_init.append(th_init_np[i, 0:planner_params['dof']])
path_final.append(th_final_np[i, 0:planner_params['dof']])
#Plot the final results
env = Env2D(env_params)
env.initialize_from_file(env_file)
env.initialize_plot(start_conf.cpu().numpy()[0], goal_conf.cpu().numpy()[0])
env.plot_edge(path_init, color='red')
env.plot_edge(path_final)
env.plot_signed_distance_transform()
fig=plt.figure()
plt.plot(err_per_iter[0])
plt.show()
| StarcoderdataPython |
3255757 | <reponame>kchida/aptly
"""
Testing DB operations
"""
from .cleanup import *
from .recover import *
| StarcoderdataPython |
3389876 | <filename>prolink/api/urls.py
from django.conf.urls import url
from rest_framework import routers
from prolink.api import views
router = routers.DefaultRouter()
urlpatterns = [
url(r'drawer/self/', views.DrawerTransactionsViewSet.as_view(), name='drawer-self'),
url(r'drawer/view-user/', views.UserDrawerViewset.as_view(), name='drawer-view'),
url(r'drawer/all-open/', views.AllOpenDrawersViewset.as_view(), name='drawers-open'),
url(r'drawer/transaction/', views.DrawerTransactionViewset.as_view(), name='transaction'),
] | StarcoderdataPython |
17658 | """
The key is to use a set to remember if we seen the node or not.
Next, think about how we are going to *remove* the duplicate node?
The answer is to simply link the previous node to the next node.
So we need to keep a pointer `prev` on the previous node as we iterate the linked list.
So, the solution.
Create a set `seen`. #[1]
Point pointer `prev` on the first node. `cuur` on the second.
Now we iterate trough the linked list.
* For every node, we add its value to `seen`. Move `prev` and `curr` forward. #[2]
* If we seen the node, we *remove* the `curr` node. Then move the curr forward. #[3]
Return the `head`
"""
class Solution(object):
def deleteDuplicates(self, head):
if head is None or head.next is None: return head
prev = head
curr = head.next
seen = set() #[1]
seen.add(prev.val)
while curr:
if curr.val not in seen: #[2]
seen.add(curr.val)
curr = curr.next
prev = prev.next
else: #[3]
prev.next = curr.next #remove
curr = curr.next
return head
| StarcoderdataPython |
1729704 | <gh_stars>0
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//form[@id='aspnetForm']/div[@id='lg-wrapper']/div[@class='lg-info-prd-details']/h1",
'price' : "//div[@class='lg-box-price-buy']/div[@class='lg-clb-left-buy']/div[@class='lg-price-new']/span",
'category' : "//div[@id='lg-wrapper']/ul[@class='lg-menu-dh']/li/a",
'description' : "//div[@class='tab-content']/div[@id='tab1']/div[@class='lg-text-parameters-top']/p",
'images' : "//ul[@class='lg-thumb-zoom-prd']/li/a/img/@data-src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'logicbuy.vn'
allowed_domains = ['logicbuy.vn']
start_urls = ['http://logicbuy.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['-\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['[a-zA-Z-0-9]+\w\.html'], deny=['Filter=','price=']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| StarcoderdataPython |
4843054 | import os
import json
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render
from logtailer.models import LogsClipboard, LogFile
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django.contrib.admin.views.decorators import staff_member_required
@staff_member_required
def read_logs(request):
context = {}
    return render(request, 'logtailer/log_reader.html', context)
def get_history(f, lines=0):
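    """Return roughly the last `lines` lines of the open file `f`, scanning
    backwards from the end of the file in fixed-size blocks."""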
buffer_size = 1024
f.seek(0, os.SEEK_END)
bytes = f.tell()
size = lines
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - buffer_size > 0:
# Seek back one whole buffer_size
f.seek(f.tell()+block*buffer_size, 0)
# read buffer
data.append(f.read(buffer_size))
else:
# file too small, start from beginning
f.seek(0, 0)
# only read what was not read
data.append(f.read(bytes))
lines_found = data[-1].count('\n')
size -= lines_found
bytes += block*buffer_size
block -= 1
return ''.join(data).splitlines(True)[-lines:]
@staff_member_required
def get_log_lines(request, file_id):
history = int(request.GET.get('history', 0))
try:
file_record = LogFile.objects.get(id=file_id)
except LogFile.DoesNotExist:
return HttpResponse(json.dumps([_('error_logfile_notexist')]),
content_type='text/html')
content = []
file = open(file_record.path, 'r')
if history > 0:
content = get_history(file, history)
content = [line.replace('\n','<br/>') for line in content]
else:
last_position = request.session.get('file_position_%s' % file_id)
file.seek(0, os.SEEK_END)
if last_position and last_position <= file.tell():
file.seek(last_position)
for line in file:
content.append('%s' % line.replace('\n','<br/>'))
request.session['file_position_%s' % file_id] = file.tell()
file.close()
return HttpResponse(json.dumps(content), content_type='application/json')
@staff_member_required
def save_to_clipoard(request):
LogsClipboard(name=request.POST['name'],
notes=request.POST['notes'],
logs=request.POST['logs'],
log_file=LogFile.objects.get(id=int(request.POST['file']))).save()
return HttpResponse(_('loglines_saved'), content_type='text/html')
| StarcoderdataPython |
59038 | <gh_stars>0
"""
Use zip to transpose data from a 4-by-3 matrix to a 3-by-4 matrix. There's actually a cool trick for this! Feel free to look at the solutions if you can't figure it out.
"""
data = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11))
# 0 1 2
# 3 4 5
# 6 7 8
# 9 10 11
#
# Transpose : [data]^T
#
# 0 3 6 9
# 1 4 7 10
# 2 5 8 11
data_transpose = tuple(zip(*data))
print(data)
print(data_transpose)
| StarcoderdataPython |
3344677 | <gh_stars>1-10
"""
Analysis functions
"""
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License
# author: <NAME>
import numpy as np
from scipy import integrate
from scipy.constants import codata
kb = codata.value('Boltzmann constant')
ev = codata.value('electron volt')
ev = -ev
class OneDimensionalChargeDensity:
"""
The :py:class:`polypy.analysis.OneDimensionalChargeDensity`
class converts one dimensional number densitie into
the charge density, electric field and electrostatic potential.
Args:
histogram_positions (:py:attr:`array_like`): Histogram locations.
atom_densities (:py:attr:`list`): List of histograms.
atom_charges (:py:attr:`list`): List of atom charges.
histogram_volume (:py:attr:`float`): Volume of the histograms.
timesteps (:py:attr:`float`): Simulation timestep.
"""
def __init__(self, histogram_positions, atom_densities, atom_charges, histogram_volume, timesteps):
self.histogram_positions = histogram_positions
self.atom_densities = atom_densities
self.atom_charges = atom_charges
self.histogram_volume = histogram_volume
self.timesteps = timesteps
self.scale = 14.3997584
def calculate_charge_density(self):
r"""
Calculates the charge density in one dimension.
Returns:
charge_density (:py:attr:`array_like`): Charge density.
"""
number_density = np.column_stack((self.atom_densities))
charges = np.asarray(self.atom_charges)
charge_density = np.sum(np.multiply(number_density, charges),
axis=1) / self.histogram_volume
return self.histogram_positions, charge_density/self.timesteps
def calculate_electric_field(self):
r"""
Calculates the electric field.
Returns:
e_field (:py:attr:`array_like`): Electric field.
"""
rho = self.calculate_charge_density()[1]
e_field = self.scale * integrate.cumtrapz(rho, self.histogram_positions, initial=0)
e_field = e_field - np.mean(e_field)
return self.histogram_positions, e_field
def calculate_electrostatic_potential(self):
r"""
Calculates the electrostatic potential.
Returns:
potential (:py:attr:`array_like`): Electrostatic potential.
"""
rho = self.calculate_charge_density()[1]
e_field = self.scale * integrate.cumtrapz(rho, self.histogram_positions, initial=0)
e_field = e_field - np.mean(e_field)
potential = -integrate.cumtrapz(e_field, self.histogram_positions, initial=0)
potential = potential / self.timesteps
return self.histogram_positions, potential
def system_volume(data):
"""
Calculate the volume at each timestep and return a volume as function of time.
Args:
data (:py:class:`polypy.read.Trajectory`): polypy Trajectory object.
Returns:
volume (:py:attr:`array_like`): Volume as a function of timestep.
step (:py:attr:`array_like`): Timestep.
"""
volume = []
step = []
for i in range(data.timesteps):
volume.append((np.dot(data.lv[i][0,:] , np.cross(data.lv[i][1,:], data.lv[i][2,:] ))))
step.append(i)
return volume, step
def conductivity(charge_carriers, volume, diff, temperature, hr):
"""
Calculate the ionic conductivity.
Args:
charge_carriers (:py:attr:`float`): Number of charge carriers.
volume (:py:attr:`float`): Average cell volume.
diff (:py:attr:`float`): Diffusion coefficient.
temperature (:py:attr:`float`): Temperature.
hr (:py:attr:`float`): Haven ratio.
Returns:
conductivity (:py:attr:`float`): Ionic conductivity.
"""
volume = volume * (10 ** -24)
diff = diff * (10 ** -8)
conc = charge_carriers / volume
EV = ev ** 2
constants = kb * temperature
conductivity = ((diff * conc) * EV) / constants
return conductivity * hr
def two_dimensional_charge_density(atoms_coords, atom_charges, bin_volume, timesteps):
"""
Calculates the charge density in two dimensions.
Args:
atoms_coords (:py:attr:`list`): List of atomic coordinates
atom_charges (:py:attr:`list`): List of atomic charges
bin_volume (:py:attr:`float`): Volume of histograms
Returns:
charge_density (:py:attr:`array_like`): Charge density.
"""
number_density = np.dstack((atoms_coords))
charges = np.asarray(atom_charges)
charge_density = np.sum(np.multiply(number_density,
charges), axis=2) / bin_volume
return (charge_density / timesteps)
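# Hedged usage sketch (not part of the module): a direct call to conductivity()
# with illustrative numbers only, just to show the expected call signature.
if __name__ == "__main__":
    sigma = conductivity(charge_carriers=500, volume=150000.0,
                         diff=1.0e-6, temperature=300.0, hr=1.0)
    print("ionic conductivity:", sigma)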
| StarcoderdataPython |
3230886 | """This problem was asked by Google.
Given an undirected graph represented as an adjacency matrix and an integer
k, write a function to determine whether each vertex in the graph can be
colored such that no two adjacent vertices share
the same color using at most k colors."""
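# A hedged sketch of one possible approach (not part of the original snippet):
# classic backtracking over the vertices, trying each of the k colors and
# checking the adjacency matrix for conflicts with already-colored neighbours.
def can_color(graph, k):
    n = len(graph)
    colors = [0] * n  # 0 means "uncolored"; valid colors are 1..k

    def safe(vertex, color):
        # A color is safe if no adjacent, already-colored vertex uses it.
        return all(not graph[vertex][other] or colors[other] != color
                   for other in range(n))

    def solve(vertex):
        if vertex == n:
            return True
        for color in range(1, k + 1):
            if safe(vertex, color):
                colors[vertex] = color
                if solve(vertex + 1):
                    return True
                colors[vertex] = 0  # backtrack
        return False

    return solve(0)

if __name__ == "__main__":
    triangle = [[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]]
    print(can_color(triangle, 2))  # False: a triangle needs three colors
    print(can_color(triangle, 3))  # True
 | StarcoderdataPython |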
4805962 | import threading
import socket
from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
from cac.client.scenes.select_server.list_box import ListBox, ListBoxItem
from cac.client.engine.game_object import GameObject
from cac.client.engine.events import EventPropagation
from cac.client.engine.curses_colour import get_colour_pair
from cac.client.engine.curses_text import render_text, \
TextAlignment, VerticalTextAlignment
class Server():
def __init__(self, name="",
zeroconf_server_name="", address="", port=1337):
self.address = address
self.name = name
self.zeroconf_server_name = zeroconf_server_name
self.port = port
class SelectAutoDiscoveryServer(GameObject):
def __init__(self):
super().__init__()
# server auto discovery
self._discovered_servers = []
self._discovered_servers_lock = threading.Lock()
self._zeroconf = None
self._browser = None
# discovered server selection
self._server_list_box = ListBox()
self._server_list_box_visible = False
def get_child_objects(self):
if self._server_list_box_visible:
return [self._server_list_box]
else:
return []
def process_event(self, event):
return EventPropagation.propagate_forward(self._server_list_box)
def update(self, delta_time):
# update the listbox contents
with self._discovered_servers_lock:
# items
self._server_list_box.items = [
ListBoxItem(srv.name, [f"{srv.address}:{srv.port}"], srv)
for srv in self._discovered_servers
]
# only make the listbox visible,
# if there is actually something to show...
self._server_list_box_visible = len(self._discovered_servers) > 0
# reposition the list box
w, h = self.size
self._server_list_box.position = 0, 0
self._server_list_box.size = w, h
def render(self, win):
w, h = self.size
win.erase()
colour = get_colour_pair(0, 0, 0, 1, 1, 1)
win.bkgd(colour)
if not self._server_list_box_visible:
render_text(
win, "Searching for servers...", 0, 0, w, h,
alignment=TextAlignment.CENTER,
valignment=VerticalTextAlignment.CENTER,
text_format=colour,
)
def start_discovery(self):
self._zeroconf = Zeroconf()
self._browser = ServiceBrowser(
self._zeroconf,
"_cac._tcp.local.",
handlers=[self.on_service_state_change]
)
def stop_discovery(self):
        self._zeroconf.close()
def on_service_state_change(self, zeroconf,
service_type, name,
state_change):
with self._discovered_servers_lock:
# remove it
self._discovered_servers = [
server
for server in self._discovered_servers
if server.zeroconf_server_name != name
]
# add service
if state_change is ServiceStateChange.Added:
info = zeroconf.get_service_info(service_type, name)
if info:
addr = socket.inet_ntoa(info.address)
port = info.port
zc_name = name
name = "Unnamed Server"
if info.properties and b"name" in info.properties:
name = info.properties[b"name"].decode("utf-8")
self._discovered_servers.append(
Server(name, zc_name, addr, port))
| StarcoderdataPython |
1701092 | <gh_stars>10-100
from typing import Optional
from mypy.nodes import Node
from mypy.types import Instance
from mypy.types import Type as MypyType
from mypy.types import TypeType
def get_definition(typ: MypyType, arg: str) -> Optional[Node]:
"""Gets definition of a type from SymbolTableNode."""
if isinstance(typ, Instance): # TODO: support Union types
return _get_defition_instance(typ, arg)
elif isinstance(typ, TypeType):
return _get_defition_type(typ, arg)
return None
def _get_defition_instance(typ: Instance, arg: str) -> Optional[Node]:
sym = typ.type.get(arg)
return sym.node if sym is not None else None
def _get_defition_type(typ: TypeType, arg: str) -> Optional[Node]:
if not isinstance(typ.item, Instance):
return None # it can be type var or union or etc
sym = typ.item.type.get(arg) # TODO: support Union types
return sym.node if sym is not None else None
| StarcoderdataPython |
3380299 | <gh_stars>0
from sanic import Sanic, Blueprint
from sanic.response import text, json
app = Sanic()
passing_bp = Blueprint('passing_blueprint')
failing_bp = Blueprint('failing_blueprint', url_prefix='/api/failing')
@passing_bp.get("/api/passing")
async def get_data(request):
return json({
"hello": "world",
"I am working if": "you see me",
"from": "the passing blueprint"
})
@passing_bp.post("/api/passing")
async def post_data(request):
return text("POST [PASSING] - {}\n\n{}".format(
request.headers, request.body
))
@failing_bp.get("/")
async def get_data(request):
return json({
"hello": "world",
"I am working if": "you see me",
"from": "the failing blueprint"
})
"""
This is the part that is failing
"""
@failing_bp.post("/")
async def post_data(request):
return text("POST [FAILING] - {}\n\n{}".format(
request.headers,
request.body
))
if __name__ == "__main__":
app.blueprint(passing_bp)
app.blueprint(failing_bp)
app.run(debug=True, host="0.0.0.0", port=8000)
| StarcoderdataPython |
3225672 | #!/usr/bin/env python
from my_data import Data
def main():
# Looks and feels like normal python objects
objectList = [Data(1), Data(2), Data(3)]
# Print them out
for dataObject in objectList:
print(dataObject)
# Show the Mutability
objectList[1].set_value(1234)
print(objectList[1])
# all native objects will be deallocated on close
if __name__ == "__main__":
main()
| StarcoderdataPython |
643 | from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
| StarcoderdataPython |
3242923 | from flask import render_template
class Emails(object):
message_cls = None
def __init__(self, config, message_cls, celery):
if message_cls:
self.message_cls = message_cls
if not self.message_cls:
try:
from flask_emails import Message
self.message_cls = Message
except ImportError:
pass
if not self.message_cls:
def send(*args, **kwargs):
raise RuntimeError('No flask_emails, and message_cls is not configured')
self.send = send
elif celery:
def send(*args, **kwargs):
return self._send(*args, **kwargs)
self.send_task = celery.task(send)
def send_delay(name, to, context, locale=None):
return self.send_task.delay(name, to, context, locale)
self.send = send_delay
else:
self.send = self._send
self.dkim_key = config.get('DKIM_KEY')
if not self.dkim_key and config.get('DKIM_KEY_PATH'):
self.dkim_key = open(config['DKIM_KEY_PATH']).read()
self.dkim_domain = config.get('DKIM_DOMAIN')
self.dkim_selector = config.get('DKIM_SELECTOR')
def create(self, name, context, locale):
subject_template = 'userflow/emails/{}_subject.txt'.format(name)
html_template = 'userflow/emails/{}.html'.format(name)
subject = render_template(subject_template, **context)
html = render_template(html_template, **context)
message = self.message_cls(subject=subject, html=html)
if self.dkim_key:
message.dkim(key=self.dkim_key, domain=self.dkim_domain,
selector=self.dkim_selector)
return message
def _send(self, name, to, context, locale=None):
message = self.create(name, context, locale)
message.send(to=to)
| StarcoderdataPython |
1741758 | #!/usr/bin/env python
import os
import errno
from rest_framework_ccbv.config import VERSION, REST_FRAMEWORK_VERSIONS
from rest_framework_ccbv.inspector import drfklasses
from rest_framework_ccbv.renderers import (DetailPageRenderer,
IndexPageRenderer,
LandPageRenderer,
ErrorPageRenderer,
SitemapRenderer)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main(out_folder='public'):
klasses = sorted(drfklasses.values(),
key=lambda x: (x.__module__, x.__name__))
for klass in drfklasses.values():
renderer = DetailPageRenderer(klasses, klass.__name__,
klass.__module__)
mkdir_p(os.path.join(out_folder, VERSION, klass.__module__))
renderer.render(filename=os.path.join(out_folder, VERSION,
klass.__module__,
klass.__name__ + '.html'))
renderer = IndexPageRenderer(klasses)
renderer.render(os.path.join(out_folder, VERSION, 'index.html'))
if VERSION == REST_FRAMEWORK_VERSIONS[-1]:
renderer = LandPageRenderer(klasses)
renderer.render(os.path.join(out_folder, 'index.html'))
renderer = ErrorPageRenderer(klasses)
renderer.render(os.path.join(out_folder, 'error.html'))
renderer = SitemapRenderer(klasses)
renderer.render(os.path.join(out_folder, 'sitemap.xml'))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1686835 | ###############################################################################
# prop_mod.py
###############################################################################
#
# Calculate mod without numpy issue
#
###############################################################################
import numpy as np
from tqdm import *
def mod(dividends, divisor):
""" return dividends (array) mod divisor (double)
"""
output = np.zeros(len(dividends))
for i in tqdm(range(len(dividends))):
output[i] = dividends[i]
done=False
while (not done):
if output[i] >= divisor:
output[i] -= divisor
elif output[i] < 0.:
output[i] += divisor
else:
done=True
return output
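# Hedged usage sketch (not part of the original file): wrap a few values into
# the range [0, divisor) to show what mod() returns.
if __name__ == "__main__":
    print(mod(np.array([370.0, -10.0, 25.0]), 360.0))  # -> [ 10. 350.  25.]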
| StarcoderdataPython |
3259967 | <gh_stars>10-100
from distutils.core import setup
setup(name='external_library', packages=['external_library'])
| StarcoderdataPython |
3208933 | from pulumi import export
import pulumi_keycloak as keycloak
realm = keycloak.Realm("new-python-realm",
realm="my-example-python-realm"
)
export("realm_id", realm.id)
| StarcoderdataPython |
3365660 | from django.conf.urls import patterns, url
urlpatterns = patterns('portfolio',
url(r'^$', 'views.home', name="home"),
url(r'^(?P<id>\d+)-(?P<slug>[-\w]+)$', 'views.project', name="project"),
)
| StarcoderdataPython |
57599 | <filename>src/manual_solve.py
#!/usr/bin/python
# Student Information
# Student Name: <NAME>
# Student ID: 17232977
# Git: https://github.com/manmayajob/ARC
import os, sys
import json
import numpy as np
import re
import matplotlib.pyplot as plt
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
def solve_6150a2bd(inputs):
'''
    In this function, the numpy package is used to reverse the order of elements in an array;
    the elements are reordered but the shape is preserved.
    Parameters: inputs = ARC test input grid (list of lists)
Returns: tst_rslt = returns a numpy ndarray, size of which depends on the task
'''
input_array = np.array(inputs)
tst_rslt = np.flip(input_array)
return tst_rslt
def solve_ce22a75a(inputs):
'''
    In this function I return a numpy array in which every non-zero cell of the pattern and its surrounding neighbours are set to 1.
'''
tst_rslt = np.array(inputs)
res = np.where(tst_rslt > 0)
try:
for i in list(zip(res[0], res[1])):
for x in range(-1, 2):
for y in range(-1, 2):
if (x != 0 or y != 0):
tst_rslt[i[0] - x][i[1] - y] = 1
tst_rslt[i[0]][i[1]] = 1
except IndexError: # catch the error
        pass  # skip neighbours that fall outside the grid (points at a border/corner)
return tst_rslt
def solve_1cf80156(inputs):
'''
In this function I return an numpy array by slicing the min and max value of rows and columns for the given input.
'''
input_array = np.array(inputs)
result = np.where(input_array > 0)
column_minium = min(result[1])
column_maximum = max(result[1])
row_minimum = min(result[0])
row_maximum = max(result[0])
tst_rslt = []
for i in range(row_minimum,row_maximum + 1):
for j in range(column_minium,column_maximum + 1):
tst_rslt.append(input_array[i][j])
tst_rslt = np.reshape(tst_rslt,(row_maximum-row_minimum + 1,column_maximum-column_minium + 1))
return tst_rslt
def main():
# Find all the functions defined in this file whose names are
# like solve_abcd1234(), and run them.
# regex to match solve_* functions and extract task IDs
p = r"solve_([a-f0-9]{8})"
tasks_solvers = []
# globals() gives a dict containing all global names (variables
# and functions), as name: value pairs.
for name in globals():
m = re.match(p, name)
if m:
# if the name fits the pattern eg solve_abcd1234
ID = m.group(1) # just the task ID
solve_fn = globals()[name] # the fn itself
tasks_solvers.append((ID, solve_fn))
for ID, solve_fn in tasks_solvers:
# for each task, read the data and call test()
directory = os.path.join("..", "data", "training")
json_filename = os.path.join(directory, ID + ".json")
data = read_ARC_JSON(json_filename)
test(ID, solve_fn, data)
def read_ARC_JSON(filepath):
"""Given a filepath, read in the ARC task data which is in JSON
format. Extract the train/test input/output pairs of
grids. Convert each grid to np.array and return train_input,
train_output, test_input, test_output."""
# Open the JSON file and load it
data = json.load(open(filepath))
# Extract the train/test input/output grids. Each grid will be a
# list of lists of ints. We convert to Numpy.
train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]
train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]
test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]
test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]
return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
"""Given a task ID, call the given solve() function on every
example in the task data."""
print(taskID)
train_input, train_output, test_input, test_output = data
print("Training grids")
for x, y in zip(train_input, train_output):
yhat = solve(x)
show_result(x, y, yhat)
print("Test grids")
for x, y in zip(test_input, test_output):
yhat = solve(x)
show_result(x, y, yhat)
plt_list = train_input + test_input
visualization_func(plt_list,taskID) # added to show the grid visualisation of the results
def show_result(x, y, yhat):
print("Input")
print(x)
print("Correct output")
print(y)
print("Our output")
print(yhat)
print("Correct?")
if y.shape != yhat.shape:
print(f"False. Incorrect shape: {y.shape} v {yhat.shape}")
else:
print(np.all(y == yhat))
def visualization_func(input,titlename):
'''
    Plot the grids one by one, emulating the ARC testing interface.
    Parameters: input = a list of test input and computed output grids.
                titlename = the task ID, used as the figure title.
'''
for i in range(len(input)):
plt.matshow(input[i])
plt.title(titlename)
plt.show()
if __name__ == "__main__": main()
| StarcoderdataPython |
3300089 | #!/usr/bin/python
"""
This program to take the month-wise backup from PostgreSQL
and store it to IBM COS.
"""
__author__ = "<NAME>"
__copyright__ = "(c) Copyright IBM 2020"
__credits__ = ["BAT DMS IBM Team"]
__email__ = "<EMAIL>"
__status__ = "Production"
# Import the required libraries
# Import the sys library to parse the arguments
import sys
# Import the parsing library
import configparser
# Import the logging library
import logging
import logging.config
# Import pandas for parquet conversion
import pandas as pd
# Initialising the configparser object to parse the properties file
CONFIG = configparser.ConfigParser()
# Import COS library
import ibm_boto3
from ibm_botocore.client import Config, ClientError
# Import Postgresql library
import psycopg2
# Set the logging criteria for the generated logs
LOGFILENAME = '/root/postgresql-backup-to-ibmcos/logs/cos_setup.log'
logging.config.fileConfig(fname='/root/postgresql-backup-ibmcos/conf/log_config.conf',
defaults={'logfilename': LOGFILENAME},
disable_existing_loggers=False)
# Get the logger specified in the file
logger = logging.getLogger(__name__)
# Specify the months along with the end date
MONTHDAYS = {'jan': '31', 'feb': '28', 'mar': '31', 'apr': '30', 'may': '31',
'jun': '30', 'jul': '31', 'aug': '31', 'sep': '30', 'oct': '31',
'nov': '30', 'dec': '31'}
# Specify the month as number
MONTHNUMBER = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04', 'may': '05',
'jun': '06', 'jul': '07', 'aug': '08', 'sep': '09', 'oct': '10',
'nov': '11', 'dec': '12'}
def set_env(p_app_config_file):
"""
:param p_app_config_file:
:return p_user, p_passwd, p_host, p_port, p_db, p_table,
cos_endpoint, cos_api_key_id, cos_auth_endpoint,
cos_resource_crn, bucket_name, path, month, year:
"""
p_user = None
p_passwd = None
p_host = None
p_port = None
p_db = None
p_table = None
cos_endpoint = None
cos_api_key_id = None
cos_auth_endpoint = None
cos_resource_crn = None
bucket_name = None
path = None
month = None
year = None
try:
# Reading configuration parameters from .ini file.
CONFIG.read(p_app_config_file)
# PostgreSQL Username
p_user = CONFIG['ApplicationParams']['p_user']
# PostgreSQL Password
p_passwd = CONFIG['ApplicationParams']['p_passwd']
# PostgreSQL Hostname
p_host = CONFIG['ApplicationParams']['p_host']
# PostgreSQL Port Number
p_port = CONFIG['ApplicationParams']['p_port']
# PostgreSQL Database Name
p_db = CONFIG['ApplicationParams']['p_db']
# PostgreSQL Table Name
p_table = CONFIG['ApplicationParams']['p_table']
# IBM Cloud Object Storage (COS) endpoint
cos_endpoint = CONFIG['ApplicationParams']['cos_endpoint']
# IBM COS api key id
cos_api_key_id = CONFIG['ApplicationParams']['cos_api_key_id']
# IBM COS authentication endpoint
cos_auth_endpoint = CONFIG['ApplicationParams']['cos_auth_endpoint']
# IBM COS resource crn
cos_resource_crn = CONFIG['ApplicationParams']['cos_resource_crn']
# IBM COS bucket name
bucket_name = CONFIG['ApplicationParams']['bucket_name']
"""
Path of the server directory for transforming dataframes into
Parquet format
"""
path = CONFIG['ApplicationParams']['path']
# Month for which the backup needs to be taken
month = CONFIG['ApplicationParams']['month']
# Year for which the backup needs to be taken
year = CONFIG['ApplicationParams']['year']
except Exception as e:
raise Exception('Exception encountered in set_env() while '
'setting up application configuration parameters.')
return \
p_user, p_passwd, p_host, p_port, p_db, p_table, cos_endpoint,\
cos_api_key_id, cos_auth_endpoint, cos_resource_crn, bucket_name,\
path, month, year
def extract_command_params(arguments):
"""
Passing arguments from command line.
"""
# There should be only one argument
if len(arguments) != 2:
raise Exception('Illegal number of arguments. '
'Usage: python3 cos_setup.py parameter.ini')
app_config_file = arguments[1]
return app_config_file
def create_connection(
p_user, p_passwd, p_host, p_port, p_db, cos_api_key_id,
cos_resource_crn, cos_auth_endpoint, cos_endpoint):
    """
    Function to create connections to PostgreSQL and IBM COS.
    :param p_user:
    :param p_passwd:
    :param p_host:
    :param p_port:
    :param p_db:
    :param cos_api_key_id:
    :param cos_resource_crn:
    :param cos_auth_endpoint:
    :param cos_endpoint:
    """
    global db_connection, cur, cos, cos_cli
# Create connection to PostgreSQL
db_connection = psycopg2.connect(user=p_user, password=<PASSWORD>,
host=p_host, port=p_port,
database=p_db)
cur = db_connection.cursor()
# Create resource
cos = ibm_boto3.resource("s3",
ibm_api_key_id=cos_api_key_id,
ibm_service_instance_id=cos_resource_crn,
ibm_auth_endpoint=cos_auth_endpoint,
config=Config(signature_version="oauth"),
endpoint_url=cos_endpoint
)
# Create client
cos_cli = ibm_boto3.client("s3",
ibm_api_key_id=cos_api_key_id,
ibm_service_instance_id=cos_resource_crn,
ibm_auth_endpoint=cos_auth_endpoint,
config=Config(signature_version="oauth"),
endpoint_url=cos_endpoint
)
def postgresql_process(p_table, event_date, filename):
"""
Function to copy the data into CSV files.
:param p_user:
:param p_passwd:
:param p_host:
:param p_port:
:param p_db:
:param p_table:
"""
logging.info("PostgreSQL start :")
try:
logging.info("=====Starting PostgreSQL Operation=====")
# Open a file into write mode
f_data_store = open(filename, "w")
# SQL to copy the data from database to csv file
copy_sql = "COPY (select * from " + p_table + " where event_date='" \
+ event_date + "') TO STDOUT WITH CSV DELIMITER ',' HEADER"
print("Database connection successful")
cur.copy_expert(sql=copy_sql, file=f_data_store)
# Closing the file
f_data_store.close()
logging.info("=====Ending PostgreSQL Operation=====")
except Exception as e:
logging.error('Exception message in main thread::::')
logging.error(e)
raise Exception('Exception message in main thread::::', e)
def cos_insertion(bucket_name, filename, path, year, month):
"""
Function to transform CSV into Parquet
and call upload_large_file function to upload data to
IBM COS in parallel.
:param bucket_name:
:param filename:
:param path:
:param year:
:param month:
"""
try:
logging.info("=====Starting uploading file=====")
# Create a dataframe holding a data
df = pd.read_csv(filename, dtype='unicode')
# Cast the datatype of specific column
df['service_id'] = df['service_id'].astype(str)
# Create a parquet file
parquet_filename = filename[0:-4] + '.parquet'
parquet_path = path + '/' + parquet_filename
# Store the parquet file
df.to_parquet(parquet_filename)
month_parquet_filename = year + '/' + month + '/' + parquet_filename
# Call the function to upload this parquet to IBM COS
upload_large_file(
cos_cli, bucket_name, month_parquet_filename, parquet_path)
logging.info("=====File is uploaded=====")
except Exception as e:
logging.error('Exception message in main thread::::')
logging.error(e)
raise Exception('Exception message in main thread::::', e)
def upload_large_file(cos_cli, bucket_name, item_name, file_path):
"""
Function to upload data to IBM COS buckets in parallel.
:param cos_cli:
:param bucket_name:
:param item_name:
:param file_path:
"""
print("Starting large file upload for {0} to bucket: {1}".format(
item_name,
bucket_name))
# set the chunk size to 5 MB
part_size = 1024 * 1024 * 5
# set threadhold to 5 MB
file_threshold = 1024 * 1024 * 5
# set the transfer threshold and chunk size in config settings
transfer_config = ibm_boto3.s3.transfer.TransferConfig(
multipart_threshold=file_threshold,
multipart_chunksize=part_size
)
# create transfer manager
transfer_mgr = ibm_boto3.s3.transfer.TransferManager(
cos_cli,
config=transfer_config)
try:
# initiate file upload
future = transfer_mgr.upload(file_path, bucket_name, item_name)
# wait for upload to complete
future.result()
print("Large file upload complete!")
except Exception as e:
print("Unable to complete large file upload: {0}".format(e))
finally:
transfer_mgr.shutdown()
def process_data(
p_user, p_passwd, p_host, p_port, p_db, p_table, cos_endpoint,
cos_api_key_id, cos_auth_endpoint, cos_resource_crn, bucket_name,
path, month, year):
"""
Function to take the backup of all dates for a month.
:param p_user:
:param p_passwd:
:param p_host:
:param p_port:
:param p_db:
:param p_table:
:param cos_endpoint:
:param cos_api_key_id:
:param cos_auth_endpoint:
:param cos_resource_crn:
:param bucket_name:
:param path:
:param month:
:param year:
:return:
"""
try:
logging.info("=====Starting Process=====")
# Fetching month number from dictionary
month_number = MONTHNUMBER[month]
# Fetching days in a particular month from dictionary
days = MONTHDAYS[month]
if month_number == '02':
# Handling leap year month
if int(year) % 4 == 0 and int(year) % 100 != 0 \
or int(year) % 400 == 0:
days = '29'
# Calling create connection function for PostgreSQL and IBM COS
create_connection(
p_user, p_passwd, p_host, p_port, p_db, cos_api_key_id,
cos_resource_crn, cos_auth_endpoint, cos_endpoint)
for i in range(1, int(days) + 1):
if len(str(i)) == 1:
date = '0' + str(i)
else:
date = str(i)
# Creating a date format
event_date = year + '-' + month_number + '-' + date
filename = event_date + '.csv'
print(filename)
# Calling the function to fetch the data
postgresql_process(p_table, event_date, filename)
"""
Calling the function to transform the data into Parquet
and store it into IBM COS
"""
cos_insertion(bucket_name, filename, path, year, month)
logging.info("=====Finishing Process=====")
# Closing PostgreSQL database connection
cur.close()
except Exception as e:
logging.error('Exception message in main thread::::')
logging.error(e)
raise Exception('Exception message in main thread::::', e)
def main():
"""
Usage: python3 cos_setup.py parameter.ini
:return:
"""
try:
logging.info("==== Processing Started ====")
# Extract command line parameters
p_app_config_file = extract_command_params(sys.argv)
# Set environment
p_user, p_passwd, p_host, p_port, p_db, p_table, cos_endpoint, \
cos_api_key_id, cos_auth_endpoint, cos_resource_crn, bucket_name,\
path, month, year = set_env(p_app_config_file)
# Process Data
logging.info(cos_endpoint)
logging.info(cos_api_key_id)
logging.info(cos_auth_endpoint)
logging.info(cos_resource_crn)
process_data(
p_user, p_passwd, p_host, p_port, p_db, p_table, cos_endpoint,
cos_api_key_id, cos_auth_endpoint, cos_resource_crn,
bucket_name, path, month, year)
logging.info("==== Processing Ended ====")
except Exception as e:
logging.error('Exception message in main thread::::')
logging.error(e)
raise Exception('Exception message in main thread::::', e)
if __name__ == '__main__':
main()
logging.shutdown()
| StarcoderdataPython |
3293514 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from keystoneauth1 import adapter
from openstack.common import tag
from openstack import exceptions
from openstack import resource
from openstack.tests.unit import base
from openstack.tests.unit.test_resource import FakeResponse
class TestTagMixin(base.TestCase):
def setUp(self):
super(TestTagMixin, self).setUp()
self.service_name = "service"
self.base_path = "base_path"
class Test(resource.Resource, tag.TagMixin):
service = self.service_name
base_path = self.base_path
resources_key = 'resources'
allow_create = True
allow_fetch = True
allow_head = True
allow_commit = True
allow_delete = True
allow_list = True
self.test_class = Test
self.request = mock.Mock(spec=resource._Request)
self.request.url = "uri"
self.request.body = "body"
self.request.headers = "headers"
self.response = FakeResponse({})
self.sot = Test.new(id="id", tags=[])
self.sot._prepare_request = mock.Mock(return_value=self.request)
self.sot._translate_response = mock.Mock()
self.session = mock.Mock(spec=adapter.Adapter)
self.session.get = mock.Mock(return_value=self.response)
self.session.put = mock.Mock(return_value=self.response)
self.session.delete = mock.Mock(return_value=self.response)
def test_tags_attribute(self):
res = self.sot
self.assertTrue(hasattr(res, 'tags'))
self.assertIsInstance(res.tags, list)
def test_fetch_tags(self):
res = self.sot
sess = self.session
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.links = {}
mock_response.json.return_value = {'tags': ['blue1', 'green1']}
sess.get.side_effect = [mock_response]
result = res.fetch_tags(sess)
# Check tags attribute is updated
self.assertEqual(['blue1', 'green1'], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags'
sess.get.assert_called_once_with(url)
def test_set_tags(self):
res = self.sot
sess = self.session
# Set some initial value to check rewrite
res.tags.extend(['blue_old', 'green_old'])
result = res.set_tags(sess, ['blue', 'green'])
# Check tags attribute is updated
self.assertEqual(['blue', 'green'], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags'
sess.put.assert_called_once_with(
url,
json={'tags': ['blue', 'green']}
)
def test_remove_all_tags(self):
res = self.sot
sess = self.session
# Set some initial value to check removal
res.tags.extend(['blue_old', 'green_old'])
result = res.remove_all_tags(sess)
# Check tags attribute is updated
self.assertEqual([], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags'
sess.delete.assert_called_once_with(url)
def test_remove_single_tag(self):
res = self.sot
sess = self.session
res.tags.extend(['blue', 'dummy'])
result = res.remove_tag(sess, 'dummy')
# Check tags attribute is updated
self.assertEqual(['blue'], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags/dummy'
sess.delete.assert_called_once_with(url)
def test_check_tag_exists(self):
res = self.sot
sess = self.session
sess.get.side_effect = [FakeResponse(None, 202)]
result = res.check_tag(sess, 'blue')
# Check tags attribute is updated
self.assertEqual([], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags/blue'
sess.get.assert_called_once_with(url)
def test_check_tag_not_exists(self):
res = self.sot
sess = self.session
mock_response = mock.Mock()
mock_response.status_code = 404
mock_response.links = {}
mock_response.content = None
sess.get.side_effect = [mock_response]
# ensure we get 404
self.assertRaises(
exceptions.NotFoundException,
res.check_tag,
sess,
'dummy',
)
def test_add_tag(self):
res = self.sot
sess = self.session
# Set some initial value to check add
res.tags.extend(['blue', 'green'])
result = res.add_tag(sess, 'lila')
# Check tags attribute is updated
self.assertEqual(['blue', 'green', 'lila'], res.tags)
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags/lila'
sess.put.assert_called_once_with(url)
def test_tagged_resource_always_created_with_empty_tag_list(self):
res = self.sot
self.assertIsNotNone(res.tags)
self.assertEqual(res.tags, list())
| StarcoderdataPython |
1771652 | <reponame>wu-uw/OpenCompetition<filename>src/tabular/feature_engineering/feature_generator/symbolic_learning.py
# encoding:utf-8
import pandas as pd
from gplearn.genetic import SymbolicTransformer
from sklearn.model_selection import train_test_split
class GPConfig:
def __init__(self, feature_cols, target_col, generation=1000, population_size=5000, hall_of_fame=100,
n_components=10,
parsimony_coefficient=0.0005, max_samples=0.9):
self.generation = generation
self.population_size = population_size
self.hall_of_fame = hall_of_fame
self.n_components = n_components
self.parsimony_coefficient = parsimony_coefficient
self.max_samples = max_samples
self.function_set = ['add', 'sub', 'mul', 'div', 'log', 'sqrt', 'abs', 'neg', 'max', 'min']
self.feature_cols = feature_cols
self.target_col = target_col
def get_feature_symbolic_learning(df, gp_config):
"""
Parameters
----------
df: pd.DataFrame,the input dataFrame.
gp_config: GPConfig object, the config object of gplearn.SymbolicTransformer.
Returns
-------
df_t: pd.DataFrame, df with the features of SymbolicTransformer trans.
The new features named like 'symbolic_component_{0 to n}'(n is the n_components)
"""
gp = SymbolicTransformer(generations=gp_config.generation, population_size=gp_config.population_size,
hall_of_fame=gp_config.hall_of_fame, n_components=gp_config.n_components,
function_set=gp_config.function_set,
parsimony_coefficient=gp_config.parsimony_coefficient,
max_samples=gp_config.max_samples, verbose=1,
random_state=0, n_jobs=3)
X = df[gp_config.feature_cols]
y = df[gp_config.target_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
gp.fit(X_train, y_train)
names = ["symbolic_component_" + str(i) for i in range(gp_config.n_components)]
res = pd.DataFrame(gp.transform(X),columns=names)
df_t = pd.concat([df,res],axis=1)
return df_t
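# Hedged usage sketch (not part of the module): synthetic data and deliberately
# small GP settings so the run finishes quickly; real runs would keep the
# defaults defined in GPConfig above.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    df_demo = pd.DataFrame(rng.rand(200, 3), columns=['f1', 'f2', 'f3'])
    df_demo['target'] = df_demo['f1'] * df_demo['f2'] + 0.01 * rng.rand(200)
    cfg = GPConfig(feature_cols=['f1', 'f2', 'f3'], target_col='target',
                   generation=5, population_size=200, n_components=3)
    df_out = get_feature_symbolic_learning(df_demo, cfg)
    print(df_out.filter(like='symbolic_component_').head())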
| StarcoderdataPython |
3310261 | <gh_stars>1-10
"""
LeetCode Problem: 1239. Maximum Length of a Concatenated String with Unique Characters
Link: https://leetcode.com/problems/maximum-length-of-a-concatenated-string-with-unique-characters/
Language: Python
Written by: <NAME>
"""
class Solution:
def maxLength(self, arr: List[str]) -> int:
def isUnique(string):
if len(string) == len(set(string)):
return True
else:
return False
def helper(arr, comb, start):
if isUnique(comb):
self.ans = max(self.ans, len(comb))
else: # has duplicates
return
for i in range(start, len(arr)):
helper(arr, comb + arr[i], i + 1)
self.ans = 0
helper(arr, '', 0)
return self.ans | StarcoderdataPython |
1723624 | <gh_stars>1-10
#!/usr/bin/env python3
actions = {
"delete-alarms",
"describe-alarm-history",
"describe-alarms",
"describe-alarms-for-metric",
"disable-alarm-actions",
"enable-alarm-actions",
"get-metric-statistics",
"list-metrics",
"put-metric-alarm",
"put-metric-data",
"set-alarm-state"
}
| StarcoderdataPython |
183590 | <filename>src/api/models/article/author.py
from ..base import BaseModel
from ..href import HrefModel
class AuthorModel(BaseModel):
def init(self):
self.add_property('name')
self.add_model_property('link', HrefModel)
| StarcoderdataPython |
182104 | <filename>alexber/utils/deploys.py
"""
This module is usable in your deployment script. See also `fabs` module.
See here https://medium.com/analytics-vidhya/my-ymlparsers-module-88221edf16a6 for documentation.
This module depends on some 3-rd party dependencies, in order to use it, you should have installed first. To do it
run `python3 -m pip install alex-ber-utils[yml]`.
"""
from collections import OrderedDict
from pathlib import Path
from collections import deque
try:
import alexber.utils.ymlparsers as ymlparsers
except ImportError:
pass
from .init_app_conf import merge_list_value_in_dicts, conf
from .parsers import is_empty, parse_sys_args
def split_path(filename, split_dirname):
"""
    Split filename into 2 parts by split_dirname.
    first_part will end with split_dirname.
    second_part will start immediately after split_dirname.
    if split_dirname is None or is not present in filename, a FileNotFoundError is raised.
if split_dirname exists twice in the path, the last one will be used for splitting.
:param filename: path to filename, can be relative or absolute.
:param split_dirname: directory name in filename that will be used to split the path
:return: (first_part, second_part) - such that first_part+second_part is absolute path.
"""
if split_dirname is None:
raise FileNotFoundError("can't split on None")
parts = Path(filename).parts
split_dirname = str(split_dirname)
if split_dirname not in parts:
raise FileNotFoundError(f"{split_dirname} is not found in {filename}")
length = len(parts)
use = False
second_parts = deque(maxlen=length)
first_parts = deque(maxlen=length)
for part in reversed(parts):
if part == split_dirname:
use = True
if use:
first_parts.appendleft(part)
else:
second_parts.appendleft(part)
return Path(*first_parts), Path(*second_parts)
def add_to_zip_copy_function(split_dirname=None, zf=None):
"""
Factory method that returns closure that can be used as copy_function param in shutil.copytree()
:param split_dirname: path from this directory and below will be used in archive.
:param zf: zipfile.ZipFile
:return:
"""
def add_to_zip_file(src,dst):
"""
Closure that can be used as copy_function param in shutil.copytree()
shutil.copytree() is used to add from src to archive with entries evaluted according to split_dirname.
:param src: soource file to use in entry in archive
:param dst: ignored, see split_dirname in enclused function
:return:
"""
_, last_parts = split_path(src, split_dirname)
dest_path = Path(split_dirname) / last_parts
zf.write(str(src), str(dest_path))
return add_to_zip_file
def load_config(argumentParser=None, args=None):
"""
Simplified method for parsing yml configuration file with optionally overrided profiles only.
See alexber.utils.init_app_conf.parse_config() for another variant.
:param argumentParser with instruction how to interpret args. If None, the default one will be instantiated.
:param args: if not None will be used instead of sys.argv
:return:
"""
if ymlparsers.HiYaPyCo.jinja2ctx is None:
raise ValueError("ymlparsers.HiYaPyCo.jinja2ctx can't be None")
params, sys_d = parse_sys_args(argumentParser, args)
config_file = params.config_file
full_path = Path(config_file).resolve() # relative to cwd
with ymlparsers.DisableVarSubst():
default_d = ymlparsers.load([str(full_path)])
profiles = merge_list_value_in_dicts(sys_d, default_d, conf.GENERAL_KEY, conf.PROFILES_KEY)
b = is_empty(profiles)
if not b:
# merge to default_d
general_d = default_d.setdefault(conf.GENERAL_KEY, OrderedDict())
general_d[conf.PROFILES_KEY] = profiles
return full_path, default_d
| StarcoderdataPython |
1679324 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 05 13:47:52 2018
@author: a002028
"""
import pandas as pd
class AttributeDict(dict):
"""Base class for attribute dictionaries."""
def __init__(self):
"""Initialize."""
super().__init__()
def _add_arrays_to_entries(self, **entries):
"""Add arrays as attributes to self."""
for key, array in entries.items():
# TODO Check if array is needed if only one value
setattr(self, key, array)
def add_entries(self, **entries):
"""Turn elements in arrays into attributes with a corresponding official field name."""
for key, array in entries.items():
setattr(self, key, key)
setattr(self, key.lower(), key)
if isinstance(array, pd.core.series.Series):
array = array.values
for value in array:
if not pd.isnull(value):
setattr(self, value, key)
setattr(self, value.lower(), key)
def add_entries_from_keylist(self, data, from_combo_keys=None, from_synonyms=None, to_key=''):
"""Create mapping attributes for ShipMapping().
Args:
data (dict):
from_combo_keys (list): list of keys
from_synonyms (list): list of keys
to_key (str):
"""
from_combo_keys = from_combo_keys or []
from_synonyms = from_synonyms or []
for i, value in enumerate(data[to_key]):
setattr(self, value, value)
if any(from_combo_keys):
setattr(self, ''.join([data[key][i].zfill(2) for key in from_combo_keys]), value)
if any(from_synonyms):
for key in from_synonyms:
setattr(self, data[key][i], value)
setattr(self, data[key][i].upper(), value)
def keys(self):
"""Return list of keys from self attributes."""
return list(self.__dict__.keys())
def get(self, key):
"""Get attribute from self using key."""
try:
return getattr(self, key)
except AttributeError:
try:
return getattr(self, key.lower())
except Exception:
if '[' in key:
try:
key = key.split('[')[0].strip()
return getattr(self, key.lower())
except Exception:
# print('No mapping found for key: ' + key)
return key
else:
# print('No mapping found for key: ' + key)
return key
def get_list(self, key_list):
"""Get list of values from self attributes based on key_list."""
return [self.get(key) for key in key_list]
def get_mapping_dict(self, key_list):
"""Get dictionary from self attributes based on key_list."""
return dict([(key, self.get(key)) for key in key_list])
def __getitem__(self, key):
"""Get item from self. If not key exists return None"""
return getattr(self, key)
class ParameterMapping(AttributeDict):
"""Load file to map data fields and parameters to a standard setting format."""
def __init__(self):
"""Initialize."""
super().__init__()
def map_parameter_list(self, para_list, ext_list=False):
"""Return mapped parameter list.
Args:
para_list (list): list of parameters
ext_list (bool): False or True, NotImplemented
"""
return self.get_list(para_list)
def get_parameter_mapping(self, para_list, ext_list=False):
"""Get a dictionary with mapped parameters from the given para_list."""
return self.get_mapping_dict(para_list)
class ShipMapping(AttributeDict):
"""Load file to map 2sign-cntry and 2sign-shipc to 4sign-shipc (ICES / SMHI)."""
def __init__(self):
"""Initialize."""
super().__init__()
def map_cntry_and_shipc(self, cntry=None, shipc=None):
"""Get SHIP code (according to standard of ICES)."""
return self.get(cntry + shipc)
def map_shipc(self, cntry_shipc):
"""Map SHIP code (according to standard of ICES)."""
return self.get(cntry_shipc)
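# Hedged usage sketch (not part of the module): map a few raw column names onto
# the "official" field names registered through add_entries().
if __name__ == "__main__":
    mapping = ParameterMapping()
    mapping.add_entries(TEMP=pd.Series(['temperature', 'temp']),
                        SALT=pd.Series(['salinity', 'psal']))
    print(mapping.map_parameter_list(['temp', 'psal', 'unknown_field']))
    # expected: ['TEMP', 'SALT', 'unknown_field']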
| StarcoderdataPython |
3208804 | from imagedt.decorator import time_cost
import cv2 as cv
def brg2rgb(image):
(r, g, b) = cv.split(image)
return cv.merge([b, g, r])
def orb_detect(image_source, image_aim):
orb = cv.ORB_create()
kp1, des1 = orb.detectAndCompute(image_source, None)
kp2, des2 = orb.detectAndCompute(image_aim, None)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
image_result = cv.drawMatches(image_source, kp1, image_aim, kp2, matches[:100], None)
return brg2rgb(image_result)
@time_cost
def sift_detect(image_1, image_2, detector='surf'):
if detector.startswith('si'):
print("sift detector....")
        surf = cv.xfeatures2d.SIFT_create()
else:
print("surf detector.......")
surf = cv.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(image_1, None)
kp2, des2 = surf.detectAndCompute(image_2, None)
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]
image_3 = cv.drawMatchesKnn(image_1, kp1, image_2, kp2, good, None, flags=2)
return image_3
# return brg2rgb(image_3)
if __name__ == '__main__':
image_a = cv.imread("image/source_image.jpg")
image_b = cv.imread("image/aim_image2.jpg")
image = sift_detect(image_a, image_b)
image = cv.resize(image, (1280, 720), interpolation=cv.INTER_CUBIC)
cv.imshow("result", image)
cv.waitKey(0)
cv.destroyAllWindows()
# plt.imshow(image)
# plt.show()
| StarcoderdataPython |
1666593 | <reponame>lucasw/imgui_ros
# Copyright 2018 <NAME>
import launch
import launch_ros.actions
import os
import yaml
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
# image_manip_dir = get_package_share_directory('image_manip')
# print('image_manip dir ' + image_manip_dir)
launches = []
node_name = 'imgui_ros'
params = dict(
name = 'imgui_ros demo',
width = 1000,
height = 800,
)
node = launch_ros.actions.Node(
package='imgui_ros', node_executable='imgui_ros_node', output='screen',
node_name=node_name,
# arguments=[image_manip_dir + "/data/mosaic.jpg"])
# arguments=['__params:=' + param_file],
parameters=[params],
remappings=[])
launches.append(node)
node = launch_ros.actions.Node(
package='imgui_ros', node_executable='graph.py', output='screen')
launches.append(node)
if False:
node = launch.actions.IncludeLaunchDescription(
launch.launch_description_sources.PythonLaunchDescriptionSource(
get_package_share_directory('imgui_ros') + '/launch/shaders_launch.py'))
launches.append(node)
node = launch_ros.actions.Node(
package='imgui_ros', node_executable='pub_shape.py', output='screen',
)
launches.append(node)
node = launch_ros.actions.Node(
package='imgui_ros', node_executable='cameras.py', output='screen',
)
launches.append(node)
return launch.LaunchDescription(launches)
| StarcoderdataPython |