ext | sha | content
---|---|---|
py | 1a528a122a1552b2c144a7fdab33601cc76a0c1f | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
from fairseq import utils
from fairseq.models.roberta import (
RobertaModel,
RobertaLMHead,
roberta_base_architecture,
roberta_large_architecture,
)
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model('mpnet')
class MPNet(RobertaModel):
def __init__(self, args, encoder):
super().__init__(args, encoder)
def task_compute(self, task='mlm', **kwargs):
if task == 'mlm':
return self.compute_mlm(**kwargs)
elif task == 'plm':
return self.compute_plm(**kwargs)
else:
return self.compute_mpnet(**kwargs)
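    # compute_mlm runs plain (single-stream) masked-LM attention over the whole input
    # and predicts only the last pred_size positions; compute_plm and compute_mpnet
    # use two-stream (content/query) attention instead.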
def compute_mlm(self, src_tokens, src_lengths, positions, pred_size, **kwargs):
sz = src_tokens.size(1)
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
positions_bias = self.encode_relative_emb(self.decoder.sentence_encoder, positions)
for layer in self.decoder.sentence_encoder.layers:
x, _ = layer(x, positions_bias=positions_bias)
x = self.maybe_final_norm(self.decoder.sentence_encoder, x)
x = reverse_tensor(x)
x = self.output_layer(x[:, sz-pred_size:])
return x
def compute_plm(self, src_tokens, src_lengths, positions, pred_size, **kwargs):
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
c, q = split_tensor(x, pred_size)
content_position_bias = self.encode_relative_emb(
self.decoder.sentence_encoder, positions[:, :-pred_size]
)
if content_position_bias is not None:
query_position_bias = content_position_bias[:, -pred_size:].contiguous()
else:
query_position_bias = None
sz = c.size(0)
query_mask, content_mask = make_query_and_content_mask(src_tokens, sz, pred_size, kind='PLM')
for i, layer in enumerate(self.decoder.sentence_encoder.layers):
c, q = encode_two_stream_attn(
layer, c, q, content_mask, query_mask, content_position_bias, query_position_bias,
)
q = self.maybe_final_norm(self.decoder.sentence_encoder, q)
q = reverse_tensor(q)
x = self.output_layer(q)
return x
def compute_mpnet(self, src_tokens, src_lengths, positions, pred_size, return_mlm=False, **kwargs):
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
c, q = split_tensor(x, pred_size)
content_position_bias = self.encode_relative_emb(self.decoder.sentence_encoder, positions[:, :-pred_size])
if content_position_bias is not None:
query_position_bias = content_position_bias[:, -pred_size:].contiguous()
else:
query_position_bias = None
sz = c.size(0) - pred_size
query_mask, content_mask = make_query_and_content_mask(src_tokens, sz, pred_size)
for i, layer in enumerate(self.decoder.sentence_encoder.layers):
c, q = encode_two_stream_attn(
layer, c, q, content_mask, query_mask, content_position_bias, query_position_bias,
)
q = self.maybe_final_norm(self.decoder.sentence_encoder, q)
q = reverse_tensor(q)
x = self.output_layer(q)
if return_mlm is True:
c = c[-pred_size:]
c = self.maybe_final_norm(self.decoder.sentence_encoder, c)
c = reverse_tensor(c)
c = self.output_layer(c)
return x, c
return x
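    # NOTE: the "staticmethods" below take the sentence encoder (or another module) as
    # their first argument, named `self`, so they act as helpers bound at call time.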
@staticmethod
def encode_emb(self, src_tokens, positions=None):
x = self.embed_tokens(src_tokens)
if self.embed_scale is not None:
x *= self.embed_scale
if positions is not None:
x += F.embedding(positions + 2, self.embed_positions.weight, self.padding_idx)
if self.emb_layer_norm is not None and not self.normalize_before:
x = self.emb_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
@staticmethod
def maybe_final_norm(self, x):
if self.emb_layer_norm is not None and self.normalize_before:
return self.emb_layer_norm(x)
return x
@staticmethod
def encode_relative_emb(self, positions):
if not self.relative_attention_bias:
return None
qlen, klen = positions.size(1), positions.size(1)
context_position = positions[:, :, None]
memory_position = positions[:, None, :]
relative_position = memory_position - context_position
rp_bucket = self.relative_position_bucket(
relative_position,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(positions.device)
values = self.relative_attention_bias(rp_bucket)
values = values.permute(0, 3, 1, 2).contiguous() # [bsz, head, qlen, klen]
values = values.view(-1, qlen, klen)
return values
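# Module-level helpers: tensors are kept in (seq_len, batch, dim) layout after
# reverse_tensor, and `self` in the functions below is the transformer layer or
# attention module passed in by the model code above.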
def reverse_tensor(x):
return x.transpose(0, 1)
def split_tensor(x, split_size):
sz = x.size(0) - split_size
return x[:sz].contiguous(), x[sz:].contiguous()
def encode_two_stream_attn(
self,
c,
q,
content_mask: torch.Tensor = None,
query_mask: torch.Tensor = None,
content_position_bias: torch.Tensor = None,
query_position_bias: torch.Tensor = None,
):
def reuse_fn(x, residual):
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return x
residual_c = c
residual_q = q
c = self.maybe_layer_norm(self.self_attn_layer_norm, c, before=True)
q = self.maybe_layer_norm(self.self_attn_layer_norm, q, before=True)
c, q = two_stream_self_attention(
self.self_attn,
query=[c, q],
key=c,
value=c,
query_mask=query_mask,
content_mask=content_mask,
query_position_bias=query_position_bias,
content_position_bias=content_position_bias,
)
c = reuse_fn(c, residual_c)
q = reuse_fn(q, residual_q)
return c, q
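# Two-stream attention: the content stream (c) and query stream (q) share keys and
# values computed from the content stream, but attend through different masks and
# position biases.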
def two_stream_self_attention(
self,
query: torch.Tensor,
key: torch.Tensor = None,
value: torch.Tensor = None,
query_mask: torch.Tensor = None,
content_mask: torch.Tensor = None,
query_position_bias: torch.Tensor = None,
content_position_bias: torch.Tensor = None,
):
c, q = query
bsz, embed_dim = key.size(1), key.size(2)
def transpose_fn(x):
return x.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
def fill_mask(attn_weights, attn_mask):
return attn_weights.masked_fill(
attn_mask.unsqueeze(0),
float('-inf')
)
def attn_fn(_q, k, v, mask=None, bias=None):
_q = transpose_fn(self.scaling * self.in_proj_q(_q))
attn_weights = torch.bmm(_q, k.transpose(1, 2))
if bias is not None:
attn_weights += bias
if mask is not None:
attn_weights = fill_mask(attn_weights, mask)
attn_weights = utils.softmax(
attn_weights, dim=-1,
).type_as(attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn = torch.bmm(attn_weights, v)
attn = attn.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
return self.out_proj(attn)
k = transpose_fn(self.in_proj_k(key))
v = transpose_fn(self.in_proj_v(value))
c = attn_fn(c, k, v, mask=content_mask, bias=content_position_bias)
q = attn_fn(q, k, v, mask=query_mask, bias=query_position_bias)
return c, q
def make_query_and_content_mask(tensor, a, b, kind='MPLM'):
'''
Query Mask:
| <- PLM -> | | <- MPNet -> |
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 0 1 1 ] [ 0 0 0 0 0 1 1 1 0 0 ]
[ 0 0 0 0 0 0 1 ] [ 0 0 0 0 0 0 1 1 1 0 ]
Content Mask:
| <- PLM -> | | <- MPNet -> |
x x x x x x x m m m
1 2 3 4 5 6 7 5 6 7
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 0 1 1 ] [ 0 0 0 0 0 1 1 1 0 0 ]
[ 0 0 0 0 0 0 1 ] [ 0 0 0 0 0 0 1 1 1 0 ]
[ 0 0 0 0 0 0 0 ] [ 0 0 0 0 0 0 0 1 1 1 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
'''
def make_query_mask():
mask = torch.triu(torch.ones(b, b), 0)
        mask = (torch.ones(b, a - b), 1 - mask) if kind == 'PLM' else (torch.ones(b, a - b), 1 - mask, mask)
return torch.cat(mask, dim=-1).eq(0)
def make_content_mask():
mask = [torch.zeros(a - b, b), torch.tril(torch.ones(b, b), 0)]
        if kind != 'PLM':
mask.append(torch.zeros(b, b))
mask = torch.cat(mask, dim=0)
        mask = (torch.ones(a, a - b), mask) if kind == 'PLM' else (torch.ones(a + b, a - b), mask, 1 - mask)
return torch.cat(mask, dim=-1).eq(0)
return make_query_mask().to(tensor.device), make_content_mask().to(tensor.device)
@register_model_architecture('mpnet', 'mpnet_base')
def mpnet_base_architecture(args):
roberta_base_architecture(args)
@register_model_architecture('mpnet', 'mpnet_rel_base')
def mpnet_rel_base_architecture(args):
args.use_relative_positions = getattr(args, 'use_relative_positions', True)
mpnet_base_architecture(args)
@register_model_architecture('mpnet', 'mpnet_large')
def mpnet_large_architecture(args):
roberta_large_architecture(args)
|
py | 1a528a6b8183087e66bbf56be3dc33d9ace41d76 |
# Unicode and Emoji
# importing necessary library
from tkinter import * # from tkinter we import everything
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import tkinter.messagebox as mbox
import emoji
import pandas as pd
data = pd.read_csv('emoji_df.csv')
emoji1 = data['emoji'].tolist()
code1 = data['codepoints'].tolist()
# Main Window
frame = Tk()
frame.title('Unicode and Emoji')
frame.geometry('950x700')
# frame.configure(bg = "white")
# image on the main window
path = "Images/front.jpg"
# Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img1 = ImageTk.PhotoImage(Image.open(path))
# The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel = tk.Label(frame, image = img1)
panel.place(x = 55, y = 110)
# starting label
start1 = Label(frame, text='UNICODE & EMOJI', font=("Arial", 55,"underline"),fg="magenta")
start1.place(x=130,y=10)
def start_fun():
frame.destroy()
# creating the START button
prevB = Button(frame, text='START', command=start_fun, font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 120, y = 590)
# defined exit_win function, to show an exit dialog box when trying to exit
def exit_win():
if mbox.askokcancel("Exit", "Do you want to exit?"):
frame.destroy()
# creating an exit button
prevB = Button(frame, text='EXIT', command=exit_win, font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 700, y = 590)
# this is done to show the exit dialog box when trying to exit from the main window, using the top-right close button of the titlebar
frame.protocol("WM_DELETE_WINDOW", exit_win)
frame.mainloop()
# Main Window
frame1 = Tk()
frame1.title('Unicode and Emoji')
frame1.geometry('950x700')
# image on the main window
path1 = "Images/second.jpg"
# Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img2 = ImageTk.PhotoImage(Image.open(path1))
# The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel1 = tk.Label(frame1, image = img2)
panel1.place(x = 465, y = 110)
# starting label
start1 = Label(frame1, text='UNICODE & EMOJI', font=("Arial", 55,"underline"),fg="magenta")
start1.place(x=130,y=10)
# starting label
start1 = Label(frame1, text='Emoji to\nUnicode', font=("Arial", 40),fg="green")
start1.place(x=100,y=120)
# starting label
start1 = Label(frame1, text='Emoji', font=("Arial", 30),fg="brown")
start1.place(x=50,y=250)
# emoji Box
l1_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
l1_entry.place(x=50, y=300)
# starting label
start1 = Label(frame1, text='Unicode', font=("Arial", 30),fg="brown")
start1.place(x=50,y=400)
# unicode Box
l2_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
l2_entry.place(x=50, y=450)
# starting label
start1 = Label(frame1, text='Unicode\nto Emoji', font=("Arial", 40),fg="green")
start1.place(x=620,y=120)
# starting label
start1 = Label(frame1, text='Unicode', font=("Arial", 30),fg="brown")
start1.place(x=550,y=250)
# unicode Box
r1_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
r1_entry.place(x=550, y=300)
# starting label
start1 = Label(frame1, text='Emoji', font=("Arial", 30),fg="brown")
start1.place(x=550,y=400)
# emoji Box
r2_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
r2_entry.place(x=550, y=450)
def uni_fun():
# emoji_entered = str(l1_entry.get())
# uc_sentence = emoji_entered.encode('unicode-escape')
# l2_entry.insert(0,uc_sentence)
emoji_entered = str(l1_entry.get())
for i in range(0,len(emoji1)):
if emoji1[i]==emoji_entered:
l2_entry.delete(0,END)
l2_entry.insert(0, code1[i])
break
def emo_fun():
code_entered = str(r1_entry.get())
for i in range(0, len(code1)):
if code1[i] == code_entered:
r2_entry.delete(0,END)
r2_entry.insert(0, emoji1[i])
break
# creating the GET UNICODE button
prevB = Button(frame1, text='GET UNICODE', command=uni_fun, font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 70, y = 550)
# creating the GET EMOJI button
prevB = Button(frame1, text='GET EMOJI', command=emo_fun, font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 650, y = 550)
# defined exit_win1 function, to show an exit dialog box when trying to exit
def exit_win1():
if mbox.askokcancel("Exit", "Do you want to exit?"):
frame1.destroy()
# creating an exit button
prevB = Button(frame1, text='EXIT', command=exit_win1, font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 420, y = 600)
# this is done to show the exit dialog box when trying to exit from the main window, using the top-right close button of the titlebar
frame1.protocol("WM_DELETE_WINDOW", exit_win1)
frame1.mainloop() |
py | 1a528c16c3afc9b8da056e81603b9786cf2d25d0 | # Deprecated we should remove it in #5221
import json
import graphene
import pytest
from saleor.graphql.tests.utils import assert_no_permission, get_graphql_content
PRIVATE_META_NAMESPACE = ""
PUBLIC_META_NAMESPACE = ""
META_CLIENT = ""
PRIVATE_KEY = "name"
PRIVATE_VALUE = "Bond"
PUBLIC_KEY = "purpose"
PUBLIC_VALUE = "42"
@pytest.fixture
def customer_with_meta(customer_user):
customer_user.store_value_in_private_metadata(items={PRIVATE_KEY: PRIVATE_VALUE})
customer_user.store_value_in_metadata(items={PUBLIC_KEY: PUBLIC_VALUE})
customer_user.save()
return customer_user
GET_PRIVATE_META_QUERY = """
query UserMeta($id: ID!) {
user(id: $id) {
email
privateMeta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
"""
def test_get_private_meta(
staff_api_client, permission_manage_users, customer_with_meta
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {"id": user_id}
response = staff_api_client.post_graphql(
GET_PRIVATE_META_QUERY, variables, permissions=[permission_manage_users]
)
meta = get_graphql_content(response)["data"]["user"]["privateMeta"][0]
assert meta["namespace"] == PRIVATE_META_NAMESPACE
assert meta["clients"] == [
{
"metadata": [{"key": PRIVATE_KEY, "value": PRIVATE_VALUE}],
"name": META_CLIENT,
}
]
MY_PRIVATE_META_QUERY = """
{
me {
email
privateMeta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
"""
def test_user_has_no_access_to_private_meta(user_api_client, customer_with_meta):
response = user_api_client.post_graphql(MY_PRIVATE_META_QUERY)
data = json.loads(response.content.decode("utf8"))
assert data["errors"] is not None
assert data["data"]["me"] is None
UPDATE_PRIVATE_METADATA_MUTATION = """
mutation UserUpdatePrivateMetadata($id: ID!, $input: MetaInput!) {
userUpdatePrivateMetadata(
id: $id
input: $input
) {
user {
privateMeta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
}
"""
def test_update_private_metadata_through_mutation(
staff_api_client, permission_manage_users, customer_with_meta
):
NEW_VALUE = "NEW_VALUE"
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PRIVATE_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PRIVATE_KEY,
"value": NEW_VALUE,
},
}
response = staff_api_client.post_graphql(
UPDATE_PRIVATE_METADATA_MUTATION,
variables,
permissions=[permission_manage_users],
)
meta = get_graphql_content(response)["data"]["userUpdatePrivateMetadata"]["user"][
"privateMeta"
][0]
assert meta["namespace"] == PRIVATE_META_NAMESPACE
assert meta["clients"] == [
{"metadata": [{"key": PRIVATE_KEY, "value": NEW_VALUE}], "name": META_CLIENT}
]
def test_add_new_key_value_pair_to_private_metadata_using_mutation(
staff_api_client, permission_manage_users, customer_with_meta
):
NEW_KEY = "NEW_KEY"
NEW_VALUE = "NEW_VALUE"
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PRIVATE_META_NAMESPACE,
"clientName": META_CLIENT,
"key": NEW_KEY,
"value": NEW_VALUE,
},
}
response = staff_api_client.post_graphql(
UPDATE_PRIVATE_METADATA_MUTATION,
variables,
permissions=[permission_manage_users],
)
meta = get_graphql_content(response)["data"]["userUpdatePrivateMetadata"]["user"][
"privateMeta"
][0]
expected_metadata = [
{"key": NEW_KEY, "value": NEW_VALUE},
{"key": PRIVATE_KEY, "value": PRIVATE_VALUE},
]
assert meta["namespace"] == PRIVATE_META_NAMESPACE
assert meta["clients"] == [{"metadata": expected_metadata, "name": META_CLIENT}]
CLEAR_PRIVATE_METADATA_MUTATION = """
mutation UserClearPrivateMetadata($id: ID!, $input: MetaPath!) {
userClearPrivateMetadata(
id: $id
input: $input
) {
user {
privateMeta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
}
"""
def test_clear_private_metadata_through_mutation(
staff_api_client, permission_manage_users, customer_with_meta
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PRIVATE_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PRIVATE_KEY,
},
}
response = staff_api_client.post_graphql(
CLEAR_PRIVATE_METADATA_MUTATION,
variables,
permissions=[permission_manage_users],
)
meta = get_graphql_content(response)["data"]["userClearPrivateMetadata"]["user"][
"privateMeta"
][0]
assert meta["namespace"] == PRIVATE_META_NAMESPACE
assert meta["clients"] == []
MY_PUBLIC_META_QUERY = """
{
me {
email
meta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
"""
def test_access_users_public_metadata(user_api_client, customer_with_meta):
response = user_api_client.post_graphql(MY_PUBLIC_META_QUERY)
data = json.loads(response.content.decode("utf8"))
assert "errors" not in data
meta = get_graphql_content(response)["data"]["me"]["meta"][0]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == [
{"metadata": [{"key": PUBLIC_KEY, "value": PUBLIC_VALUE}], "name": META_CLIENT}
]
GET_META_QUERY = """
query UserMeta($id: ID!) {
user(id: $id) {
email
meta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
"""
def test_staff_access_to_public_metadata(
staff_api_client, permission_manage_users, customer_with_meta
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {"id": user_id}
response = staff_api_client.post_graphql(
GET_META_QUERY, variables, permissions=[permission_manage_users]
)
meta = get_graphql_content(response)["data"]["user"]["meta"][0]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == [
{"metadata": [{"key": PUBLIC_KEY, "value": PUBLIC_VALUE}], "name": META_CLIENT}
]
UPDATE_METADATA_MUTATION = """
mutation UserUpdateMetadata($id: ID!, $input: MetaInput!) {
userUpdateMetadata(
id: $id
input: $input
) {
user {
meta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
}
"""
def test_staff_update_metadata_through_mutation(
staff_api_client, permission_manage_users, customer_with_meta
):
NEW_VALUE = "NEW_VALUE"
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PUBLIC_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PUBLIC_KEY,
"value": NEW_VALUE,
},
}
resp = staff_api_client.post_graphql(
UPDATE_METADATA_MUTATION, variables, permissions=[permission_manage_users]
)
meta = get_graphql_content(resp)["data"]["userUpdateMetadata"]["user"]["meta"][0]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == [
{"metadata": [{"key": PUBLIC_KEY, "value": NEW_VALUE}], "name": META_CLIENT}
]
def test_staff_add_new_key_value_pair_to_metadata_using_mutation(
staff_api_client, customer_with_meta, permission_manage_users
):
NEW_KEY = "NEW_KEY"
NEW_VALUE = "NEW_VALUE"
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PUBLIC_META_NAMESPACE,
"clientName": META_CLIENT,
"key": NEW_KEY,
"value": NEW_VALUE,
},
}
response = staff_api_client.post_graphql(
UPDATE_METADATA_MUTATION, variables, permissions=[permission_manage_users]
)
meta = get_graphql_content(response)["data"]["userUpdateMetadata"]["user"]["meta"][
0
]
expected_metadata = [
{"key": NEW_KEY, "value": NEW_VALUE},
{"key": PUBLIC_KEY, "value": PUBLIC_VALUE},
]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == [{"metadata": expected_metadata, "name": META_CLIENT}]
CLEAR_METADATA_MUTATION = """
mutation UserClearPrivateMetadata($id: ID!, $input: MetaPath!) {
userClearMetadata(
id: $id
input: $input
) {
user {
meta {
namespace
clients {
name
metadata {
key
value
}
}
}
}
}
}
"""
def test_staff_clear_metadata_through_mutation(
staff_api_client, customer_with_meta, permission_manage_users
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PUBLIC_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PUBLIC_KEY,
},
}
response = staff_api_client.post_graphql(
CLEAR_METADATA_MUTATION, variables, permissions=[permission_manage_users]
)
meta = get_graphql_content(response)["data"]["userClearMetadata"]["user"]["meta"][0]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == []
@pytest.mark.parametrize(
"mutation", [UPDATE_METADATA_MUTATION, UPDATE_PRIVATE_METADATA_MUTATION]
)
def test_staff_update_meta_without_permissions(
staff_api_client, customer_with_meta, mutation
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": "new_namespace",
"clientName": "client_name",
"key": "meta_key",
"value": "value",
},
}
response = staff_api_client.post_graphql(mutation, variables)
assert_no_permission(response)
@pytest.mark.parametrize(
"mutation", [CLEAR_METADATA_MUTATION, CLEAR_PRIVATE_METADATA_MUTATION]
)
def test_staff_clear_meta_without_permissions(
staff_api_client, customer_with_meta, mutation
):
user_id = graphene.Node.to_global_id("User", customer_with_meta.id)
variables = {
"id": user_id,
"input": {
"namespace": PUBLIC_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PUBLIC_KEY,
},
}
response = staff_api_client.post_graphql(mutation, variables)
assert_no_permission(response)
UPDATE_ACCOUNT_META_MUTATION = """
mutation AccountUpdateMeta($input: MetaInput!) {
accountUpdateMeta(input: $input){
user{
meta{
namespace
clients{
name
metadata{
key
value
}
}
}
}
}
}
"""
def test_user_updates_own_meta(user_api_client, customer_with_meta):
NEW_VALUE = "NEW_VALUE"
variables = {
"input": {
"namespace": PUBLIC_META_NAMESPACE,
"clientName": META_CLIENT,
"key": PUBLIC_KEY,
"value": NEW_VALUE,
}
}
resp = user_api_client.post_graphql(UPDATE_ACCOUNT_META_MUTATION, variables)
meta = get_graphql_content(resp)["data"]["accountUpdateMeta"]["user"]["meta"][0]
assert meta["namespace"] == PUBLIC_META_NAMESPACE
assert meta["clients"] == [
{"metadata": [{"key": PUBLIC_KEY, "value": NEW_VALUE}], "name": META_CLIENT}
]
|
py | 1a528c36636ae522fad5a51cc6c4c68e3566ee20 | import zmq
import unittest
from http import client as http
from .simple import Base, TimeoutError
CONFIG='test/wlimit.yaml'
CHAT_FW = "ipc:///tmp/zerogw-test-chatfw"
class Wlimit(Base):
timeout = 2 # in zmq.select units (seconds)
config = CONFIG
def setUp(self):
self.zmq = zmq.Context(1)
self.addCleanup(self.zmq.term)
super().setUp()
self.chatfw = self.zmq.socket(zmq.PULL)
self.addCleanup(self.chatfw.close)
self.chatfw.connect(CHAT_FW)
def backend_recv(self, backend=None):
if backend is None:
sock = self.chatfw
else:
sock = self.minigame
if (([sock], [], []) !=
zmq.select([sock], [], [], timeout=self.timeout)):
raise TimeoutError()
val = sock.recv_multipart()
if val[1] == b'heartbeat':
return self.backend_recv(backend=backend)
return val
def testWorking(self):
ws1 = self.websock()
ws1.connect()
ws1.client_send('hello1') # checks backend delivery itself
ws2 = self.websock()
ws2.connect()
ws2.client_send('hello2')
ws1.client_send('hello3')
ws3 = self.websock()
ws3.connect()
ws3.client_send('hello1') # checks backend delivery itself
ws4 = self.websock()
ws4.connect()
ws4.client_send('hello2')
ws1.close()
ws5 = self.websock()
ws5.connect()
ws5.client_send("hello4")
ws2.client_send("fifth_hello")
ws2.close()
ws3.close()
ws4.close()
ws5.close()
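    # The wlimit config presumably caps simultaneous websocket connections at four,
    # so the fifth connect() below should be refused with an empty status line.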
def testNoMoreSlots(self):
ws1 = self.websock()
ws1.connect()
self.addCleanup(ws1.close)
ws1.client_send('hello1') # checks backend delivery itself
ws2 = self.websock()
ws2.connect()
self.addCleanup(ws2.close)
ws2.client_send('hello2')
ws1.client_send('hello3')
ws3 = self.websock()
ws3.connect()
self.addCleanup(ws3.close)
ws3.client_send('hello1') # checks backend delivery itself
ws4 = self.websock()
ws4.connect()
self.addCleanup(ws4.close)
ws4.client_send('hello2')
ws5 = self.websock()
with self.assertRaisesRegex(http.BadStatusLine, "''"):
ws5.connect()
self.addCleanup(ws5.http.close)
ws2.client_send("fifth_hello")
if __name__ == '__main__':
unittest.main()
|
py | 1a528c45929d4b4df8c875c75b0d7d93e90be6ae | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
class ProcessorMetadata:
"""
ProcessorMetadata
This contains meta information about the process that generated
this processor. The results of these functions do not
impact the pixel processing.
"""
def __init__(self):
pass
def getFiles(self):
"""
getFiles()
Returns a list of file references used internally by this processor
:return: list of filenames
:rtype: list
"""
pass
def getLooks(self):
"""
getLooks()
Returns a list of looks used internally by this processor
:return: list of look names
:rtype: list
"""
pass
|
py | 1a528d4c0855df681a3f39038f9056838c4484a6 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AnalysisProgress.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AnalysisProgress(object):
def setupUi(self, AnalysisProgress):
AnalysisProgress.setObjectName("AnalysisProgress")
AnalysisProgress.resize(594, 465)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(AnalysisProgress)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.progressBar = QtWidgets.QProgressBar(AnalysisProgress)
self.progressBar.setProperty("value", 0)
self.progressBar.setInvertedAppearance(False)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.textEdit = QtWidgets.QTextEdit(AnalysisProgress)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(AnalysisProgress)
QtCore.QMetaObject.connectSlotsByName(AnalysisProgress)
def retranslateUi(self, AnalysisProgress):
_translate = QtCore.QCoreApplication.translate
AnalysisProgress.setWindowTitle(_translate("AnalysisProgress", "Analysis Progress"))
|
py | 1a528dd9253bbf6741a89b8c84ac7901146692c0 | from django.contrib import admin
from ...models.tributo import TributoSezErario
class TributoSezErarioAdmin(admin.StackedInline):
model = TributoSezErario
extra = 0
fieldsets = (
(
None,
{
"fields": (
(
"ufficio",
"atto",
),
"tributo",
(
"riferimento",
"anno",
),
"debito",
),
},
),
)
|
py | 1a528e8bc1811bc5f201b1bb1fb34c66086215c3 | # Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pecan
from oslo_log import log
from oslo_utils.strutils import bool_from_string
from osprofiler import profiler
from pecan.core import abort
from vitrage.api.controllers.rest import RootRestController
from vitrage.api.policy import enforce
LOG = log.getLogger(__name__)
# noinspection PyBroadException
@profiler.trace_cls("rca controller",
info={}, hide_args=False, trace_private=False)
class RCAController(RootRestController):
@pecan.expose('json')
def index(self, alarm_id, all_tenants=False):
return self.get(alarm_id, all_tenants)
@pecan.expose('json')
def get(self, alarm_id, all_tenants=False):
all_tenants = bool_from_string(all_tenants)
if all_tenants:
enforce('get rca:all_tenants', pecan.request.headers,
pecan.request.enforcer, {})
else:
enforce('get rca', pecan.request.headers,
pecan.request.enforcer, {})
LOG.info('received show rca with alarm id %s', alarm_id)
return self.get_rca(alarm_id, all_tenants)
@staticmethod
def get_rca(alarm_id, all_tenants):
try:
graph_data = pecan.request.client.call(pecan.request.context,
'get_rca',
root=alarm_id,
all_tenants=all_tenants)
LOG.info(graph_data)
graph = json.loads(graph_data)
return graph
except Exception:
LOG.exception('Failed to get RCA.')
abort(404, 'Failed to get RCA')
|
py | 1a528f4e0ba183df492c97e3fa5ed05bb3a433ff | from __future__ import unicode_literals
import os
import sys
import urllib.request
import shutil
from contextlib import closing
#import gzip
import datetime
from dateutil import parser
import logging
#import subprocess
from netCDF4 import Dataset
import rasterio as rio
import eeUtil
import numpy as np
LOG_LEVEL = logging.INFO
CLEAR_COLLECTION_FIRST = False
DOWNLOAD_FILE = True
# constants for bleaching alerts
SOURCE_URL = 'http://soton.eead.csic.es/spei/nc/{filename}'
SOURCE_FILENAME = 'spei{month_lag}.nc'
FILENAME = 'cli_039_lag{lag}_{date}'
SDS_NAME = 'NETCDF:\"{nc_name}\":{var_name}'
VAR_NAME = 'spei'
TIME_NAME = 'time'
TIMELAGS = ['06']
# Read from dataset
NODATA_VALUE = None
DATA_TYPE = 'Byte' # Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/CInt16/CInt32/CFloat32/CFloat64
MISSING_VALUE_NAME = "missing_value"
DATA_DIR = 'data/'
GS_FOLDER = 'cli_039_spei'
EE_COLLECTION = 'cli_039_spei'
MAX_ASSETS = 36
DATE_FORMAT = '%Y%m15'
TIMESTEP = {'days': 30}
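# Asset dates are pinned to the 15th of each month (DATE_FORMAT ends in '15'),
# stepping back TIMESTEP (30 days) from today for up to MAX_ASSETS dates.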
def getAssetName(date, lag):
'''get asset name from datestamp'''
return os.path.join(EE_COLLECTION, FILENAME.format(date=date, lag=lag))
def getDate(filename):
'''get last 8 chrs of filename'''
return os.path.splitext(os.path.basename(filename))[0][-8:]
def getNewTargetDates(exclude_dates):
'''Get new dates excluding existing'''
new_dates = []
    date = datetime.date.today()
    # date.replace() returns a new date, so re-assign to pin the day to the 15th
    date = date.replace(day=15)
for i in range(MAX_ASSETS):
        date -= datetime.timedelta(**TIMESTEP)
        date = date.replace(day=15)
datestr = date.strftime(DATE_FORMAT)
if datestr not in exclude_dates + new_dates:
new_dates.append(datestr)
return new_dates
def fetch(filename, lag):
'''Fetch files by datestamp'''
# New data may not yet be posted
sourceUrl = SOURCE_URL.format(filename=SOURCE_FILENAME.format(month_lag=lag))
try:
urllib.request.urlretrieve(sourceUrl, filename)
except Exception as e:
logging.warning('Could not fetch {}'.format(sourceUrl))
logging.error(e)
return filename
def extract_metadata(nc_file):
nc = Dataset(nc_file)
logging.debug(nc)
logging.debug(nc.variables)
logging.debug(nc[VAR_NAME])
dtype = str(nc[VAR_NAME].dtype)
nodata = float(nc[VAR_NAME].getncattr("_FillValue"))
#nodata = float(nc[VAR_NAME].getncattr(MISSING_VALUE_NAME))
del nc
return dtype, nodata
def retrieve_formatted_dates(nc_file, date_pattern=DATE_FORMAT):
'''
Inputs:
* pointer to a netcdf file
Outputs:
* dates formatted according to DATE_FORMAT
'''
# Extract time variable range
nc = Dataset(nc_file)
time_displacements = nc[TIME_NAME]
del nc
# Identify time units
# fuzzy=True allows the parser to pick the date out from a string with other text
time_units = time_displacements.getncattr('units')
logging.debug("Time units: {}".format(time_units))
ref_time = parser.parse(time_units, fuzzy=True)
logging.debug("Reference time: {}".format(ref_time))
# Format times to DATE_FORMAT
###
## REPLACE W/ MAP FUNCTION
###
formatted_dates = [(ref_time + datetime.timedelta(days=int(time_disp))).strftime(date_pattern) for time_disp in time_displacements]
logging.debug('Dates available: {}'.format(formatted_dates))
return(formatted_dates)
def extract_subdata_by_date(nc_file, lag, dtype, nodata, available_dates, target_dates):
    '''
    target_dates is a list of date strings (DATE_FORMAT); each one is looked up in
    available_dates to find its index in the netCDF, written out as a GeoTIFF, and
    the list of created GeoTIFF paths is returned.
    '''
nc = Dataset(nc_file)
sub_tifs = []
for date in target_dates:
# Find index in available dates, if not available, skip this date
try:
date_ix = available_dates.index(date)
logging.info("Date {} found! Processing...".format(date))
except:
logging.error("Date {} not found in available dates".format(date))
continue
# Extract data
data = nc[VAR_NAME][date_ix,:,:]
# Create profile/tif metadata
south_lat = -90
north_lat = 90
west_lon = -180
east_lon = 180
# Transformation function
transform = rio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, data.shape[1], data.shape[0])
# Profile
profile = {
'driver':'GTiff',
'height':data.shape[0],
'width':data.shape[1],
'count':1,
'dtype':dtype,
'crs':'EPSG:4326',
'transform':transform,
'compress':'lzw',
'nodata':nodata
}
# Set filename
sub_tif = DATA_DIR + '{}.tif'.format(FILENAME.format(date=date, lag=lag))
logging.info(sub_tif)
with rio.open(sub_tif, 'w', **profile) as dst:
## Need to flip array, original data comes in upside down
flipped_array = np.flipud(data.astype(dtype))
dst.write(flipped_array, indexes=1)
sub_tifs.append(sub_tif)
del nc
return sub_tifs
def processNewData(existing_dates, lag):
'''fetch, process, upload, and clean new data'''
# 1. Determine which years to read from the netCDF file
target_dates = getNewTargetDates(existing_dates)
# 2. Fetch datafile
logging.info('Fetching files')
nc_file = fetch(DATA_DIR + 'nc_file.nc', lag)
available_dates = retrieve_formatted_dates(nc_file)
dtype, nodata = extract_metadata(nc_file)
logging.info('type: ' + dtype)
logging.info('nodata val: ' + str(nodata))
if target_dates:
# 3. Convert new files
logging.info('Converting files')
sub_tifs = extract_subdata_by_date(nc_file, lag, dtype, nodata, available_dates, target_dates)
logging.info(sub_tifs)
# 4. Upload new files
logging.info('Uploading files')
dates = [getDate(tif) for tif in sub_tifs]
datestamps = [datetime.datetime.strptime(date, DATE_FORMAT)
for date in dates]
assets = [getAssetName(date, lag) for date in dates]
eeUtil.uploadAssets(sub_tifs, assets, GS_FOLDER, datestamps)
# 5. Delete local files
logging.info('Cleaning local files')
os.remove(nc_file)
for tif in sub_tifs:
logging.debug('deleting: ' + tif)
os.remove(tif)
return assets
return []
def checkCreateCollection(collection):
'''List assests in collection else create new collection'''
if eeUtil.exists(collection):
return eeUtil.ls(collection)
else:
logging.info('{} does not exist, creating'.format(collection))
eeUtil.createFolder(collection, imageCollection=True, public=True)
return []
def deleteExcessAssets(dates, max_assets):
'''Delete assets if too many'''
# oldest first
dates.sort()
if len(dates) > max_assets:
for date in dates[:-max_assets]:
eeUtil.removeAsset(getAssetName(date, TIMELAGS[0]))
def main():
'''Ingest new data into EE and delete old data'''
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logging.info('STARTING')
# Initialize eeUtil
eeUtil.initJson()
# 1. Check if collection exists and create
if CLEAR_COLLECTION_FIRST:
if eeUtil.exists(EE_COLLECTION):
eeUtil.removeAsset(EE_COLLECTION, recursive=True)
existing_assets = checkCreateCollection(EE_COLLECTION)
existing_dates = [getDate(a) for a in existing_assets]
# 2. Fetch, process, stage, ingest, clean
new_assets = []
for lag in TIMELAGS:
new_assets.extend(processNewData(existing_dates, lag))
new_dates = [getDate(a) for a in new_assets]
# 3. Delete old assets
existing_dates = existing_dates + new_dates
logging.info('Existing assets: {}, new: {}, max: {}'.format(
len(existing_dates), len(new_dates), MAX_ASSETS))
deleteExcessAssets(existing_dates, MAX_ASSETS)
###
logging.info('SUCCESS')
|
py | 1a528f8cedfe04c3b0cb2787123bf04a6f1cca16 | """Test the cloud.iot module."""
import asyncio
from unittest.mock import patch, MagicMock, PropertyMock
from aiohttp import WSMsgType, client_exceptions, web
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components.cloud import (
Cloud, iot, auth_api, MODE_DEV)
from homeassistant.components.cloud.const import (
PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE)
from homeassistant.util import dt as dt_util
from tests.components.alexa import test_smart_home as test_alexa
from tests.common import mock_coro, async_fire_time_changed
from . import mock_cloud_prefs
@pytest.fixture
def mock_client():
"""Mock the IoT client."""
client = MagicMock()
type(client).closed = PropertyMock(side_effect=[False, True])
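    # closed reports False on the first check and True afterwards, so the receive
    # loop in connect() only runs once.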
# Trigger cancelled error to avoid reconnect.
with patch('asyncio.sleep', side_effect=asyncio.CancelledError), \
patch('homeassistant.components.cloud.iot'
'.async_get_clientsession') as session:
session().ws_connect.return_value = mock_coro(client)
yield client
@pytest.fixture
def mock_handle_message():
"""Mock handle message."""
with patch('homeassistant.components.cloud.iot'
'.async_handle_message') as mock:
yield mock
@pytest.fixture
def mock_cloud():
"""Mock cloud class."""
return MagicMock(subscription_expired=False)
@asyncio.coroutine
def test_cloud_calling_handler(mock_client, mock_handle_message, mock_cloud):
"""Test we call handle message with correct info."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'test-handler',
'payload': 'test-payload'
})
))
mock_handle_message.return_value = mock_coro('response')
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent message to handler correctly
assert len(mock_handle_message.mock_calls) == 1
p_hass, p_cloud, handler_name, payload = \
mock_handle_message.mock_calls[0][1]
assert p_hass is mock_cloud.hass
assert p_cloud is mock_cloud
assert handler_name == 'test-handler'
assert payload == 'test-payload'
# Check that we forwarded response from handler to cloud
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'payload': 'response'
}
@asyncio.coroutine
def test_connection_msg_for_unknown_handler(mock_client, mock_cloud):
"""Test a msg for an unknown handler."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'non-existing-handler',
'payload': 'test-payload'
})
))
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent the correct error
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'error': 'unknown-handler',
}
@asyncio.coroutine
def test_connection_msg_for_handler_raising(mock_client, mock_handle_message,
mock_cloud):
"""Test we sent error when handler raises exception."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'test-handler',
'payload': 'test-payload'
})
))
mock_handle_message.side_effect = Exception('Broken')
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent the correct error
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'error': 'exception',
}
@asyncio.coroutine
def test_handler_forwarding():
"""Test we forward messages to correct handler."""
handler = MagicMock()
handler.return_value = mock_coro()
hass = object()
cloud = object()
with patch.dict(iot.HANDLERS, {'test': handler}):
yield from iot.async_handle_message(
hass, cloud, 'test', 'payload')
assert len(handler.mock_calls) == 1
r_hass, r_cloud, payload = handler.mock_calls[0][1]
assert r_hass is hass
assert r_cloud is cloud
assert payload == 'payload'
async def test_handling_core_messages_logout(hass, mock_cloud):
"""Test handling core messages."""
mock_cloud.logout.return_value = mock_coro()
await iot.async_handle_cloud(hass, mock_cloud, {
'action': 'logout',
'reason': 'Logged in at two places.'
})
assert len(mock_cloud.logout.mock_calls) == 1
async def test_handling_core_messages_refresh_auth(hass, mock_cloud):
"""Test handling core messages."""
mock_cloud.hass = hass
with patch('random.randint', return_value=0) as mock_rand, patch(
'homeassistant.components.cloud.auth_api.check_token'
) as mock_check:
await iot.async_handle_cloud(hass, mock_cloud, {
'action': 'refresh_auth',
'seconds': 230,
})
async_fire_time_changed(hass, dt_util.utcnow())
await hass.async_block_till_done()
assert len(mock_rand.mock_calls) == 1
assert mock_rand.mock_calls[0][1] == (0, 230)
assert len(mock_check.mock_calls) == 1
assert mock_check.mock_calls[0][1][0] is mock_cloud
@asyncio.coroutine
def test_cloud_getting_disconnected_by_server(mock_client, caplog, mock_cloud):
"""Test server disconnecting instance."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.CLOSING,
))
with patch('asyncio.sleep', side_effect=[None, asyncio.CancelledError]):
yield from conn.connect()
assert 'Connection closed' in caplog.text
@asyncio.coroutine
def test_cloud_receiving_bytes(mock_client, caplog, mock_cloud):
"""Test server disconnecting instance."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.BINARY,
))
yield from conn.connect()
assert 'Connection closed: Received non-Text message' in caplog.text
@asyncio.coroutine
def test_cloud_sending_invalid_json(mock_client, caplog, mock_cloud):
"""Test cloud sending invalid JSON."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.TEXT,
json=MagicMock(side_effect=ValueError)
))
yield from conn.connect()
assert 'Connection closed: Received invalid JSON.' in caplog.text
@asyncio.coroutine
def test_cloud_check_token_raising(mock_client, caplog, mock_cloud):
"""Test cloud unable to check token."""
conn = iot.CloudIoT(mock_cloud)
mock_cloud.hass.async_add_job.side_effect = auth_api.CloudError("BLA")
yield from conn.connect()
assert 'Unable to refresh token: BLA' in caplog.text
@asyncio.coroutine
def test_cloud_connect_invalid_auth(mock_client, caplog, mock_cloud):
"""Test invalid auth detected by server."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = \
client_exceptions.WSServerHandshakeError(None, None, status=401)
yield from conn.connect()
assert 'Connection closed: Invalid auth.' in caplog.text
@asyncio.coroutine
def test_cloud_unable_to_connect(mock_client, caplog, mock_cloud):
"""Test unable to connect error."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = client_exceptions.ClientError(None, None)
yield from conn.connect()
assert 'Unable to connect:' in caplog.text
@asyncio.coroutine
def test_cloud_random_exception(mock_client, caplog, mock_cloud):
"""Test random exception."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = Exception
yield from conn.connect()
assert 'Unexpected error' in caplog.text
@asyncio.coroutine
def test_refresh_token_before_expiration_fails(hass, mock_cloud):
"""Test that we don't connect if token is expired."""
mock_cloud.subscription_expired = True
mock_cloud.hass = hass
conn = iot.CloudIoT(mock_cloud)
with patch('homeassistant.components.cloud.auth_api.check_token',
return_value=mock_coro()) as mock_check_token, \
patch.object(hass.components.persistent_notification,
'async_create') as mock_create:
yield from conn.connect()
assert len(mock_check_token.mock_calls) == 1
assert len(mock_create.mock_calls) == 1
@asyncio.coroutine
def test_handler_alexa(hass):
"""Test handler Alexa."""
hass.states.async_set(
'switch.test', 'on', {'friendly_name': "Test switch"})
hass.states.async_set(
'switch.test2', 'on', {'friendly_name': "Test switch 2"})
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
setup = yield from async_setup_component(hass, 'cloud', {
'cloud': {
'alexa': {
'filter': {
'exclude_entities': 'switch.test2'
},
'entity_config': {
'switch.test': {
'name': 'Config name',
'description': 'Config description',
'display_categories': 'LIGHT'
}
}
}
}
})
assert setup
mock_cloud_prefs(hass)
resp = yield from iot.async_handle_alexa(
hass, hass.data['cloud'],
test_alexa.get_new_request('Alexa.Discovery', 'Discover'))
endpoints = resp['event']['payload']['endpoints']
assert len(endpoints) == 1
device = endpoints[0]
assert device['description'] == 'Config description'
assert device['friendlyName'] == 'Config name'
assert device['displayCategories'] == ['LIGHT']
assert device['manufacturerName'] == 'Home Assistant'
@asyncio.coroutine
def test_handler_alexa_disabled(hass, mock_cloud_fixture):
"""Test handler Alexa when user has disabled it."""
mock_cloud_fixture[PREF_ENABLE_ALEXA] = False
resp = yield from iot.async_handle_alexa(
hass, hass.data['cloud'],
test_alexa.get_new_request('Alexa.Discovery', 'Discover'))
assert resp['event']['header']['namespace'] == 'Alexa'
assert resp['event']['header']['name'] == 'ErrorResponse'
assert resp['event']['payload']['type'] == 'BRIDGE_UNREACHABLE'
@asyncio.coroutine
def test_handler_google_actions(hass):
"""Test handler Google Actions."""
hass.states.async_set(
'switch.test', 'on', {'friendly_name': "Test switch"})
hass.states.async_set(
'switch.test2', 'on', {'friendly_name': "Test switch 2"})
hass.states.async_set(
'group.all_locks', 'on', {'friendly_name': "Evil locks"})
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
setup = yield from async_setup_component(hass, 'cloud', {
'cloud': {
'google_actions': {
'filter': {
'exclude_entities': 'switch.test2'
},
'entity_config': {
'switch.test': {
'name': 'Config name',
'aliases': 'Config alias',
'room': 'living room'
}
}
}
}
})
assert setup
mock_cloud_prefs(hass)
reqid = '5711642932632160983'
data = {'requestId': reqid, 'inputs': [{'intent': 'action.devices.SYNC'}]}
with patch('homeassistant.components.cloud.Cloud._decode_claims',
return_value={'cognito:username': 'myUserName'}):
resp = yield from iot.async_handle_google_actions(
hass, hass.data['cloud'], data)
assert resp['requestId'] == reqid
payload = resp['payload']
assert payload['agentUserId'] == 'myUserName'
devices = payload['devices']
assert len(devices) == 1
device = devices[0]
assert device['id'] == 'switch.test'
assert device['name']['name'] == 'Config name'
assert device['name']['nicknames'] == ['Config alias']
assert device['type'] == 'action.devices.types.SWITCH'
assert device['roomHint'] == 'living room'
async def test_handler_google_actions_disabled(hass, mock_cloud_fixture):
"""Test handler Google Actions when user has disabled it."""
mock_cloud_fixture[PREF_ENABLE_GOOGLE] = False
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
assert await async_setup_component(hass, 'cloud', {})
reqid = '5711642932632160983'
data = {'requestId': reqid, 'inputs': [{'intent': 'action.devices.SYNC'}]}
resp = await iot.async_handle_google_actions(
hass, hass.data['cloud'], data)
assert resp['requestId'] == reqid
assert resp['payload']['errorCode'] == 'deviceTurnedOff'
async def test_refresh_token_expired(hass):
"""Test handling Unauthenticated error raised if refresh token expired."""
cloud = Cloud(hass, MODE_DEV, None, None)
with patch('homeassistant.components.cloud.auth_api.check_token',
side_effect=auth_api.Unauthenticated) as mock_check_token, \
patch.object(hass.components.persistent_notification,
'async_create') as mock_create:
await cloud.iot.connect()
assert len(mock_check_token.mock_calls) == 1
assert len(mock_create.mock_calls) == 1
async def test_webhook_msg(hass):
"""Test webhook msg."""
cloud = Cloud(hass, MODE_DEV, None, None)
await cloud.prefs.async_initialize()
await cloud.prefs.async_update(cloudhooks={
'hello': {
'webhook_id': 'mock-webhook-id',
'cloudhook_id': 'mock-cloud-id'
}
})
received = []
async def handler(hass, webhook_id, request):
"""Handle a webhook."""
received.append(request)
return web.json_response({'from': 'handler'})
hass.components.webhook.async_register(
'test', 'Test', 'mock-webhook-id', handler)
response = await iot.async_handle_webhook(hass, cloud, {
'cloudhook_id': 'mock-cloud-id',
'body': '{"hello": "world"}',
'headers': {
'content-type': 'application/json'
},
'method': 'POST',
'query': None,
})
assert response == {
'status': 200,
'body': '{"from": "handler"}',
'headers': {
'Content-Type': 'application/json'
}
}
assert len(received) == 1
assert await received[0].json() == {
'hello': 'world'
}
async def test_send_message_not_connected(mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
with pytest.raises(iot.NotConnected):
await cloud_iot.async_send_message('webhook', {'msg': 'yo'})
async def test_send_message_no_answer(mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
cloud_iot.state = iot.STATE_CONNECTED
cloud_iot.client = MagicMock(send_json=MagicMock(return_value=mock_coro()))
await cloud_iot.async_send_message('webhook', {'msg': 'yo'},
expect_answer=False)
assert not cloud_iot._response_handler
assert len(cloud_iot.client.send_json.mock_calls) == 1
msg = cloud_iot.client.send_json.mock_calls[0][1][0]
assert msg['handler'] == 'webhook'
assert msg['payload'] == {'msg': 'yo'}
async def test_send_message_answer(loop, mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
cloud_iot.state = iot.STATE_CONNECTED
cloud_iot.client = MagicMock(send_json=MagicMock(return_value=mock_coro()))
uuid = 5
with patch('homeassistant.components.cloud.iot.uuid.uuid4',
return_value=MagicMock(hex=uuid)):
send_task = loop.create_task(cloud_iot.async_send_message(
'webhook', {'msg': 'yo'}))
await asyncio.sleep(0)
assert len(cloud_iot.client.send_json.mock_calls) == 1
assert len(cloud_iot._response_handler) == 1
msg = cloud_iot.client.send_json.mock_calls[0][1][0]
assert msg['handler'] == 'webhook'
assert msg['payload'] == {'msg': 'yo'}
cloud_iot._response_handler[uuid].set_result({'response': True})
response = await send_task
assert response == {'response': True}
|
py | 1a528fe2f146d7947fdba82b6793599abacee2fc | from matplotlib import pyplot as plt
import numpy as np
emb = np.fromfile('.\\Release\\MNIST_emb.bin', np.float32)
print(emb.shape)  # 140000 floats, reshaped below to 70000 points x 2 components
emb = emb.reshape((70000,2))
plt.scatter(emb[...,0], emb[...,1])
plt.show() |
py | 1a52911926ccf5cf5fda01a574dd2cf126647816 | import sys
sys.path.append("..")
import pickle
import json, gzip
import datetime
import numpy as np
import config as cfg
from utils import log
##################### GLOBAL VARS #######################
GRID = []
CODES = []
STEP = 0.25
###################### LOAD DATA ########################
def load():
global GRID
global CODES
global STEP
if len(GRID) == 0:
# Status
log.p('LOADING eBIRD GRID DATA...', new_line=False)
# Load pickled or zipped grid data
if cfg.EBIRD_MDATA.rsplit('.', 1)[-1] == 'gz':
with gzip.open(cfg.EBIRD_MDATA, 'rt') as pfile:
GRID = json.load(pfile)
else:
with open(cfg.EBIRD_MDATA, 'rb') as pfile:
GRID = pickle.load(pfile)
# Load species codes
with open(cfg.EBIRD_SPECIES_CODES, 'r') as jfile:
CODES = json.load(jfile)
STEP = cfg.GRID_STEP_SIZE
log.p(('DONE!', len(GRID), 'GRID CELLS'))
#################### PROBABILITIES ######################
def getCellData(lat, lon):
# Find nearest cell
for cell in GRID:
if lat > cell['lat'] - STEP and lat < cell['lat'] + STEP and lon > cell['lon'] - STEP and lon < cell['lon'] + STEP:
return cell
# No cell
return None
def getWeek():
w = datetime.datetime.now().isocalendar()[1]
return min(48, max(1, int(48.0 * w / 52.0)))
def getWeekFromDate(y, m, d):
w = datetime.date(int(y), int(m), int(d)).isocalendar()[1]
return min(48, max(1, int(48.0 * w / 52.0)))
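# If no coordinates (or no matching grid cell) are available, every class gets
# probability 1.0 so that no species is filtered out downstream.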
def getSpeciesProbabilities(lat=-1, lon=-1, week=-1):
# Dummy array
p = np.zeros((len(cfg.CLASSES)), dtype='float32')
# No coordinates?
if lat == -1 or lon == -1:
return p + 1.0
else:
# Get checklist data for nearest cell
cdata = getCellData(lat, lon)
# No cell data?
        if cdata is None:
return p + 1.0
else:
# Get probabilities from checklist frequencies
for entry in cdata['data']:
for species in entry:
try:
# Get class index from species code
for i in range(len(cfg.CLASSES)):
if cfg.CLASSES[i].split('_')[0] == CODES[species].split('_')[0]:
# Do we want a specific week?
if week >= 1 and week <= 48:
p[i] = entry[species][week - 1] / 100.0
# If not, simply return the max frequency
else:
p[i] = max(entry[species]) / 100.0
break
except:
pass
return p
def getSpeciesLists(lat=-1, lon=-1, week=-1, threshold=0.02):
# Get species probabilities from for date and location
p = getSpeciesProbabilities(lat, lon, week)
# Parse probabilities and create white list and black list
white_list, black_list = [], []
for i in range(p.shape[0]):
if p[i] >= threshold:
white_list.append(cfg.CLASSES[i])
else:
black_list.append(cfg.CLASSES[i])
return white_list, black_list |
py | 1a529200f02a0f997ebe5dafd810f133843ed8b1 | from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import DefaultDict
from poetry.console.commands.command import Command
if TYPE_CHECKING:
from poetry.core.packages.package import Package
class PluginShowCommand(Command):
name = "plugin show"
description = "Shows information about the currently installed plugins."
def handle(self) -> int:
from poetry.plugins.application_plugin import ApplicationPlugin
from poetry.plugins.plugin import Plugin
from poetry.plugins.plugin_manager import PluginManager
from poetry.repositories.installed_repository import InstalledRepository
from poetry.utils.env import EnvManager
from poetry.utils.helpers import canonicalize_name
from poetry.utils.helpers import pluralize
plugins: DefaultDict[str, dict[str, Package | list[str]]] = defaultdict(
lambda: {
"package": None,
"plugins": [],
"application_plugins": [],
}
)
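        # One entry per installed distribution: its Package plus the regular and
        # application plugin entry points it provides.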
entry_points = (
PluginManager(ApplicationPlugin.group).get_plugin_entry_points()
+ PluginManager(Plugin.group).get_plugin_entry_points()
)
system_env = EnvManager.get_system_env(naive=True)
installed_repository = InstalledRepository.load(
system_env, with_dependencies=True
)
packages_by_name = {pkg.name: pkg for pkg in installed_repository.packages}
for entry_point in entry_points:
plugin = entry_point.load()
category = "plugins"
if issubclass(plugin, ApplicationPlugin):
category = "application_plugins"
package = packages_by_name[canonicalize_name(entry_point.distro.name)]
plugins[package.pretty_name]["package"] = package
plugins[package.pretty_name][category].append(entry_point)
for name, info in plugins.items():
package = info["package"]
description = " " + package.description if package.description else ""
self.line("")
self.line(f" • <c1>{name}</c1> (<c2>{package.version}</c2>){description}")
provide_line = " "
if info["plugins"]:
count = len(info["plugins"])
provide_line += f" <info>{count}</info> plugin{pluralize(count)}"
if info["application_plugins"]:
if info["plugins"]:
provide_line += " and"
count = len(info["application_plugins"])
provide_line += (
f" <info>{count}</info> application plugin{pluralize(count)}"
)
self.line(provide_line)
if package.requires:
self.line("")
self.line(" <info>Dependencies</info>")
for dependency in package.requires:
self.line(
f" - {dependency.pretty_name}"
f" (<c2>{dependency.pretty_constraint}</c2>)"
)
return 0
|
py | 1a52922e4b7f8362804bf1131055918283c995cc | # pylint: disable=W0611
'''
Android Joystick Input Provider
===============================
This module is based on the PyGame JoyStick Input Provider. For more
information, please refer to
`<http://www.pygame.org/docs/ref/joystick.html>`_
'''
__all__ = ('AndroidMotionEventProvider', )
import os
try:
import android # NOQA
except ImportError:
if 'KIVY_DOC' not in os.environ:
raise Exception('android lib not found.')
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.shape import ShapeRect
from kivy.input.motionevent import MotionEvent
import pygame.joystick
class AndroidMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.profile = ['pos', 'pressure', 'shape']
self.sx, self.sy, self.pressure, radius = args
self.shape = ShapeRect()
self.shape.width = radius
self.shape.height = radius
super(AndroidMotionEvent, self).depack(args)
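# Illustrative: depack((0.5, 0.25, 0.8, 12.0)) sets sx=0.5, sy=0.25, pressure=0.8
# and a 12x12 ShapeRect as the touch shape (values are hypothetical).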
class AndroidMotionEventProvider(MotionEventProvider):
def __init__(self, device, args):
super(AndroidMotionEventProvider, self).__init__(device, args)
self.joysticks = []
self.touches = {}
self.uid = 0
self.window = None
def create_joystick(self, index):
Logger.info('Android: create joystick <%d>' % index)
js = pygame.joystick.Joystick(index)
js.init()
if js.get_numbuttons() == 0:
            Logger.info('Android: discard joystick <%d> because it has no button' %
index)
return
self.joysticks.append(js)
def start(self):
pygame.joystick.init()
Logger.info('Android: found %d joystick' % pygame.joystick.get_count())
for i in range(pygame.joystick.get_count()):
self.create_joystick(i)
def stop(self):
self.joysticks = []
def update(self, dispatch_fn):
if not self.window:
from kivy.core.window import Window
self.window = Window
w, h = self.window.system_size
touches = self.touches
for joy in self.joysticks:
jid = joy.get_id()
pressed = joy.get_button(0)
x = joy.get_axis(0) * 32768. / w
y = 1. - (joy.get_axis(1) * 32768. / h)
            # python-for-android multiplies these values by 1000.
pressure = joy.get_axis(2) / 1000.
radius = joy.get_axis(3) / 1000.
            # new touch?
if pressed and jid not in touches:
self.uid += 1
touch = AndroidMotionEvent(self.device, self.uid,
[x, y, pressure, radius])
touches[jid] = touch
dispatch_fn('begin', touch)
# update touch
elif pressed:
touch = touches[jid]
# avoid same touch position
if touch.sx == x and touch.sy == y \
and touch.pressure == pressure:
continue
touch.move([x, y, pressure, radius])
dispatch_fn('update', touch)
            # disappeared
elif not pressed and jid in touches:
touch = touches[jid]
touch.move([x, y, pressure, radius])
touch.update_time_end()
dispatch_fn('end', touch)
touches.pop(jid)
MotionEventFactory.register('android', AndroidMotionEventProvider)
|
py | 1a52932761c9826ecf5aa60a5ab0d9e40e639c48 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from threading import RLock
from rosgraph.names import ns_join, GLOBALNS, SEP, is_global, is_private, canonicalize_name
import os
import json
def _get_param_names(names, key, d):
"""
helper recursive routine for getParamNames()
@param names: list of param names to append to
@type names: [str]
@param d: parameter tree node
@type d: dict
@param key: parameter key for tree node d
@type key: str
"""
#TODOXXX
for k,v in d.items():
if type(v) == dict:
_get_param_names(names, ns_join(key, k), v)
else:
names.append(ns_join(key, k))
class ParamDictionary(object):
def __init__(self, reg_manager):
"""
ctor.
        @param reg_manager: registration manager that tracks parameter subscribers
        @type reg_manager: RegistrationManager
"""
self.lock = RLock()
self.parameters = {}
self.reg_manager = reg_manager
self.snapshot = False
if "ROS_MASTER_SNAPSHOT" in os.environ:
try:
self.snapshot = True
self.snapshot_file = os.path.join(os.environ["ROS_ROOT"], ".master_snapshot")
with open(self.snapshot_file, "r") as f:
self.parameters = json.loads(f.read())
del self.parameters["run_id"]
except IOError:
pass
except KeyError:
pass
def get_param_names(self):
"""
Get list of all parameter names stored on this server.
@return: [code, statusMessage, parameterNameList]
@rtype: [int, str, [str]]
"""
try:
self.lock.acquire()
param_names = []
_get_param_names(param_names, '/', self.parameters)
finally:
self.lock.release()
return param_names
def search_param(self, ns, key):
"""
Search for matching parameter key for search param
key. Search for key starts at ns and proceeds upwards to
the root. As such, search_param should only be called with a
relative parameter name.
search_param's behavior is to search for the first partial match.
For example, imagine that there are two 'robot_description' parameters:
- /robot_description
- /robot_description/arm
- /robot_description/base
- /pr2/robot_description
- /pr2/robot_description/base
If I start in the namespace /pr2/foo and search for
'robot_description', search_param will match
/pr2/robot_description. If I search for 'robot_description/arm'
it will return /pr2/robot_description/arm, even though that
parameter does not exist (yet).
@param ns: namespace to begin search from.
@type ns: str
@param key: Parameter key.
@type key: str
@return: key of matching parameter or None if no matching
parameter.
@rtype: str
"""
if not key or is_private(key):
raise ValueError("invalid key")
if not is_global(ns):
raise ValueError("namespace must be global")
if is_global(key):
if self.has_param(key):
return key
else:
return None
        # there are more efficient implementations, but our hierarchy
# is not very deep and this is fairly clean code to read.
# - we only search for the first namespace in the key to check for a match
key_namespaces = [x for x in key.split(SEP) if x]
key_ns = key_namespaces[0]
# - corner case: have to test initial namespace first as
# negative indices won't work with 0
search_key = ns_join(ns, key_ns)
if self.has_param(search_key):
# resolve to full key
return ns_join(ns, key)
namespaces = [x for x in ns.split(SEP) if x]
for i in range(1, len(namespaces)+1):
search_key = SEP + SEP.join(namespaces[0:-i] + [key_ns])
if self.has_param(search_key):
# we have a match on the namespace of the key, so
# compose the full key and return it
full_key = SEP + SEP.join(namespaces[0:-i] + [key])
return full_key
return None
def get_param(self, key):
"""
Get the parameter in the parameter dictionary.
@param key: parameter key
@type key: str
@return: parameter value
"""
try:
self.lock.acquire()
val = self.parameters
if key != GLOBALNS:
# split by the namespace separator, ignoring empty splits
namespaces = [x for x in key.split(SEP)[1:] if x]
for ns in namespaces:
if not type(val) == dict:
raise KeyError(val)
val = val[ns]
return val
finally:
self.lock.release()
def set_param(self, key, value, notify_task=None):
"""
Set the parameter in the parameter dictionary.
@param key: parameter key
@type key: str
@param value: parameter value
@param notify_task: function to call with
subscriber updates. updates is of the form
[(subscribers, param_key, param_value)*]. The empty dictionary
represents an unset parameter.
@type notify_task: fn(updates)
"""
try:
self.lock.acquire()
if key == GLOBALNS:
if type(value) != dict:
raise TypeError("cannot set root of parameter tree to non-dictionary")
self.parameters = value
else:
namespaces = [x for x in key.split(SEP) if x]
# - last namespace is the actual key we're storing in
value_key = namespaces[-1]
namespaces = namespaces[:-1]
d = self.parameters
# - descend tree to the node we're setting
for ns in namespaces:
if not ns in d:
new_d = {}
d[ns] = new_d
d = new_d
else:
val = d[ns]
# implicit type conversion of value to namespace
if type(val) != dict:
d[ns] = val = {}
d = val
d[value_key] = value
# ParamDictionary needs to queue updates so that the updates are thread-safe
if notify_task:
updates = compute_param_updates(self.reg_manager.param_subscribers, key, value)
if updates:
notify_task(updates)
finally:
self.lock.release()
if self.snapshot:
with open(self.snapshot_file, 'w') as f:
f.write(json.dumps(self.parameters))
def subscribe_param(self, key, registration_args):
"""
@param key: parameter key
@type key: str
@param registration_args: additional args to pass to
subscribers.register. First parameter is always the parameter
key.
@type registration_args: tuple
"""
if key != SEP:
key = canonicalize_name(key) + SEP
try:
self.lock.acquire()
# fetch parameter value
try:
val = self.get_param(key)
except KeyError:
# parameter not set yet
val = {}
self.reg_manager.register_param_subscriber(key, *registration_args)
return val
finally:
self.lock.release()
def unsubscribe_param(self, key, unregistration_args):
"""
@param key str: parameter key
@type key: str
@param unregistration_args: additional args to pass to
subscribers.unregister. i.e. unregister will be called with
(key, *unregistration_args)
@type unregistration_args: tuple
@return: return value of subscribers.unregister()
"""
if key != SEP:
key = canonicalize_name(key) + SEP
return self.reg_manager.unregister_param_subscriber(key, *unregistration_args)
def delete_param(self, key, notify_task=None):
"""
Delete the parameter in the parameter dictionary.
@param key str: parameter key
@param notify_task fn(updates): function to call with
subscriber updates. updates is of the form
[(subscribers, param_key, param_value)*]. The empty dictionary
represents an unset parameter.
"""
try:
self.lock.acquire()
if key == GLOBALNS:
raise KeyError("cannot delete root of parameter tree")
else:
# key is global, so first split is empty
namespaces = [x for x in key.split(SEP) if x]
# - last namespace is the actual key we're deleting
value_key = namespaces[-1]
namespaces = namespaces[:-1]
d = self.parameters
# - descend tree to the node we're setting
for ns in namespaces:
if type(d) != dict or not ns in d:
raise KeyError(key)
else:
d = d[ns]
if not value_key in d:
raise KeyError(key)
else:
del d[value_key]
# ParamDictionary needs to queue updates so that the updates are thread-safe
if notify_task:
updates = compute_param_updates(self.reg_manager.param_subscribers, key, {})
if updates:
notify_task(updates)
finally:
self.lock.release()
def has_param(self, key):
"""
Test for parameter existence
@param key: parameter key
@type key: str
@return: True if parameter set, False otherwise
@rtype: bool
"""
try:
# more efficient implementations are certainly possible,
# but this guarantees correctness for now
self.get_param(key)
return True
except KeyError:
return False
def _compute_all_keys(param_key, param_value, all_keys=None):
"""
Compute which subscribers should be notified based on the parameter update
@param param_key: key of updated parameter
@type param_key: str
@param param_value: value of updated parameter
@param all_keys: (internal use only) list of parameter keys
to append to for recursive calls.
@type all_keys: [str]
@return: list of parameter keys. All keys will be canonicalized with trailing slash.
@rtype: [str]
"""
if all_keys is None:
all_keys = []
for k, v in param_value.items():
new_k = ns_join(param_key, k) + SEP
all_keys.append(new_k)
if type(v) == dict:
_compute_all_keys(new_k, v, all_keys)
return all_keys
def compute_param_updates(subscribers, param_key, param_value):
"""
Compute subscribers that should be notified based on the parameter update
@param subscribers: parameter subscribers
@type subscribers: Registrations
@param param_key: parameter key
@type param_key: str
@param param_value: parameter value
@type param_value: str
"""
# logic correct for both updates and deletions
if not subscribers:
return []
# end with a trailing slash to optimize startswith check from
# needing an extra equals check
if param_key != SEP:
param_key = canonicalize_name(param_key) + SEP
# compute all the updated keys
if type(param_value) == dict:
all_keys = _compute_all_keys(param_key, param_value)
else:
all_keys = None
updates = []
# subscriber gets update if anything in the subscribed namespace is updated or if its deleted
for sub_key in subscribers.iterkeys():
ns_key = sub_key
if ns_key[-1] != SEP:
ns_key = sub_key + SEP
if param_key.startswith(ns_key):
node_apis = subscribers[sub_key]
updates.append((node_apis, param_key, param_value))
elif all_keys is not None and ns_key.startswith(param_key) \
and not sub_key in all_keys:
# parameter was deleted
node_apis = subscribers[sub_key]
updates.append((node_apis, sub_key, {}))
# add updates for exact matches within tree
if all_keys is not None:
# #586: iterate over parameter tree for notification
for key in all_keys:
if key in subscribers:
# compute actual update value
sub_key = key[len(param_key):]
namespaces = [x for x in sub_key.split(SEP) if x]
val = param_value
for ns in namespaces:
val = val[ns]
updates.append((subscribers[key], key, val))
return updates
|
py | 1a5293976ce63177644f3ede4c0916bcd2532caf | #!/usr/bin/env python3
"""
MSFT Bonsai SDK3 Template for Simulator Integration using Python
Copyright 2020 Microsoft
Usage:
For registering simulator with the Bonsai service for training:
python __main__.py \
--workspace <workspace_id> \
--accesskey="<access_key> \
Then connect your registered simulator to a Brain via UI
Alternatively, one can set the SIM_ACCESS_KEY and SIM_WORKSPACE as
environment variables.
"""
import json
import time
from typing import Dict, Any, Optional
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig, BonsaiClient
from microsoft_bonsai_api.simulator.generated.models import (
SimulatorState,
SimulatorInterface,
)
import argparse
from sim.qube_simulator import QubeSimulator
class TemplateSimulatorSession():
def __init__(self, render):
## Initialize python api for simulator
self.simulator = QubeSimulator()
self.render = render
def get_state(self) -> Dict[str, Any]:
"""Called to retreive the current state of the simulator. """
return {
## Add simulator state as dictionary
"theta": float(self.simulator.state[0]),
"alpha": float(self.simulator.state[1]),
"theta_dot": float(self.simulator.state[2]),
"alpha_dot": float(self.simulator.state[3])
}
def episode_start(self, config: Dict[str, Any]):
""" Called at the start of each episode """
## Add simulator reset api here using config from desired lesson in inkling
self.simulator.reset(config)
def episode_step(self, action: Dict[str, Any]):
""" Called for each step of the episode """
## Add simulator step api here using action from Bonsai platform
self.simulator.step(action['Vm'])
if self.render:
self.simulator.view()
def halted(self) -> bool:
"""
Should return True if the simulator cannot continue for some reason
"""
return (
False
)
def main(render = False):
# Grab standardized way to interact with sim API
sim = TemplateSimulatorSession(render= render)
# Configure client to interact with Bonsai service
config_client = BonsaiClientConfig()
client = BonsaiClient(config_client)
# Load json file as simulator integration config type file
with open('interface.json') as file:
interface = json.load(file)
# Create simulator session and init sequence id
registration_info = SimulatorInterface(
name=interface['name'],
timeout=interface['timeout'],
simulator_context=config_client.simulator_context,
)
registered_session = client.session.create(
workspace_name=config_client.workspace,
body=registration_info
)
print("Registered simulator.")
sequence_id = 1
try:
while True:
# Advance by the new state depending on the event type
sim_state = SimulatorState(
sequence_id=sequence_id, state=sim.get_state(),
halted=sim.halted()
)
event = client.session.advance(
workspace_name=config_client.workspace,
session_id=registered_session.session_id,
body=sim_state
)
sequence_id = event.sequence_id
print("[{}] Last Event: {}".format(time.strftime('%H:%M:%S'),
event.type))
# Event loop
if event.type == 'Idle':
time.sleep(event.idle.callback_time)
print('Idling...')
elif event.type == 'EpisodeStart':
sim.episode_start(event.episode_start.config)
elif event.type == 'EpisodeStep':
sim.episode_step(event.episode_step.action)
elif event.type == 'EpisodeFinish':
print('Episode Finishing...')
elif event.type == 'Unregister':
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator.")
else:
pass
except KeyboardInterrupt:
# Gracefully unregister with keyboard interrupt
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator.")
except Exception as err:
# Gracefully unregister for any other exceptions
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator because: {}".format(err))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='args for sim integration',
allow_abbrev=False)
parser.add_argument('--render', action='store_true')
args, _ = parser.parse_known_args()
main(render=args.render) |
py | 1a52946730a62054ca354720c66657a5ba7fad24 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import TdiCompile, TreeNode
import os
import sys
import numpy
example = '/image/%s/-1?expr=ADD(ZERO([100,100],0WU),2000WU)&bit=12' % os.environ.get(
'EXPT', 'main')
def doImage(self):
if len(self.path_parts) > 2:
tree = self.openTree(self.path_parts[1], self.path_parts[2])
_tdi = tree.tdiCompile
else:
_tdi = TdiCompile
expr = self.args['expr'][-1]
obj = _tdi(expr)
idx = int(self.args['idx'][-1]) if 'idx' in self.args else 0
if isinstance(obj, TreeNode) and obj.getNumSegments() > 0:
d = obj.getSegment(idx)
isseg = True
else:
d = obj.evaluate()
isseg = False
try:
im = d.getImage()
except:
from PIL import Image
import io
raw = d.data()
if 'bit' in self.args:
bit = 8-int(self.args['bit'][-1])
if bit != 0:
if raw.itemsize == 1:
raw = raw.astype('uint16')
if bit > 0:
raw = (((raw+1) << (bit))-1).astype('uint8')
elif bit < 0:
raw = (((raw-1) >> (-bit))+1).astype('uint8')
else:
                raw = raw.astype("uint8")
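        # Illustrative: with ?bit=12 (as in the example URL above), bit = 8 - 12 = -4,
        # so 12-bit pixels are right-shifted by 4 bits before display as 8-bit data.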
if raw.ndim > 2 and ((not isseg) or raw.shape[0]):
raw = raw[0] if isseg else raw[idx]
if raw.ndim == 2:
img = Image.new("L", raw.T.shape, "gray")
elif raw.ndim == 3:
img = Image.new("RGB", raw.T.shape[:2])
raw = numpy.rollaxis(raw, 0, 3)
else:
raise
fmt = self.args['format'][-1].lower() if 'format' in self.args else 'jpeg'
img.frombytes(raw.tostring())
stream = io.BytesIO()
img.save(stream, format=fmt.upper())
return ('200 OK', [('Content-type', 'image/%s' % fmt)], stream.getvalue())
else:
if im.format == "MPEG":
response_headers = [('Content-type', 'video/mpeg'),
('Content-Disposition', 'inline; filename="%s.mpeg"' % (expr,))]
else: # covers gif, jpeg, and png
fmt = im.format.lower()
response_headers = [('Content-type', 'image/%s' % fmt),
('Content-Disposition', 'inline; filename="%s.%s"' % (expr, fmt))]
output = str(d.data().data)
status = '200 OK'
return (status, response_headers, output)
|
py | 1a529468d9247ab357836d0766bf03e83c2b1b0e | ############################################################################
#
# Copyright (c) Mamba Developers. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
############################################################################
""" Mamba generic utility functions """
import os
import re
import inspect
from typing import List, Iterator, Dict, Callable, Any
from types import ModuleType
from importlib import import_module
from pkgutil import iter_modules
from shutil import ignore_patterns, copy2, copystat
from mamba.core.context import Context
from mamba.core.exceptions import ComposeFileException
def get_properties_dict(configuration: Dict[str, dict]) -> Dict[str, Any]:
"""Return a dictionary of properties with default values composed from
a configuration file.
Args:
configuration: The path string formatted in windows or linux style.
Returns:
The dictionary of properties.
"""
if 'device' in configuration and 'properties' in \
configuration['device']:
properties_dict = {
key: value.get('default')
for key, value in configuration['device']['properties'].items()
}
else:
properties_dict = {}
return properties_dict
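# Illustrative (hypothetical configuration):
#   get_properties_dict({'device': {'properties': {'port': {'default': 8080}}}})
#   -> {'port': 8080}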
def path_from_string(path_str: str) -> str:
"""Return a valid path from a given path string, formatted with windows
or linux slashes.
Args:
path_str: The path string formatted in windows or linux style.
Returns:
The valid path string.
"""
path = os.path.join(*re.split(r' |/|\\', path_str))
if path_str[0] == '/': # Fix for absolute path
path = '/' + path
return path
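# Illustrative: path_from_string('composer\\project') and path_from_string('composer/project')
# both resolve to os.path.join('composer', 'project') on the running platform.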
def get_classes_from_module(module: str,
search_class: type) -> Dict[str, Callable]:
"""Return a dictionary with all classes 'search_class' defined in the
given module that can be instantiated.
"""
classes_dict: Dict[str, Callable] = {}
for cls in _iter_classes(module, search_class):
cls_name = cls.__module__.split('.')[-1]
classes_dict[cls_name] = cls
return classes_dict
def get_components(used_components: Dict[str, dict], modules: List[str],
component_type: type,
context: Context) -> Dict[str, object]:
"""Returns a dictionary of instantiated component with context.
Args:
used_components: The dictionary of used component.
modules: The folders where to look for the component.
component_type: The class type of the component.
context: The application context to instantiate
the component with.
Returns:
The instantiated dictionary of component.
Raises:
ComposeFileException: If a given component id is not found.
"""
all_components_by_type: Dict[str, Callable] = {}
for module in modules:
components_in_module = get_classes_from_module(module, component_type)
for key, value in components_in_module.items():
if key not in all_components_by_type:
all_components_by_type[key] = value
dict_used_components = {}
for component_name, args in used_components.items():
if args is None or 'component' not in args:
raise ComposeFileException(
f"'{component_name}: missing component property")
if args['component'] in all_components_by_type:
args['name'] = component_name
dict_used_components[component_name] = all_components_by_type[
args['component']](context, args)
else:
raise ComposeFileException(
f"{component_name}: component {args['component']}' is not a "
f"valid component identifier")
return dict_used_components
def merge_dicts(dict_1, dict_2):
"""
Merge dictionary dict_2 into dict_1. In case of conflict dict_1
has precedence
"""
if dict_1 is None:
return dict_2
if dict_2 is None:
return dict_1
result = dict_1
for key in dict_2:
if key in dict_1:
if isinstance(dict_1[key], dict) and isinstance(dict_2[key], dict):
merge_dicts(dict_1[key], dict_2[key])
else:
result[key] = dict_2[key]
return result
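# Illustrative: merge_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
#   -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}   (dict_1 wins on conflicting leaves)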
def copytree(src, dst, ignore_pattern=ignore_patterns('*.pyc', '.svn')):
"""
    The standard shutil.copytree always creates the destination directory
    and fails if it already exists; this simplified copy avoids that and
    keeps only what this use case needs.
"""
ignore = ignore_pattern
names = os.listdir(src)
ignored_names = ignore(src, names)
if not os.path.exists(dst):
os.makedirs(dst)
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
copytree(srcname, dstname)
else:
copy2(srcname, dstname)
copystat(src, dst)
def _walk_modules(path: str) -> List[ModuleType]:
"""Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
    For example: _walk_modules('mamba.mock')
"""
mods = []
mod = import_module(path)
mods.append(mod)
# Any module that contains a __path__ attribute is considered a package.
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(getattr(mod, '__path__')):
fullpath = path + '.' + subpath
if ispkg:
mods += _walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
def _iter_classes(module_name: str, search_class: type) -> Iterator[Callable]:
"""Return an iterator over all classes 'search_class' defined in the given
module that can be instantiated.
"""
for module in _walk_modules(module_name):
for obj in vars(module).values():
if inspect.isclass(obj) and \
issubclass(obj, search_class) and \
obj.__module__ == module.__name__ and \
not obj == search_class:
yield obj
|
py | 1a5294cb6137a1b8401085ebe544b2d95febd2cd | class main():
#Redirector
def redirector(c):
output = f""""""
return output
#C2
def c2(c):
scripts = ', '.join('"../../redbaron/data/scripts/tools/{0}.sh"'.format(s) for s in c["tools"])
if c["redirectors"] > 0:
output = f""""""
else:
output = f""""""
return output
#WebServer
def webserver(c):
if c["redirectors"] > 0:
output = f""""""
else:
output = f""""""
return output
#Gophish:
def gophish(c):
if c["redirectors"] > 0:
output = f""""""
else:
output = f""""""
return output
#Mail
    def mail(c, my_nets_1, my_nets_2):
        output = f""""""
        return output
#Dns Records
def dns_records_type(c,record):
output=f""""""
return output
|
py | 1a52958bd6f9bb9cbde6dba1faeb61c1597efcd4 | import numpy as np
import copy
from ..field import Field
class SpectralNoiseFactory(object):
def __init__(self, psd, output_grid, psd_args=(), psd_kwargs={}):
self.psd = psd
self.psd_args = psd_args
self.psd_kwargs = psd_kwargs
self.output_grid = output_grid
def make_random(self):
raise NotImplementedError()
def make_zero(self):
raise NotImplementedError()
class SpectralNoise(object):
def copy(self):
return copy.deepcopy(self)
def shift(self, shift):
raise NotImplementedError()
def shifted(self, shift):
a = self.copy()
a.shift(shift)
return a
def __iadd__(self, b):
return NotImplemented
def __add__(self, b):
a = self.copy()
a += b
return a
def __imul__(self, f):
return NotImplemented
def __mul__(self, f):
a = self.copy()
a *= f
return a
def __call__(self):
raise NotImplementedError()
def evaluate(self):
return self()
class SpectralNoiseFactoryFFT(SpectralNoiseFactory):
def __init__(self, psd, output_grid, oversample=1, psd_args=(), psd_kwargs={}):
from ..fourier import FastFourierTransform
SpectralNoiseFactory.__init__(self, psd, output_grid, psd_args, psd_kwargs)
if not self.output_grid.is_regular:
raise ValueError("Can't make a SpectralNoiseFactoryFFT on a non-regular grid.")
self.fourier = FastFourierTransform(self.output_grid, q=oversample)
self.input_grid = self.fourier.output_grid
self.period = output_grid.coords.delta * output_grid.coords.shape
        # * (2*np.pi)**self.input_grid.ndim is due to the conversion of the PSD from "per Hertz" to "per radian", which yields a factor of 2pi per dimension
self.C = np.sqrt(self.psd(self.input_grid, *psd_args, **psd_kwargs) / self.input_grid.weights * (2*np.pi)**self.input_grid.ndim)
def make_random(self):
N = self.input_grid.size
C = self.C * (np.random.randn(N) + 1j * np.random.randn(N))
C = Field(C, self.input_grid)
return SpectralNoiseFFT(self, C)
def make_zero(self):
C = Field(np.zeros(self.input_grid.size, dtype='complex'), self.input_grid)
return SpectralNoiseFFT(self, C)
class SpectralNoiseFFT(SpectralNoise):
def __init__(self, factory, C):
self.factory = factory
self.C = C
self.coords = C.grid.separated_coords
def shift(self, shift):
        s = shift  # np.mod(shift + self.factory.period / 2, self.factory.period) - self.factory.period / 2
S = [s[i] * self.coords[i] for i in range(len(self.coords))]
S = np.add.reduce(np.ix_(*S))
self.C *= np.exp(-1j * S.ravel())
def __iadd__(self, b):
self.C += b.C
return self
def __imul__(self, f):
self.C *= f
return self
def __call__(self):
return self.factory.fourier.backward(self.C).real
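# Illustrative usage sketch (my_psd and output_grid are placeholder names, not part of the original module):
#   factory = SpectralNoiseFactoryFFT(my_psd, output_grid, oversample=2)
#   noise = factory.make_random()           # one SpectralNoiseFFT realization
#   screen = noise.shifted([1.0, 0.0])()    # evaluate a laterally shifted copy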
class SpectralNoiseFactoryMultiscale(SpectralNoiseFactory):
def __init__(self, psd, output_grid, oversampling, psd_args=(), psd_kwargs={}):
from ..fourier import FastFourierTransform, MatrixFourierTransform
SpectralNoiseFactory.__init__(self, psd, output_grid, psd_args, psd_kwargs)
self.oversampling = oversampling
self.fourier_1 = FastFourierTransform(self.output_grid)
self.input_grid_1 = self.fourier_1.output_grid
self.input_grid_2 = self.input_grid_1.scaled(1.0 / oversampling)
self.fourier_2 = MatrixFourierTransform(self.output_grid, self.input_grid_2)
boundary = np.abs(self.input_grid_2.x).max()
mask_1 = self.input_grid_1.as_('polar').r < boundary
mask_2 = self.input_grid_2.as_('polar').r >= boundary
        # * (2*np.pi)**self.input_grid.ndim is due to the conversion of the PSD from "per Hertz" to "per radian", which yields a factor of 2pi per dimension
self.C_1 = np.sqrt(psd(self.input_grid_1, *psd_args, **psd_kwargs) / self.input_grid_1.weights * (2*np.pi)**self.input_grid_1.ndim)
self.C_1[mask_1] = 0
self.C_2 = np.sqrt(psd(self.input_grid_2, *psd_args, **psd_kwargs) / self.input_grid_2.weights * (2*np.pi)**self.input_grid_1.ndim)
self.C_2[mask_2] = 0
def make_random(self):
N_1 = self.input_grid_1.size
N_2 = self.input_grid_2.size
C_1 = self.C_1 * (np.random.randn(N_1) + 1j * np.random.randn(N_1))
C_2 = self.C_2 * (np.random.randn(N_2) + 1j * np.random.randn(N_2))
return SpectralNoiseMultiscale(self, C_1, C_2)
def make_zero(self):
N_1 = self.input_grid_1.size
N_2 = self.input_grid_2.size
C_1 = Field(np.zeros(N_1, dtype='complex'), self.C_1.grid)
        C_2 = Field(np.zeros(N_2, dtype='complex'), self.C_2.grid)
return SpectralNoiseMultiscale(self, C_1, C_2)
class SpectralNoiseMultiscale(SpectralNoise):
def __init__(self, factory, C_1, C_2):
self.factory = factory
self.C_1 = C_1
self.C_2 = C_2
self.coords_1 = C_1.grid.separated_coords
self.coords_2 = C_2.grid.separated_coords
def shift(self, shift):
S_1 = [shift[i] * self.coords_1[i] for i in range(len(self.coords_1))]
S_1 = np.add.reduce(np.ix_(*S_1))
S_2 = [shift[i] * self.coords_2[i] for i in range(len(self.coords_2))]
S_2 = np.add.reduce(np.ix_(*S_2))
self.C_1 *= np.exp(-1j * S_1.ravel())
self.C_2 *= np.exp(-1j * S_2.ravel())
def __iadd__(self, b):
self.C_1 += b.C_1
self.C_2 += b.C_2
return self
def __imul__(self, f):
self.C_1 *= f
self.C_2 *= f
return self
def __call__(self):
ps = self.factory.fourier_1.backward(self.C_1).real
ps += self.factory.fourier_2.backward(self.C_2).real
return ps |
py | 1a5295d0ed2d849bb0ee80936c4c861aca956b48 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_config import cfg
import webob
from nova.api.openstack.compute import block_device_mapping \
as block_device_mapping_v21
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute.legacy_v2 import servers as servers_v20
from nova.api.openstack.compute import multiple_create as multiple_create_v21
from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions as extensions_v20
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import manager
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class MultiCreateExtensionTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV21, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v21')
self.no_mult_create_controller = servers_v21.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.req = fakes.HTTPRequest.blank('')
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
if override_controller:
server = override_controller.create(self.req,
body=body).obj['server']
else:
server = self.controller.create(self.req,
body=body).obj['server']
def _check_multiple_create_extension_disabled(self, **kwargs):
# NOTE: on v2.1 API, "create a server" API doesn't add the following
# attributes into kwargs when non-loading multiple_create extension.
# However, v2.0 API adds them as values "1" instead. So we need to
# define checking methods for each API here.
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
def test_create_instance_with_multiple_create_disabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._check_multiple_create_extension_disabled(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 4,
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("admin_password", server_dict)
def _create_multiple_instances_resv_id_return(self, resv_id_return):
"""Test creating multiple instances with asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
multiple_create_v21.RRID_ATTRIBUTE_NAME: resv_id_return
}
}
res = self.controller.create(self.req, body=body)
reservation_id = res.obj['reservation_id']
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertTrue(len(reservation_id) > 1)
def test_create_multiple_instances_with_resv_id_return(self):
self._create_multiple_instances_resv_id_return(True)
def test_create_multiple_instances_with_string_resv_id_return(self):
self._create_multiple_instances_resv_id_return("True")
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
{'source_type': 'volume', 'uuid': 'vol-yyyy'}
]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
class MultiCreateExtensionTestV2(MultiCreateExtensionTestV21):
validation_error = webob.exc.HTTPBadRequest
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV2, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
self.ext_mgr = extensions_v20.ExtensionManager()
self.ext_mgr.extensions = {
'os-volumes': 'fake',
'os-multiple-create': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.controller = servers_v20.Controller(self.ext_mgr)
no_mult_ext_mgr = extensions_v20.ExtensionManager()
no_mult_ext_mgr.extensions = {
'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.no_mult_create_controller = servers_v20.Controller(
no_mult_ext_mgr)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_get', instance_get)
def _check_multiple_create_extension_disabled(self, **kwargs):
self.assertEqual(kwargs['min_count'], 1)
self.assertEqual(kwargs['max_count'], 1)
|
py | 1a5296fdbe25de7d413cdcb1ca46ae64b2822e4a | import statistics
import time
import pytest
import streamz
from metrix import MElement, MStream, MSinkPrinter
from metrix import MCoordinator as MC
@pytest.fixture(scope="module")
def test_elements():
return [
{"name": "m1", "value": 1, "tags": None},
{"name": "m2", "value": 2, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": None},
{"name": "m2", "value": 1, "tags": {"bat": "baz"}},
{"name": "m2", "value": 2, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": None},
{"name": "m2", "value": 1, "tags": {"foo": "bar"}},
{"name": "m1", "value": 1, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": {"bat": "baz"}},
{"name": "m2", "value": 2, "tags": None},
]
class MSinkToList:
"""Ad-hoc metric sink -- useful for testing, but not production."""
def __init__(self):
self.data = []
def __call__(self, me: MElement):
self.data.append(me)
@pytest.mark.parametrize(
"mstreams,msinks,rate_limit",
[
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
1.0,
),
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
None,
),
(
[
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=[min, max], batch_size=1)
],
[MSinkPrinter(), MSinkPrinter()],
[1.0, 0.5],
),
(None, None, None),
]
)
def test_metric_coordinator_init(mstreams, msinks, rate_limit):
mc = MC(mstreams=mstreams, msinks=msinks, rate_limit=rate_limit)
assert str(mc)
assert all(hasattr(mc, attr) for attr in ["stream", "metric_mstreams", "msinks"])
if mstreams:
assert (
len(mc.metric_mstreams) == len(mc.stream.upstreams) == len(mstreams) and
sorted(mc.metric_mstreams.keys()) == sorted(mstream.name for mstream in mstreams)
)
if msinks:
assert len(mc.msinks) == len(mc.stream.downstreams) == len(msinks)
if rate_limit:
assert all(isinstance(ds, streamz.core.rate_limit) for ds in mc.stream.downstreams)
if isinstance(rate_limit, list):
assert all(ds.interval == rl for ds, rl in zip(mc.stream.downstreams, rate_limit))
else:
assert all(ds.interval == rate_limit for ds in mc.stream.downstreams)
else:
assert all(isinstance(ds, streamz.core.buffer) for ds in mc.stream.downstreams)
@pytest.mark.parametrize(
"mstreams,msinks,rate_limit",
[
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
[1.0, 1.0],
),
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
"10s",
),
]
)
def test_metric_coordinator_bad_init(mstreams, msinks, rate_limit):
with pytest.raises((ValueError, TypeError)):
_ = MC(mstreams=mstreams, msinks=msinks, rate_limit=rate_limit)
@pytest.mark.parametrize(
"mstreams",
[
[MStream("m1", agg=sum, batch_size=1)],
[
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=[min, max], batch_size=1)
],
]
)
def test_metric_coordinator_add_mstream(mstreams):
mc = MC()
for mstream in mstreams:
mc.add_mstream(mstream)
assert (
len(mc.metric_mstreams) == len(mc.stream.upstreams) == len(mstreams) and
sorted(mc.metric_mstreams.keys()) == sorted(mstream.name for mstream in mstreams)
)
@pytest.mark.parametrize(
"msinks,rate_limits",
[
([MSinkPrinter()], [1.0]),
([MSinkPrinter(), MSinkPrinter()], [1.0, 0.5]),
([MSinkPrinter()], [None]),
]
)
def test_metric_coordinator_add_msink(msinks, rate_limits):
mc = MC()
for sink, rate_limit in zip(msinks, rate_limits):
mc.add_msink(sink, rate_limit)
assert len(mc.msinks) == len(mc.stream.downstreams) == len(msinks)
@pytest.mark.slow
@pytest.mark.parametrize(
"init_kwargs,exp_results",
[
(
{
"mstreams": [
MStream("m1", agg=sum, batch_size=5),
MStream("m2", agg=statistics.mean, batch_size=5)
],
"msinks": [MSinkToList(), MSinkToList()]
},
[
MElement(name="m1.sum", value=7, tags=None),
MElement(name="m1.sum", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags={"bat": "baz"}),
MElement(name="m2.mean", value=1.6666666666666667, tags={"foo": "bar"}),
MElement(name="m2.mean", value=1, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags=None),
],
),
(
{
"mstreams": [
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=statistics.mean, batch_size=1)
],
"msinks": [MSinkToList(), MSinkToList()]
},
[
MElement(name="m1.sum", value=1, tags=None),
MElement(name="m2.mean", value=2, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags=None),
MElement(name="m2.mean", value=1, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags=None),
MElement(name="m2.mean", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags=None),
],
),
]
)
def test_metric_stream_send(init_kwargs, exp_results, test_elements):
mc = MC(**init_kwargs)
for te in test_elements:
mc.send(**te)
time.sleep(0.01)
time.sleep(0.2)
assert all(msink.data == exp_results for msink in mc.msinks)
|
py | 1a529748ecbeff7ccae1f1070281e76e839765d2 | # coding=utf-8
class Plugin(object):
"""
@type cfg1: modules.Configuration
@type api1: modules.Poloniex.Poloniex
@type log1: modules.Logger.Logger
"""
def __init__(self, cfg1, api1, log1, notify_config1):
self.api = api1
self.config = cfg1
self.notify_config = notify_config1
self.log = log1
self.all_currencies = self.config.get_all_currencies()
# override this to run plugin init code
def on_bot_init(self):
self.log.log(self.__class__.__name__ + ' plugin initializing...')
# override this to run plugin loop code before lending
def before_lending(self):
pass
# override this to run plugin loop code after lending
def after_lending(self):
pass
# override this to run plugin stop code
# since the bot can be killed, there is not guarantee this will be called.
def on_bot_stop(self):
pass
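# Illustrative subclass sketch (hypothetical plugin, not part of the original module):
#   class MyPlugin(Plugin):
#       def on_bot_init(self):
#           super(MyPlugin, self).on_bot_init()
#           self.log.log('watching: ' + ', '.join(self.all_currencies))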
|
py | 1a5299061ff514b40889a092a2aee6209f30ff12 | from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
from vkbottle import ABCView, BaseReturnManager
from vkbottle.dispatch.handlers import FromFuncHandler
from vkbottle.framework.bot import BotLabeler
from vkbottle.modules import logger
from vkbottle_types.events import MessageEvent as _MessageEvent
from vkbottle_callback.rules import *
from vkbottle_callback.types import MessageEvent
if TYPE_CHECKING:
from vkbottle import ABCAPI, ABCStateDispenser
from vkbottle.dispatch.rules import ABCRule
from vkbottle.dispatch.views import ABCView
from vkbottle.dispatch.views.bot import ABCBotMessageView, RawBotEventView
from vkbottle.framework.bot.labeler.abc import LabeledMessageHandler
class MessageEventReturnHandler(BaseReturnManager):
@BaseReturnManager.instance_of(str)
async def str_handler(self, value: str, event: MessageEvent, _: dict):
await event.show_snackbar(value)
def message_event_min(event: dict, ctx_api: "ABCAPI") -> "MessageEvent":
update = _MessageEvent(**event)
message_event = MessageEvent(
**update.object.dict(),
group_id=update.group_id,
)
setattr(message_event, "unprepared_ctx_api", ctx_api)
return message_event
class MessageEventView(ABCView):
def __init__(self):
super().__init__()
self.handler_return_manager = MessageEventReturnHandler()
async def process_event(self, event: dict) -> bool:
return event["type"] == "message_event"
async def handle_event(
self, event: dict, ctx_api: "ABCAPI", state_dispenser: "ABCStateDispenser"
) -> None:
logger.debug("Handling event ({}) with message_event view".format(event.get("event_id")))
context_variables: dict = {}
message_event = message_event_min(event, ctx_api)
message_event.state_peer = await state_dispenser.cast(message_event.peer_id)
mw_instances = await self.pre_middleware(message_event, context_variables) # type: ignore
if mw_instances is None:
logger.info("Handling stopped, pre_middleware returned error")
return
handle_responses = []
handlers = []
for handler in self.handlers:
result = await handler.filter(message_event) # type: ignore
logger.debug("Handler {} returned {}".format(handler, result))
if result is False:
continue
elif isinstance(result, dict):
context_variables.update(result)
handler_response = await handler.handle(message_event, **context_variables) # type: ignore
handle_responses.append(handler_response)
handlers.append(handler)
return_handler = self.handler_return_manager.get_handler(handler_response)
if return_handler is not None:
await return_handler(
self.handler_return_manager, handler_response, message_event, context_variables
)
if handler.blocking:
break
await self.post_middleware(mw_instances, handle_responses, handlers)
LabeledMessageEventHandler = Callable[..., Callable[[MessageEvent], Any]]
DEFAULT_CUSTOM_RULES: Dict[str, Type[ABCMessageEventRule]] = {
"from_chat": PeerRule,
"peer_ids": FromPeerRule,
"payload": PayloadRule,
"payload_contains": PayloadContainsRule,
"payload_map": PayloadMapRule,
"func": FuncRule,
"coro": CoroutineRule,
"coroutine": CoroutineRule,
"state": StateRule
}
class MessageEventLabeler(BotLabeler):
def __init__(
self,
message_view: Optional["ABCBotMessageView"] = None,
raw_event_view: Optional["RawBotEventView"] = None,
custom_rules: Optional[Dict[str, Type["ABCRule"]]] = None,
auto_rules: Optional[List["ABCRule"]] = None,
message_event_view: Optional["MessageEventView"] = None
):
super().__init__(message_view, raw_event_view, custom_rules, auto_rules)
self.custom_rules = custom_rules or DEFAULT_CUSTOM_RULES
self.message_event_view = message_event_view or MessageEventView()
def message_event(
self, *rules: "ABCRule", blocking: bool = True, **custom_rules
) -> "LabeledMessageHandler":
def decorator(func):
self.message_event_view.handlers.append(
FromFuncHandler(
func,
*rules,
*self.auto_rules,
*self.get_custom_rules(custom_rules),
blocking=blocking,
)
)
return func
return decorator
def load(self, labeler: Union[BotLabeler, "MessageEventLabeler"]):
if type(labeler) is MessageEventLabeler:
self.message_event_view.handlers.extend(labeler.message_event_view.handlers)
self.message_event_view.middlewares.update(labeler.message_event_view.middlewares)
self.message_view.handlers.extend(labeler.message_view.handlers)
self.message_view.middlewares.update(labeler.message_view.middlewares)
for event, handler_basements in labeler.raw_event_view.handlers.items():
event_handlers = self.raw_event_view.handlers.get(event)
if event_handlers:
event_handlers.extend(handler_basements)
else:
self.raw_event_view.handlers[event] = handler_basements
self.raw_event_view.middlewares.update(labeler.raw_event_view.middlewares)
def views(self) -> Dict[str, "ABCView"]:
return {
"message": self.message_view,
"message_event": self.message_event_view,
"raw": self.raw_event_view
}
__all__ = (
"MessageEventView",
"MessageEventLabeler"
)
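# A minimal wiring sketch (illustrative only, not part of the library): register a handler
# for a callback button on the labeler. The {"cmd": "ping"} payload and the returned text are
# made up; a plain `str` return is forwarded to `event.show_snackbar` by MessageEventReturnHandler.
# Passing the labeler to `Bot(token=..., labeler=...)` and calling `run_forever()` is assumed to
# follow the usual vkbottle API and is omitted here.
if __name__ == "__main__":
    labeler = MessageEventLabeler()
    @labeler.message_event(payload={"cmd": "ping"})
    async def ping_handler(event: MessageEvent):
        return "pong"
    print(len(labeler.message_event_view.handlers), "message_event handler(s) registered")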
|
py | 1a529abc1173caa0f97f01a11c9e09dcceef697f | import os
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_classification
from keras import backend as K
from keras import initializers, layers
from keras.utils import to_categorical
from keras.constraints import non_neg, max_norm
from keras.initializers import Zeros
from keras.constraints import Constraint
import tensorflow as tf
import time
from datetime import datetime
from keras.callbacks import Callback
from keras.layers import Input, Dense
from keras.models import Model
from decision_tree import *
def split_pred(df, label):
return df[[x for x in df.columns if x != label]], df[label]
# add sys.args
if len(sys.argv) == 1:
ntree=5
last_only=True
else:
_, ntree, last_only = sys.argv
last_only = last_only == "1"
ntree = int(ntree)
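# Example invocation (assumed, since the script reads raw sys.argv):
#   python letter_benchmark.py 5 1   -> ntree=5, last_only=True
# the script file name is only a guess here; sys.argv[0] is ignored anyway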
depth = 5
dim_size = 16
num_class=26
path = "clean_data"
train_adult = pd.read_csv(path+'/letter_train_scale.csv')
test_adult = pd.read_csv(path+'/letter_test_scale.csv')
x, y = split_pred(train_adult, "lettr")
x_test, y_test = split_pred(test_adult, "lettr")
y = to_categorical(y)
y_test = to_categorical(y_test)
save_dir = os.path.join(os.getcwd(), 'saved_models')
save_dir = "letter_benchmark"
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
tree = Tree() # this keeps the state of the current decision tree...
input_dim = dim_size
nepochs = 200
class TimingCallback(Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, batch, logs={}):
# write stuff to disc here...
self.times.append(time.time() - self.epoch_time_start)
def gen_states(tree, tree_list=[0], target_idx=None, return_tree_list=False):
def size_0(dct):
for key, val in dct.items():
if len(val) > 0:
return False
return True
tree_index = max(tree_list)
if target_idx is None:
curr_list = [tree_index+1, tree_index+2, tree_index+3]
else:
curr_list = [tree_index+1, target_idx, tree_index+2]
tree_list.extend(curr_list)
d0, s0 = tree.prune()
d1 = tree.tree.copy()
d2, s2 = tree.graft()
if size_0(d0):
# reset
d0 = Tree().tree.copy()
state_info = {'prune': (d0, curr_list[0]),
'base': (d1, curr_list[1]),
'graft': (d2, curr_list[2]),
'state': {
'prune': s0, 'graft': s2
}}
if return_tree_list:
return state_info, tree_list, curr_list
else:
return state_info
# In[6]:
def outputshape(input_shape):
return [(input_shape[0], input_shape[1]) for _ in range(input_shape[2])]
def normalise_pred(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
return x
def normalise_pred_shape(input_shape):
    shape = list(input_shape[0])
    num_trees = len(input_shape)
    return tuple([shape[0], num_trees, shape[1]])
# In[7]:
def softmax_tau(proba, tau=0.1):
"""
This is a softmax which goes towards one-hot encoding overtime.
We want to decay tau from 1.0 to 0.1 roughly
"""
from scipy.special import logit, expit
out = expit(logit(proba)/tau)
return out/np.sum(out)
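# Quick illustration of the decay described above (example numbers only):
#   softmax_tau(np.array([0.6, 0.3, 0.1]), tau=1.0) -> unchanged, ~[0.6, 0.3, 0.1]
#   softmax_tau(np.array([0.6, 0.3, 0.1]), tau=0.1) -> nearly one-hot, ~[0.9998, 0.0002, 0.0]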
def get_layer_weights(model, name='hwy', sample=False, tau=1.0):
out = K.eval([x for x in model.layers if x.name == name][0].weights[0]).flatten()
return normalise_weights(out, sample, tau)
def normalise_weights(out, sample=False, tau=1.0):
out = np.abs(out)
out = out/np.sum(out)
if sample and tau >= 1.0:
draw = np.random.choice(range(out.shape[0]), 1, p=out)
return draw[0]
elif sample:
draw = np.random.choice(range(out.shape[0]), 1, p=softmax_tau(out, tau))
return draw[0]
elif tau >= 1.0:
return out
else:
return softmax_tau(out, tau)
# In[8]:
def calculate_routes(adj_list=None):
"""
    Calculates routes given a provided adjacency list,
assume that root node is always 0.
Assume this is a binary tree as well...
Test cases:
{0:[1, 2], 1:[], 2:[]} --> [(0, 0), (1, 0),
(0, 0), (1, 1),
(0, 1), (2, 0),
(0, 1), (2, 1)]
{0:[1], 1:[2], 2:[]} --> [(0, 0), (1, 0), (2, 0),
(0, 0), (1, 0), (2, 1),
(0, 0), (1, 1),
(0, 1)]
calculate_routes({0:[1,2], 1:[], 2:[]})
calculate_routes({0:[1], 1:[2], 2:[]})
"""
if adj_list is None:
raise Exception("Adj_list cannot be none")
def get_next(path):
next_paths = adj_list[path[-1]]
if len(next_paths) > 0:
for p in next_paths:
get_next(path + [p])
else:
all_paths.append(path)
all_paths = []
get_next([0])
# convert paths to indices...
path_indx = []
for path in all_paths:
cur_path = []
for cur_node, nxt_node in zip(path, path[1:]+[None]):
# print(cur_node, nxt_node)
pos_dir = np.array(sorted(adj_list[cur_node]))
pos_idx = np.argwhere(pos_dir==nxt_node).flatten().tolist()
if len(pos_idx) > 0 and len(pos_dir) == 2: # i.e. has 2 children
cur_path.append((cur_node, pos_idx[0]))
elif len(pos_idx) > 0 and len(pos_dir) == 1: # i.e. has 1 child
path_indx.append(cur_path + [(cur_node, 1)]) # then it will have a leaf!
cur_path.append((cur_node, pos_idx[0]))
elif nxt_node is not None:
cur_path.append((cur_node, pos_dir.shape[0]))
else:
path_indx.append(cur_path + [(cur_node, 0)])
path_indx.append(cur_path + [(cur_node, 1)])
return path_indx
def build_tree(main_input, depth, tree_number=0, last_only=True):
"""
Builds a single decision tree, returns all the specs needed to preserve tree state...
"""
# main_input = Input(shape=(dim_size,), name='main_input')
tree_nodes = DecisionTreeNode(depth=depth, name=f'decision_tree{tree_number}')(main_input)
tree_route = DecisionTreeRouting(depth=depth, name=f'decision_route{tree_number}')([main_input, tree_nodes])
leaf_layers = layers.Lambda(lambda x: [tf.squeeze(y) for y in tf.split(x, [1 for _ in range(K.int_shape(x)[2])], axis=2)], output_shape=outputshape)(tree_route)
pred_layer_tree = [Dense(num_class, activation='softmax', name="t{}_tree_l{}".format(tree_number, idx))(x) for idx, x in enumerate(leaf_layers)]
stack_pred = layers.Lambda(normalise_pred, output_shape=normalise_pred_shape)(pred_layer_tree)
tree_d = DecisionPredRouting(depth=depth)([stack_pred, tree_nodes])
if last_only:
return [tree_d]
else:
return [tree_d], [tree_d]+pred_layer_tree
def normalise_pred2(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
cl = K.sum(x, axis=1)
cl = cl/tf.norm(cl, ord=1, axis=1, keepdims=True)
return cl
def normalise_pred_shape2(input_shape):
shape = list(input_shape[0])
return tuple([shape[0], num_class])
main_input = Input(shape=(dim_size,), name='main_input')
tree = []
out_list = []
for idx in range(ntree):
if last_only:
tree.append(build_tree(main_input, depth, idx, last_only))
else:
t_, out = build_tree(main_input, depth, idx, last_only)
tree.append(t_)
out_list.extend(out)
stack_pred = layers.Lambda(normalise_pred2, output_shape=normalise_pred_shape2)([x[0] for x in tree])
if last_only:
outputs = [stack_pred]
else:
outputs = [stack_pred] + out_list
model = Model(inputs=main_input, outputs=outputs)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
time_cb = TimingCallback()
print("Running model with {} layers".format(len(model.layers)))
hist = model.fit([x], [y for _ in range(len(outputs))],
validation_data=([x_test], [y_test for _ in range(len(outputs))]),
epochs=nepochs, verbose=2,
callbacks = [time_cb])
hist_df = pd.DataFrame(hist.history)
print(pd.DataFrame(hist.history).iloc[-1])
hist_df['times'] = time_cb.times[-hist_df.shape[0]:]
hist_df.to_csv('{}/benchmark_rf{}_lastonly{}_{}.csv'.format(save_dir, ntree, last_only, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')), index=True)
|
py | 1a529af4ae12ebf1026b0ab0eaa0d3f0c01653ef | """
MonteCarlo rejection of Models
==============================
The geological models created with GemPy were exported as SHEMAT-Suite input files. `SHEMAT-Suite <https://git.rwth-aachen.de/SHEMAT-Suite/SHEMAT-Suite-open>`_ [1] is a code for
solving coupled heat transport in porous media. It is written in Fortran and uses a finite-difference scheme on a hexahedral grid.
In this example, we load the simulated gravity and temperature responses of an ensemble of realizations of the base POC model created in "Geological model creation and gravity simulation",
and use a rejection algorithm to discard realizations that do not fit these observations. We then visualize the probability and information entropy of the accepted (posterior) ensemble.
"""
#%%
# import libraries
import warnings
warnings.filterwarnings("ignore")
import h5py
import numpy as np
import pandas as pd
import os,sys
import glob
from scipy import stats
import random
import gempy as gp
from gempy.bayesian.fields import probability, information_entropy
import matplotlib.pyplot as plt
plt.style.use(['seaborn-talk'])
sys.path.append('../models/20210319_MC_no_middle_filling/')
print(f"Run mit GemPy version {gp.__version__}")
# In[2]:
def c_rmse(predicted, target):
    """Column-wise RMSE between each simulated realization and the target observations."""
    rm_sq_diff = np.sqrt((predicted.sub(target, axis=0)**2).mean())
    return rm_sq_diff
def rejection(rmse, rnseed=np.random.seed(0), verbose=True):
    """Metropolis-style rejection over the realizations in the given order: a realization is
    accepted if its RMSE improves on the current reference `Ref`, or otherwise with probability
    exp(-(rmse[i] - Ref)/u_g), where u_g is the module-level error estimate.
    Returns the indices of accepted realizations and the uphill acceptance probabilities."""
    Ref = rmse[0]
accept = []
P = []
k = 0
for i in range(1,len(rmse)):
if rmse[i] < Ref:
Ref = rmse[i]
accept.append(i)
elif random.random() < np.exp(-(rmse[i] - Ref)/(u_g)):
P.append(np.exp(-(rmse[i] - Ref)/(u_g)))
Ref = rmse[i]
accept.append(i)
k +=1
if verbose==True:
print(f"{len(accept)} realizations were accepted.")
return accept, P
def fahrenheit_to_celsius(temp_fahrenheit, difference=False):
if not difference:
return (temp_fahrenheit - 32) * 5 / 9
else:
return temp_fahrenheit * 5 / 9
def extTui(datafile, dimension=3, direction='x'):
f = h5py.File(datafile,'r')
z, y, x = f['temp'].shape
if dimension==3:
temp = f['temp'][:,:,:]
uindex = f['uindex'][:,:,:]
elif dimension==2:
if direction=='x':
temp = f['temp'][:,:,x//2]
uindex = f['uindex'][:,:,x//2]
elif direction=='y':
temp = f['temp'][:,y//2,:]
uindex = f['uindex'][:,y//2,:]
elif direction=='z':
temp = f['temp'][z//2,:,:]
uindex = f['uindex'][z//2,:,:]
return temp,uindex
#%%
# Rejection algorithm based on random walk
# ----------------------------------------
# We created a tiny ensemble of 10 different SHEMAT-Suite models in the previous step and will use a rejection algorithm to get a posterior ensemble of models.
# For this, we "borrow" the Metropolis acceptance probability which is defined as:
#
# .. math::
# \alpha(x_{t-1},z) = \begin{cases} min\big(\frac{p(z)}{p(x_{t-1})},1\big), & \text{if } p(x_{t-1}) > 0\\
# 1, & \text{if } p(x_{t-1}) = 0 \end{cases}
#
# A different approach would be to assess the misfit (as RMS error) of each realisation.
# .. math::
# \alpha(x_{t-1},z) = \begin{cases} exp\big(-\frac{S(z) - S(x_{t-1}) }{u_T}\big), & \text{if } S(z) > S(x_{t-1})\\
# 1, & \text{otherwise } \end{cases}
#
# We will use the second approach for now. As discretization error, we take a value from Elison (2015), $u_{T-discr} = 0.7$ K, as an estimate of the error. This error should
# be estimated to the best of one's knowledge.
#
# Using Gauss error propagation, we assess a potential error for the realisations.
# .. math::
# u_T = \sqrt{\big(\frac{\partial T}{\partial x_1}u_1 \big)^2 + ... + \big(\frac{\partial T}{\partial x_n}u_n \big)^2}
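#
# A tiny numerical illustration of this acceptance rule (all numbers made up): a candidate whose
# misfit is 0.5 K worse than the current reference is still accepted with probability
# exp(-0.5/u_T), i.e. roughly 0.49 for u_T = 0.7 K.
example_s_ref, example_s_cand, example_u_T = 10.0, 10.5, 0.7
print("example acceptance probability:", np.exp(-(example_s_cand - example_s_ref) / example_u_T))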
#%%
# Literature sources for log-errors:
# ----------------------------------
# _The lower part of the disturbed log profile (below the cross-over point) was rotated to match these corrected tempera-tures. In the upper part of the profile, the same correction as for method A was applied. The quality of this correction method strongly depends on the correct calculation of the lowermost profile temperatures. According to Förster (2001), most of the corrected tem-peratures have errors of ± 3 to 5 K._ https://doi.org/10.1186/s40517-020-00181-w
#
#
# _The effective accuracy of commercial temperature logs is ±0.5ºC (Blackwell and Spafford, 1987)._ http://www.sprensky.com/publishd/temper2.html
#
# _More normal accuracies are +- 0.25 °C over 0-200 °C_ Keith Geothermal Energy lecture
#
# For errors as a function of e.g. logging speed, measurement response time etc, look https://doi.org/10.1016/j.petrol.2020.107727
# import DTM
dtm = np.load('../../models/20210319_MC_no_middle_filling/Graben_base_model/Graben_base_model_topography.npy')
# In[4]:
# load base model
model_path = '../models/2021-06-04_POC_base_model/'
geo_model = gp.load_model('POC_PCT_model',
path=model_path, recompile=False)
# In[5]:
# get delx and dely of the model, so cell sizes
delx = geo_model._grid.regular_grid.dx
dely = geo_model._grid.regular_grid.dy
delz = geo_model._grid.regular_grid.dz
# In[6]:
# import gravity data and borehole locations
g_data = pd.read_csv('../models/20210319_MC_no_middle_filling/2021-06-16_grav_of_POC_base_model.csv')
bhole = np.array([[31, 14],
[78, 22],
[53, 34],
[49, 44]])
# In[7]:
# plot the map
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
plt.scatter(g_data['X'], g_data['Y'], marker='s', s=150, c='brown', edgecolor='k',
label='gravity stations', zorder=2)
plt.scatter(bhole[:,0]*delx, bhole[:,1]*dely, marker='^', s=200, c='k', label='boreholes',
zorder=3)
plt.colorbar(cs, label='elevation [m]')
plt.legend(frameon=True)
plt.xlabel('X [m]')
plt.ylabel('Y [m]');
#fig.savefig('../imgs/Model_topography_and_grav_stations.png', dpi=300, bbox_inches='tight')
# ## Load the Lithology Blocks
# First let's load the lithology block of all 1000 models, looking at the probabilities of the graben unit and at the model entropy.
# In[8]:
# load and calculate Probability and Entropy using GemPy bayesian field functions
full_ens = np.load('../../../data_var/lith_block_samples_all_1000real.npy')
prior_prob = probability(full_ens)
prior_entr = information_entropy(prior_prob)
# In[9]:
layer = 5
# upper filling
gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_prob[layer],
kwargs_regular_grid={'cmap': 'viridis',
'norm': None})
# lower filling
gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_prob[layer+1],
kwargs_regular_grid={'cmap': 'viridis',
'norm': None});
# In[16]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None,
'colorbar': True}
)
# The Information entropy plot shows where the maximal Uncertainty is in our model, i.e. where the contacts are between the graben units and the basement. A lot of uncertainty is visible in the right part of the model (between around 16000 and 20000), where the main graben unit may or may not be present.
# # Gravity rejection
# In a first stage, we take a look at the gravity signal of each realization. The gravity signal is "recorded" at each of the squares you see in the plot above. Comparing the recorded gravity signals of each realization with the ones of the base model (which we regard as the "true" observations), we can differentiate between fitting and non-fitting ensemble members.
# In[18]:
g_simu = pd.read_csv('../models/20210319_MC_no_middle_filling/MC_grav_simulations_run01and02_1000_reals_rseed0_250+50mstd.csv',
sep=';')
# In[27]:
add_noise = True
if add_noise==True:
np.random.seed(27)
noise = np.random.normal(0, 1., size=15)
g_data_noise = g_data.copy()
g_data_noise['grav'] = g_data_noise['grav'] + noise
print(np.mean(noise))
u_g = np.mean(noise)
# In[20]:
#calculate stdeviation and mean of the prior ensemble
g_simu_stdev = g_simu.std(axis=1)
g_simu_mean = g_simu.mean(axis=1)
# In[21]:
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
cs = plt.scatter(g_data['X'], g_data['Y'], c=g_simu_stdev, marker='s',
s=100, zorder=2, cmap='magma')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.colorbar(cs, label='standard deviation');
# In[22]:
g_simu_stdev
# We see that stations 0 and 14 are not sensitive to changing the PCT depth, so they do not really help in the rejection but still influence the RMSE. By focusing on the sensitive locations, we are likely to increase the performance of the rejection algorithm.
# In[23]:
# drop the first and last entry which do not show variation
simu_drop = g_simu.drop(labels=[0,14], axis=0)
simu_drop_std = simu_drop.std(axis=1)
#station_drop = g_coordinates.drop(labels=[0,14], axis=0)
g_data_drop = g_data.drop(labels=[0,14], axis=0)
g_data_noise_drop = g_data_noise.drop(labels=[0,14], axis=0)
# In[24]:
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
cs = plt.scatter(g_data_drop['X'], g_data_drop['Y'], c=simu_drop_std, marker='s', s=100,
cmap='magma', zorder=2)
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.colorbar(cs, label='standard deviation');
# In[28]:
seed = random.seed(4)
rmse = c_rmse(g_simu, g_data_drop['grav'])
accept, P = rejection(rmse=rmse, rnseed=seed)
# In[29]:
accepted_reals = full_ens[accept, :]
grav_prob = probability(accepted_reals)
grav_entr = information_entropy(grav_prob)
# In[30]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[32]:
np.save('../../../data_var/lith_blocks_samples_run01and02.npy', full_ens)
np.save('../../../data_var/lith_blocks_accepted_23042021.npy', accepted_reals)
np.save('../../../data_var/accepted_realizations_23042021.npy', accept)
# ## With noisy data
# What if we add noise to our data?
# In[200]:
random.seed(4)
rmse_noise = c_rmse(g_simu, g_data_noise_drop['grav'])
accepted_noise, P_noise = rejection(rmse=rmse_noise, rnseed=random.seed(40))
# In[171]:
accepted_reals_n = full_ens[accepted_noise, :]
grav_prob_n = probability(accepted_reals_n)
grav_entr_n = information_entropy(grav_prob_n)
# In[172]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_entr_n,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
# We see that here, more realizations get accepted, in this case around 16 % more.
# ## Temperature rejection
# The black triangles in the Map plot are the locations from 4 different boreholes in the model. Temperature data from these boreholes is now used in a similar fashion to further reduce the model to realizations, which now fit both the gravity and the temperature signal.
# In[31]:
f = h5py.File('../models/20210219_MC_ensemble/PCT_base_model_final.h5','r')
# In[32]:
z,y,x = f['uindex'].shape
# In[33]:
# define uT
T_error = 0.25 # temperature error tool accuracy
s_error = fahrenheit_to_celsius(1.25, difference=True) # sensor response time of 2 sec and 1 year after drilling
l_error = fahrenheit_to_celsius(1.25, difference=True) # logging speed of 20/ft after 1 year
d_error = 1.0 # estimated temperature error by discretization
#u_T = np.sqrt(T_error[0]**2 + T_error[1]**2 + T_error[2]**2 + T_error[3]**2 + d_error**2)
#u_T = np.sum(T_error**2)/4
u_T = np.sqrt(T_error**2 + s_error**2 + l_error**2 + d_error**2)
print(u_T)
# In[34]:
# load Simulation outputs. Those outputs get written by SHEMAT-Suite if runmode = 1
outp_path = '../models/20210319_MC_no_middle_filling/SHEMAT_MC/'
#accepted = accept
accepted = np.loadtxt('../models/20210319_MC_no_middle_filling/accepted_realizations_01042021').astype(int)
diffs = np.loadtxt(outp_path+'PCT_MC_1dat_cor_final.dat',skiprows=3,usecols=(8,),dtype=float)
for i in accepted[1:]:
n = np.loadtxt(outp_path+f'PCT_MC_{i}dat_cor_final.dat',skiprows=3,usecols=(8,),dtype=float)
diffs=np.vstack([diffs,n])
# In[35]:
# calculate RMSE of each realisation.
n = diffs.shape[1] # number of temperature data points per realization
diffs_sq = diffs**2
ssr = diffs_sq.sum(axis=1)
rmse = np.sqrt((diffs_sq.sum(axis=1)/n))
# In[36]:
# this is a matrix with all vectors: the first columns are the temperature differences at the well data points,
# followed by one column with the SSR and a final column with the RMSE
tot_diffs = np.column_stack((diffs,ssr,rmse))
print(tot_diffs.shape)
# add index to the realizations
ind = np.array(range(tot_diffs.shape[0]))
tot_diffs = np.column_stack((tot_diffs,accepted))
# ## Rejection sampling
# we now start with a random sample and go randomly through the pool, accepting and rejecting realizations.
# The algorithm starts with one reference sample `Ref`. Then, iteratively, samples (= realizations) get accepted or rejected based on their RMSE values. That is why we index the RMSE column of `tot_diffs`. Alternatively, one could also just use the `rmse` array.
# In[37]:
# Chronological implementation - go from 1 to N
# This can be used here if the generated samples are already in a random order and not correlated.
# That is usually the case with GemPy exports to SHEMAT-Suite.
random.seed(42)
col = 129
Ref = tot_diffs[0,col]
accept = []
P = []
k=0
for i in range(1,tot_diffs.shape[0]):
if tot_diffs[i,col] < Ref:
Ref = tot_diffs[i,col]
accept.append(i)
elif random.random() < np.exp(-(tot_diffs[i,col] - Ref)/(u_T)):
P.append(np.exp(-(tot_diffs[i,col] - Ref)/(u_T)))
Ref = tot_diffs[i,col]
accept.append(i)
k += 1
print(len(accept))
# In[38]:
accepted_reals_T = accepted_reals[accept, :]
grav_T_prob = probability(accepted_reals_T)
grav_T_entr = information_entropy(grav_T_prob)
# In[39]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_T_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_temp_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[48]:
np.savetxt('../models/20210319_MC_no_middle_filling/accepted_after_temp_rejection', accept)
# In[40]:
# have a look at sensitive data points
st = np.std(diffs, axis=0)
st.shape
# In[41]:
plt.hist(st)
# We see that many points are not sensitive
# In[53]:
indices = np.where(st < 0.5)
# In[54]:
diffs_red = np.delete(diffs, obj=indices, axis=1)
# Now let's see how the removal of relatively robust datapoints helps:
# In[55]:
# calculate RMSE of each realisation.
n = diffs_red.shape[1] # number of remaining (sensitive) temperature data points per realization
diffs_sq = diffs_red**2
ssr = diffs_sq.sum(axis=1)
rmse = np.sqrt((diffs_sq.sum(axis=1)/n))
# In[56]:
# this is a matrix with all vectors: the first columns are the differences at the remaining data points,
# followed by one column with the SSR and a final column with the RMSE
tot_diffs = np.column_stack((diffs_red,ssr,rmse))
print(tot_diffs.shape)
# add index to the realizations
ind = np.array(range(tot_diffs.shape[0]))
tot_diffs = np.column_stack((tot_diffs,accepted))
# In[57]:
# Chronological implementation - go from 1 to N
# This can be used here if the generated samples are already in a random order and not correlated.
# That is usually the case with GemPy exports to SHEMAT-Suite.
random.seed(42)
col = 54
Ref = tot_diffs[0,col]
accept = []
P = []
k=0
for i in range(1,tot_diffs.shape[0]):
if tot_diffs[i,col] < Ref:
Ref = tot_diffs[i,col]
accept.append(i)
elif random.random() < np.exp(-(tot_diffs[i,col] - Ref)/(u_T)):
P.append(np.exp(-(tot_diffs[i,col] - Ref)/(u_T)))
Ref = tot_diffs[i,col]
accept.append(i)
k += 1
print(len(accept))
#print(accept)
# In[58]:
accepted_reals_Ts = accepted_reals[accept, :]
grav_Ts_prob = probability(accepted_reals_Ts)
grav_Ts_entr = information_entropy(grav_Ts_prob)
# In[59]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_Ts_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_temp_red_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[56]:
np.savetxt('../models/20210319_MC_no_middle_filling/accepted_after_temp_rejection_reduced_datapoints', accept)
# And we see that temperature data is not sensitive to changes in the PCT depth.
#
# But what if we also treat the thermal conductivity as an uncertain parameter?
# *Then the rejection is way more rigorous.*
# In[60]:
fids = glob.glob('H:PCT_SHEMAT/20210219_MC_outputs/*.h5')
# In[70]:
outpath = 'H:PCT_SHEMAT/20210219_MC_outputs\\'
poTemp = []
poUi = []
dicfil = {}
for fn in fids:
for i in accept:
if fn == outpath+f"PCT_MC_{i}var_TCt_final.h5":
dT,dui = extTui(fn, dimension=2, direction='y')
poTemp.append(dT)
poUi.append(dui)
dicfil[fn.split('/')[-1]] = dui
# In[71]:
poTempa = np.asarray(poTemp)
poUia = np.asarray(poUi)
accepta = np.asarray(accept)
print(poUia.shape,poTempa.shape,accepta.shape)
np.savetxt('accepted_realisations',accepta,fmt='%i',delimiter=' ',newline='\n')
#np.savetxt('posterior_Temps',poTempa,fmt='%.5f',delimiter=' ',newline='\n',header=" posterior 61 realizations for Temperature")
#np.savetxt('posterior_Uindex',poUia,fmt='%i',delimiter=' ',newline='\n')
# In[72]:
# calculate mean temperature field and mean posterior uindex
mTemp = np.mean(poTempa,axis=0)
mUi = np.mean(poUia,axis=0)
# import y and z for visualising
plfn = h5py.File('../models/20210219_MC_ensemble/PCT_base_model_final.h5','r')
x = plfn['x'][0,0,:]
y = plfn['y'][0,:,0]
z = plfn['z'][:,0,0]
refT = plfn['temp'][:,25,:]
# In[73]:
poUi[0].shape
# In[79]:
fig = plt.figure(figsize=(20,8))
cs = plt.contourf(x,z-6500.,mUi,cmap='viridis')
plt.contour(x,z-6500.,mUi,5, colors='gray', zorder=1)
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
plt.savefig('../imgs/POC_mean_uindex.png', dpi=300, bbox_inches='tight')
# In[80]:
fig = plt.figure(figsize=(20,8))
cs = plt.pcolor(x,z-6500.,mTemp,cmap='viridis', shading='auto')
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
cbar = plt.colorbar(cs,orientation='vertical')
cbar.set_label('Temperature [$^\circ$C]',fontsize=16)
cbar.ax.tick_params(labelsize=14)
# In[86]:
#plot ssr of mean posterior and reference model
fig = plt.figure(figsize=(20,8))
cs = plt.pcolor(x,z-6500.,np.abs((refT-mTemp)),cmap='RdBu', shading='auto')
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
cbar = plt.colorbar(cs,orientation='vertical')
cbar.set_label('Temperature [$^\circ$C]',fontsize=16)
cbar.ax.tick_params(labelsize=14)
plt.savefig('../imgs/POC_absolut_differences_reference_ensemble_mean.png', dpi=300, bbox_inches='tight')
# In[ ]:
|
py | 1a529b078c9f58a086182a3ce665a3252b1c9093 | """Low-level api to work with relationships"""
import functools
import itertools
class BaseFilter:
"Base filter that accepts one argument"
def __init__(self, **query):
assert len(query) == 1
for key, value in query.items():
self.key = key
self.value = value
@staticmethod
def parse_key(key):
"Parses the key to remove the __ if there is one"
return key.split("__")[0]
def match(self, value):
"Checks wether value matches this filter"
parsed_key = self.parse_key(self.key)
if parsed_key != "_self":
value = getattr(value, parsed_key, None)
if value is None:
return False
if self.key.endswith("__gt"):
return value > self.value
elif self.key.endswith("__gte"):
return value >= self.value
elif self.key.endswith("__lt"):
return value < self.value
elif self.key.endswith("__lte"):
return value <= self.value
elif self.key.endswith("__ne"):
return value != self.value
else:
return value == self.value
class AndFilter(BaseFilter):
"Composite filter that combines two filters"
def __init__(self, filters, **query):
self.filters = filters
for key, value in query.items():
self.filters.append(BaseFilter(**{key:value}))
def match(self, value):
is_match = True
for _filter in self.filters:
is_match &= _filter.match(value)
return is_match
class OrFilter(AndFilter):
"Composite filter that combines two filters via an or"
def match(self, value):
is_match = False
for _filter in self.filters:
is_match |= _filter.match(value)
return is_match
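# Usage sketch for the filters above (illustrative; ``node`` stands for any object
# exposing the named attributes):
#   BaseFilter(name="app.py").match(node)                   -> node.name == "app.py"
#   BaseFilter(size__gt=100).match(node)                    -> node.size > 100
#   BaseFilter(_self__ne=None).match(node)                  -> node != None ("_self" targets the value itself)
#   AndFilter([], name="app.py", size__lt=100).match(node)  -> True only if every condition holds
#   OrFilter([], name="app.py", size__lt=100).match(node)   -> True if at least one condition holds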
class Vertex:
"Represents a dependency link"
def __init__(self, vertex_type:str, from_node:str, to_node:str, **attributes):
# Ensures that we won't override our parameters...
assert "from_node" not in attributes
assert "to_node" not in attributes
assert "vertex_type" not in attributes
self.vertex_type = vertex_type
self.from_node = from_node
self.to_node = to_node
for key, value in attributes.items():
setattr(self, key, value)
def __eq__(self, other):
if isinstance(other, Vertex):
return (self.vertex_type == other.vertex_type
and self.from_node == other.from_node
and self.to_node == other.to_node)
def __hash__(self):
return hash((self.vertex_type, self.from_node, self.to_node))
def __str__(self):
return "(%s) --> (%s)" % (self.from_node, self.to_node)
class CircularDependencyError(BaseException):
pass
class DependencyGraph:
def __init__(self, nodes, plugins):
self.nodes = set(nodes)
self.plugins = plugins
def generate_vertices():
nodes = list(self.nodes)
while nodes:
yield from self.build_vertices(nodes.pop())
self.vertices = set(generate_vertices())
if not self.is_acyclic():
raise CircularDependencyError()
def __str__(self):
return "\n".join([str(vertex) for vertex in self.vertices])
def is_acyclic(self):
"Checks for circular dependencies"
nodes = []
# We build an index
connected_to = {node: set() for node in self.nodes}
from_nodes = {node: set() for node in self.nodes}
for vertice in self.vertices:
connected_to[vertice.to_node].add(vertice)
from_nodes[vertice.from_node].add(vertice)
for node in self.nodes:
# Only nodes that don't have someone dependent on
if len(connected_to[node]) == 0:
nodes.append(node)
vertices = list(self.vertices)
deleted_vertices = set()
while nodes:
node = nodes.pop()
connected = from_nodes[node] - deleted_vertices
for vertice in connected:
deleted_vertices.add(vertice)
if not connected_to[node] - deleted_vertices:
nodes.append(vertice.to_node)
return len(vertices) == len(deleted_vertices)
def build_vertices(self, node):
plugins = filter(lambda p: p.can_create_vertex(node), self.plugins)
for plugin in plugins:
for vertex in plugin.vertices(node):
if vertex.to_node not in self.nodes:
self.nodes.add(vertex.to_node)
# I know, we are limited by recursions.
# Fix it when it is a problem
yield from self.build_vertices(vertex.to_node)
yield vertex
def dependencies(self, node, follow=False):
"Returns dependencies of a node, either all or direct"
vertices = filter(lambda v: v.from_node == node, self.vertices)
for vertex in vertices:
yield vertex.to_node
if follow:
yield from self.dependencies(vertex.to_node, follow)
class Plugin:
"Represents a plugin"
file_extensions = "*"
def __init__(self, **kwargs):
        for key, value in kwargs.items():
setattr(self, key, value)
def vertices(self, node):
"Yields vertices for a node"
raise NotImplementedError()
def can_create_vertex(self, node):
"Checks if this plugin can create links for this type of node"
if self.file_extensions == "*":
return True
if isinstance(self.file_extensions, str):
return node.name.endswith(self.file_extensions)
else:
# If the file extension of the node name is in the plugins file ext
ends_with = False
for file_ext in self.file_extensions:
ends_with = ends_with or node.name.endswith(file_ext)
return ends_with
class StaticDependencies(Plugin):
"Plugin to illustrate manual dependencies"
# Format of a dependency:
# ("A", ("B", "C", "D"))
def __init__(self, dependencies, **kwargs):
self.dependencies = dependencies
def vertices(self, node):
for deps in self.dependencies:
if deps[0] == node:
for sub_node in deps[1]:
yield Vertex("static", node, sub_node)
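# A small end-to-end sketch of the API above, using StaticDependencies as the only
# plugin; the module names are made up purely for illustration.
if __name__ == "__main__":
    example_deps = [
        ("app", ("lib", "utils")),
        ("lib", ("utils",)),
    ]
    graph = DependencyGraph(nodes=["app", "lib", "utils"],
                            plugins=[StaticDependencies(example_deps)])
    print(graph)                              # one "(from) --> (to)" line per vertex
    print(sorted(graph.dependencies("app")))  # direct dependencies: ['lib', 'utils']
    print(sorted(set(graph.dependencies("app", follow=True))))  # transitive: ['lib', 'utils']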
|
py | 1a529d0a138554d94292047dfd3e37a86c8f912c | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.)
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline()
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
        if line[:1] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
try:
resp = self._shortcmd('QUIT')
except error_proto as val:
resp = val
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
try:
import ssl
except ImportError:
pass
else:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
hostname - the hostname of the pop3 over ssl server
port - port number
keyfile - PEM formatted file that countains your private key
certfile - PEM formatted certificate chain file
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
return ssl.wrap_socket(sock, self.keyfile, self.certfile)
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
|
py | 1a529d1e80df70e048e2117f6ed122412593a937 | # Lesson 18 - 03-11-2019
# Exercises on simple lists
# Given the following list, solve the following questions:
lista = [10, 20, 'amor', 'abacaxi', 80, 'Abioluz', 'Cachorro grande é de arrasar']
# 0 1 2 3 4 5 6
print('1: Using indexing, print the word abacaxi')
print(lista[3])
print('2: Using indexing, print the following data: 20, amor, abacaxi')
print(lista[1:4])
print('3: Using indexing, print a list with the data from 20 up to Abioluz')
print(lista[1:6])
print('4: Using indexing, print a list with the following data:'
      '\nCachorro grande é de arrasar, Abioluz, 80, abacaxi, amor, 20, 10')
print(f'{lista[::-1]}\n')
print('5: Using f-strings and indexing, print the following:'
      '\n { abacaxi } é muito bom, sinto muito { amor } quando eu chupo { 80 } deles.')
print(f'{lista[3]} é muito bom, sinto muito {lista[2]} quando eu chupo {lista[4]} deles.\n')
print('6: Using indexing, print the following data:'
      '\n10, amor, 80, Cachorro grande é de arrasar')
print(f'{lista[0:7:2]}\n')
print('7: Using f-strings and indexing, print the following data:'
      '\nAbioluz - abacaxi - 10 - Cachorro grande é de arrasar - 20 - 80')
print(f'{lista[5]} - {lista[3]} - {lista[0]} - {lista[6]} - {lista[1]} - {lista[4]}\n')
print('8: Using f-strings and indexing, print the following data:'
      '\namor - 10 - 10 - abacaxi - Cachorro grande é de arrasar - Abioluz - 10 - 20')
print(f'{lista[2]} - {lista[0]} - {lista[0]} - {lista[3]} - {lista[6]} - {lista[5]} - {lista[0]} - {lista[1]}\n')
print('9: Using indexing, print a list with the data from 10 up to 80')
print(f'{lista[0:5]}\n')
print('10: Using indexing, print the following data:'
      '\n10, abacaxi, Cachorro grande é de arrasar')
print(f'{lista[0:7:3]}\n') |
py | 1a529d7a672ddb21711ca440867b7613ca339eef | import pygame
import ctypes
import os
import queue
import sys
import random
import Main
import Functions
import Screens
pygame.init()
# getting the size of user's screen
user32 = ctypes.windll.user32
screensize = user32.GetSystemMetrics(78), user32.GetSystemMetrics(79)
scaled_screen_height = int(screensize[1] * .9 * .75)
# setting the size of snake and fields.
scale_factor = 2 # can be changed to any whole number; the bigger the number, the smaller the snake
body_size = int(scaled_screen_height / 12 / scale_factor)
block_size = body_size * scale_factor #: base variable for setting all the sizes - important one!
# creating the game window
game_width, game_height = 12 * block_size, 16 * block_size #: size of the game - adapted to background image
icon = pygame.image.load('media/heads/head_down.jpg')
pygame.display.set_caption('Snake')
pygame.display.set_icon(icon)
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (int((screensize[0] - 12 * block_size) / 2), 32)
# all images in the game
screen = pygame.display.set_mode((game_width, game_height))
surface = pygame.image.load("media/screens/background_800.png").convert()
surface = pygame.transform.scale(surface, (block_size*12, block_size*16))
head = Functions.load_img('media/heads/head_down.png', 1.5)
head_eat = Functions.load_img('media/heads/head_eat_down.png', 1.5)
head_dead = Functions.load_img('media/heads/head_down_dead.png', 1.5)
food = Functions.load_img('media/food/mouse.png', 1.2)
lost = pygame.image.load("media/screens/lost.png").convert()
lost = pygame.transform.scale(lost, (block_size*6, block_size*6))
welcome = pygame.image.load("media/screens/welcome.png").convert()
welcome = pygame.transform.scale(welcome, (block_size*6, block_size*6))
# movement constants
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
PAUSE = (0, 0)
# colors
dark_gray = (30, 30, 30)
black = (0, 0, 0)
orange = (219, 106, 15)
# sad variables
is_eaten = False
is_dead = False
# eaten mouses
points = 0
high_score = list()
score_multi = 1
# fonts
large_text = pygame.font.SysFont('Calibri', int(block_size*2), True, False)
normal_text = pygame.font.SysFont('Calibri', int(block_size*0.8), True, False)
small_text = pygame.font.SysFont('Calibri', int(block_size*0.4), True, False)
# time
clock = pygame.time.Clock()
fps = 10 # frames per second
screen.blit(surface, [0, 0])
# queue with movements
q = queue.Queue()
x_tmp = 1
y_tmp = 0
starting = True
resume_dir = RIGHT
|
py | 1a529e87a7824d45164124fb4e90207065b78f32 | '''
The init module gathers routines for initialization
'''
from bnpy.init import FromSaved
from bnpy.init import FromTruth
from bnpy.init import FromLP
from bnpy.init import FromScratchRelational
from bnpy.init import FromScratchGauss
from bnpy.init import FromScratchMult
from bnpy.init import FromScratchBern
from bnpy.init import FromScratchBregman
from bnpy.init import FromScratchBregmanMixture
# from FromScratchMult import initSSByBregDiv_Mult
# from FromScratchBern import initSSByBregDiv_Bern
# from FromScratchGauss import initSSByBregDiv_Gauss
# from FromScratchGauss import initSSByBregDiv_ZeroMeanGauss
def initSSByBregDiv(curModel=None, **kwargs):
obsName = curModel.getObsModelName()
if obsName.count('Mult'):
return initSSByBregDiv_Mult(curModel=curModel, **kwargs)
elif obsName.count('ZeroMeanGauss'):
return initSSByBregDiv_ZeroMeanGauss(curModel=curModel, **kwargs)
elif obsName.count('Gauss'):
return initSSByBregDiv_Gauss(curModel=curModel, **kwargs)
else:
raise NotImplementedError("Unknown obsmodel name: " + obsName)
|
py | 1a529e8b766e04fb9970de124521058812e6622e | """
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of PRINTING on the CONSOLE.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Nihaar Munnamgi.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the other functions to test them. """
run_test_rectangle_of_stars()
run_test_triangle_of_stars()
run_test_decreasing_exclamation_marks()
run_test_alternating_brackets()
run_test_triangle_same_number_in_each_row()
run_test_triangle_all_numbers_in_each_row()
def run_test_rectangle_of_stars():
""" Tests the rectangle_of_stars function. """
print()
print('--------------------------------------------')
print('Testing the RECTANGLE_OF_STARS function:')
print('--------------------------------------------')
print('Test 1 of rectangle_of_stars: (3, 5)')
rectangle_of_stars(3, 5)
print('Test 2 of rectangle_of_stars: (4, 11)')
rectangle_of_stars(4, 11)
print('Test 3 of rectangle_of_stars: (6, 2)')
rectangle_of_stars(6, 2)
def rectangle_of_stars(r, c):
"""
Prints a rectangle of stars (asterisks), with r rows and c columns.
For example, when r = 3 and c = 5:
*****
*****
*****
Preconditions: r and c are non-negative integers.
"""
# -------------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Pause the video when it completes this problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for j in range(r):
for k in range(c):
print('*', end='')
print()
def run_test_triangle_of_stars():
""" Tests the triangle_of_stars function. """
print()
print('-------------------------------------------')
print('Testing the TRIANGLE_OF_STARS function:')
print('-------------------------------------------')
print('Test 1 of triangle_of_stars: (5)')
triangle_of_stars(5)
print('Test 2 of triangle_of_stars: (1)')
triangle_of_stars(1)
print('Test 3 of triangle_of_stars: (3)')
triangle_of_stars(3)
print('Test 4 of triangle_of_stars: (6)')
triangle_of_stars(6)
def triangle_of_stars(r):
"""
Prints a triangle of stars (asterisks), with r rows.
-- The first row is 1 star,
the second is 2 stars,
the third is 3 stars, and so forth.
For example, when r = 5:
*
**
***
****
*****
Precondition: r is a non-negative integer.
"""
# -------------------------------------------------------------------------
# TODO: 3. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Continue the video from where you paused it
# in the previous problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for j in range(r):
for k in range(j+1):
print('*', end='')
print()
def run_test_decreasing_exclamation_marks():
""" Tests the decreasing_exclamation_marks function. """
print()
print('----------------------------------------------------------')
print('Testing the DECREASING_EXCLAMATION_MARKS function:')
print('----------------------------------------------------------')
print('Test 1 of decreasing_exclamation_marks: (5, 2)')
decreasing_exclamation_marks(5, 2)
print('Test 2 of decreasing_exclamation_marks: (3, 1)')
decreasing_exclamation_marks(3, 1)
print('Test 3 of decreasing_exclamation_marks: (4, 4)')
decreasing_exclamation_marks(4, 4)
print('Test 4 of decreasing_exclamation_marks: (8, 6)')
decreasing_exclamation_marks(8, 6)
def decreasing_exclamation_marks(m, n):
"""
Prints exclamation marks: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
!!!!!
!!!!
!!!
!!
Precondition: m and n are positive integers with m >= n.
"""
# -------------------------------------------------------------------------
# TODO: 4. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for j in range(m-n+1):
for k in range(m-j):
print('!',end='')
print()
def run_test_alternating_brackets():
""" Tests the alternating_brackets function. """
print()
print('----------------------------------------------------------')
print('Testing the ALTERNATING_BRACKETS function:')
print('----------------------------------------------------------')
print('Test 1 of alternating_brackets: (5, 2)')
alternating_brackets(5, 2)
print('Test 2 of alternating_brackets: (3, 1)')
alternating_brackets(3, 1)
print('Test 3 of alternating_brackets: (4, 4)')
alternating_brackets(4, 4)
print('Test 4 of alternating_brackets: (8, 6)')
alternating_brackets(8, 6)
def alternating_brackets(m, n):
"""
Prints alternating left/right square brackets: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
[][][
[][]
[][
[]
Precondition: m and n are positive integers with m >= n.
"""
# -------------------------------------------------------------------------
# TODO: 5. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for j in range(m-n+1):
for k in range(m-j):
if k%2==0:
print('[',end='')
else:
print(']',end='')
print()
def run_test_triangle_same_number_in_each_row():
""" Tests the triangle_same_number_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_SAME_NUMBER_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_same_number_in_each_row: (5)')
triangle_same_number_in_each_row(5)
print('Test 2 of triangle_same_number_in_each_row: (1)')
triangle_same_number_in_each_row(1)
print('Test 3 of triangle_same_number_in_each_row: (3)')
triangle_same_number_in_each_row(3)
print('Test 4 of triangle_same_number_in_each_row: (6)')
triangle_same_number_in_each_row(6)
def triangle_same_number_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 22, the 3rd is 333, etc.
For example, when r = 5:
1
22
333
4444
55555
Precondition: r is a non-negative integer.
"""
# -------------------------------------------------------------------------
# TODO: 6. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for k in range(r):
for j in range(k+1):
print(k+1,end='')
print()
def run_test_triangle_all_numbers_in_each_row():
""" Tests the triangle_all_numbers_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_ALL_NUMBERS_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_all_numbers_in_each_row: (5)')
triangle_all_numbers_in_each_row(5)
print('Test 2 of triangle_all_numbers_in_each_row: (1)')
triangle_all_numbers_in_each_row(1)
print('Test 3 of triangle_all_numbers_in_each_row: (3)')
triangle_all_numbers_in_each_row(3)
print('Test 4 of triangle_all_numbers_in_each_row: (6)')
triangle_all_numbers_in_each_row(6)
def triangle_all_numbers_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 12, the 3rd is 123, etc.
For example, when r = 5:
1
12
123
1234
12345
Precondition: r is a non-negative integer.
"""
# -------------------------------------------------------------------------
# TODO: 7. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# -------------------------------------------------------------------------
for j in range(r):
for k in range(j+1):
print(k+1,end='')
print()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
py | 1a529e8e4cf0d731a25e1be78cca73746a4292b5 | # Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution(object):
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D numpy ndarray containing lower bounds of the generated
        components. Neither NaN nor inf is allowed.
ub : array_like
A 1-D numpy ndarray containing upper bounds for the generated
        components. Neither NaN nor inf is allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
        Higher values give the visiting distribution a heavier tail; this
makes the algorithm jump to a more distant region.
        The value range is (0, 3]. Its value is fixed for the life of the
object.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_state):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_state = rand_state
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample = self.rand_state.random_sample()
lower_sample = self.rand_state.random_sample()
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_state.random_sample()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_state.random_sample()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_state.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
class EnergyState(object):
"""
    Class used to record the energy state. At any time, it knows the
    currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
        A 1-D numpy ndarray containing lower bounds for generating the initial
        random components in the `reset` method.
    upper : array_like
        A 1-D numpy ndarray containing upper bounds for generating the initial
        random components in the `reset` method.
        Neither NaN nor inf is allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
    # Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_state, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = self.lower + rand_state.random_sample(
len(self.lower)) * (self.upper - self.lower)
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because function '
                        'creates NaN or (+/-) infinity values even when '
                        'trying new random parameters'
)
raise ValueError(message)
self.current_location = self.lower + rand_state.random_sample(
self.lower.size) * (self.upper - self.lower)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain(object):
"""
Class that implements within a Markov chain the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_state = rand_state
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_state.random_sample()
pqv_temp = (self.acceptance_param - 1.0) * (
e - self.energy_state.current_energy) / (
self.temperature_step + 1.)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
                        'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
        # If the energy has improved, or there has been no improvement for too
        # long, perform a local search from the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
                        'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_state.random_sample():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
                        'during dual annealing')
class ObjectiveFunWrapper(object):
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper(object):
"""
    Class used to wrap around the minimizer used for local search.
    The default local minimizer is SciPy's L-BFGS-B minimizer.
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres.keys():
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres.keys():
self.func_wrapper.nhev += mres.nhev
        # Check that the result is a valid (finite, in-bounds) value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
        # Use the new point only if it is valid and gives a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature. Use higher values to facilitate a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima in which it is trapped. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
        During the annealing process, the temperature decreases; when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail; this makes
the algorithm jump to a more distant region. The value range is (0, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number may be
        exceeded, and the algorithm will stop just after the local search is
        done. Default value is 1e7.
seed : {int or `~numpy.random.mtrand.RandomState` instance}, optional
If `seed` is not specified the `~numpy.random.mtrand.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``RandomState`` instance, then that
instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution
function and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
            - 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single n-dimensional starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is the acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
statistics. Journal of Statistical Physics, 52, 479-487 (1998).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
Examples
--------
The following example is a 10-dimensional problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
>>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
... ret.x, ret.fun))
global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09
-6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09
-6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000
""" # noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of RandomState for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
    while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
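            # t1 (precomputed above) and t2 implement the temperature decrease
            # T_qv(t) = T_qv(1) * (2^(qv-1) - 1) / ((1 + t)^(qv-1) - 1)
            # described in the Notes section of the docstring, with t = i + 1.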
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
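# A minimal usage sketch (illustrative, not part of the original module): it
# exercises the `seed` and `callback` parameters documented above on a small
# Rastrigin problem; the helper names below are made up for this example.
if __name__ == '__main__':
    def _rastrigin(x):
        return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)

    def _report(x, f, context):
        # context: 0 = annealing, 1 = local search, 2 = dual annealing
        print('minimum found (context=%d): f=%g' % (context, f))

    _bounds = list(zip([-5.12] * 5, [5.12] * 5))
    _res = dual_annealing(_rastrigin, _bounds, seed=1234, callback=_report)
    print(_res.x, _res.fun)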
|
py | 1a529f37907137872570250e10dd0b22e84b54d8 | from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.io as io
import tensorflow as tf
import align.detect_face
#ref = io.loadmat('pnet_dbg.mat')
with tf.Graph().as_default():
sess = tf.compat.v1.Session()
with sess.as_default():
with tf.compat.v1.variable_scope('pnet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,None,None,3), 'input')
data = tf.compat.v1.placeholder(tf.float32, (1, 1610, 1901, 3), 'input')
pnet = align.detect_face.PNet({'data': data})
pnet.load('../../data/det1.npy', sess)
# with tf.compat.v1.variable_scope('rnet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,24,24,3), 'input')
# rnet = align.detect_face.RNet({'data':data})
# rnet.load('../../data/det2.npy', sess)
# with tf.compat.v1.variable_scope('onet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,48,48,3), 'input')
# onet = align.detect_face.ONet({'data':data})
# onet.load('../../data/det3.npy', sess)
def pnet_fun(img): return sess.run(
('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
# rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
# onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
ref = io.loadmat('pnet_dbg.mat')
img_x = np.expand_dims(ref['im_data'], 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet_fun(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
# np.where(abs(out0[0,:,:,:]-ref['out0'])>1e-18)
qqq3 = np.where(abs(out1[0, :, :, :]-ref['out1'])
> 1e-7) # 3390 diffs with softmax2
print(qqq3[0].shape)
np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
# prob1=sess1.run('prob1:0', feed_dict={data:img})
# print(prob1[0,0,0,:])
# conv42=sess1.run('conv4-2/BiasAdd:0', feed_dict={data:img})
# print(conv42[0,0,0,:])
# conv42, prob1 = pnet_fun(img)
# print(prob1[0,0,0,:])
# print(conv42[0,0,0,:])
# [ 0.9929 0.0071] prob1, caffe
# [ 0.9929 0.0071] prob1, tensorflow
# [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, caffe
# [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, tensorflow
# g2 = tf.Graph()
# with g2.as_default():
# data = tf.compat.v1.placeholder(tf.float32, (None,24,24,3), 'input')
# rnet = align.detect_face.RNet({'data':data})
# sess2 = tf.compat.v1.Session(graph=g2)
# rnet.load('../../data/det2.npy', sess2)
# rnet_fun = lambda img : sess2.run(('conv5-2/conv5-2:0', 'prob1:0'), feed_dict={'input:0':img})
# np.random.seed(666)
# img = np.random.rand(73,3,24,24)
# img = np.transpose(img, (0,2,3,1))
# np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
#
# prob1=sess2.run('prob1:0', feed_dict={data:img})
# print(prob1[0,:])
#
# conv52=sess2.run('conv5-2/conv5-2:0', feed_dict={data:img})
# print(conv52[0,:])
# [ 0.9945 0.0055] prob1, caffe
# [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, caffe
# [ 0.9945 0.0055] prob1, tensorflow
# [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, tensorflow
# g3 = tf.Graph()
# with g3.as_default():
# data = tf.compat.v1.placeholder(tf.float32, (None,48,48,3), 'input')
# onet = align.detect_face.ONet({'data':data})
# sess3 = tf.compat.v1.Session(graph=g3)
# onet.load('../../data/det3.npy', sess3)
# onet_fun = lambda img : sess3.run(('conv6-2/conv6-2:0', 'conv6-3/conv6-3:0', 'prob1:0'), feed_dict={'input:0':img})
# np.random.seed(666)
# img = np.random.rand(11,3,48,48)
# img = np.transpose(img, (0,2,3,1))
# np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
#
# prob1=sess3.run('prob1:0', feed_dict={data:img})
# print(prob1[0,:])
# print('prob1, tensorflow')
#
# conv62=sess3.run('conv6-2/conv6-2:0', feed_dict={data:img})
# print(conv62[0,:])
# print('conv6-2, tensorflow')
#
# conv63=sess3.run('conv6-3/conv6-3:0', feed_dict={data:img})
# print(conv63[0,:])
# print('conv6-3, tensorflow')
# [ 0.9988 0.0012] prob1, caffe
# [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, caffe
# [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, caffe
# [ 0.9988 0.0012] prob1, tensorflow
# [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, tensorflow
# [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, tensorflow
#pnet_fun = lambda img : sess1.run(('conv4-2/BiasAdd:0', 'prob1:0'), feed_dict={'input:0':img})
|
py | 1a529fbaca1eb8644952b2faa1a2d25b85f24ede | from anchore_engine.subsys.auth.realms import CaseSensitivePermission
from anchore_engine.subsys import logger
logger.enable_test_logging()
def test_anchore_permissions():
"""
Test permission comparisons with mixed-case, wild-cards, etc
:return:
"""
logger.info("Testing permission wildcard matches and mixed-case comparisions")
# Default, case-sensitive, exact match
assert CaseSensitivePermission(wildcard_string="Account1:listImages:*").implies(
CaseSensitivePermission(wildcard_string="Account1:listImages:*")
)
# Ignore case
assert CaseSensitivePermission(
wildcard_string="account1:listImages:*", case_sensitive=False
).implies(
CaseSensitivePermission(
wildcard_string="Account1:listImages:*", case_sensitive=False
)
)
# Mixed case, mismatch
assert not CaseSensitivePermission(wildcard_string="account1:listImages:*").implies(
CaseSensitivePermission(wildcard_string="Account1:listImages:*")
)
|
py | 1a529fbfc3e5d341fda428c5443c54fd7e3c545a | from django.forms import ModelForm, TextInput, DateField
from suit.widgets import SuitDateWidget
from suit_redactor.widgets import RedactorWidget
from haystack.forms import HighlightedSearchForm
from .models import Article
class ArticleForm(ModelForm):
class Meta:
widgets = {
'name': TextInput(attrs={'class': 'input-xxlarge'}),
'content': RedactorWidget(editor_options={'lang': 'es',
'minHeight': 400,
'maxHeight': 500}),
'date': SuitDateWidget,
}
class PersonForm(ModelForm):
class Meta:
widgets = {
'name': TextInput(attrs={'class': 'input-xxlarge'}),
'bio': RedactorWidget(editor_options={'lang': 'es',
'minHeight': 400,
'maxHeight': 500})
}
class ArticleSearchForm(HighlightedSearchForm):
# start_date = DateField(required=False)
# end_date = DateField(required=False)
models = [Article, ]
def search(self):
form = super(ArticleSearchForm, self).search().models(*self.get_models())
return form
def get_models(self):
return self.models
|
py | 1a52a01a50b7f2a8f88df4759a03fe7a0b52815d | """This module contains the meta information of OrgResolveLogicalParents ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("OrgResolveLogicalParents", "orgResolveLogicalParents", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"dn": MethodPropertyMeta("Dn", "dn", "ReferenceObject", "Version142b", "InputOutput", False),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"in_single_level": MethodPropertyMeta("InSingleLevel", "inSingleLevel", "Xs:string", "Version142b", "Input", False),
"out_configs": MethodPropertyMeta("OutConfigs", "outConfigs", "ConfigMap", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"dn": "dn",
"inHierarchical": "in_hierarchical",
"inSingleLevel": "in_single_level",
"outConfigs": "out_configs",
}
|
py | 1a52a03ab367ec3b1678f0094890505ff8597e8a | """Parent class for every Overkiz device."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from pyoverkiz.enums import OverkizAttribute, OverkizState
from pyoverkiz.models import Device
from homeassistant.components.sensor import SensorEntityDescription
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import OverkizDataUpdateCoordinator
from .executor import OverkizExecutor
class OverkizEntity(CoordinatorEntity):
"""Representation of an Overkiz device entity."""
coordinator: OverkizDataUpdateCoordinator
def __init__(
self, device_url: str, coordinator: OverkizDataUpdateCoordinator
) -> None:
"""Initialize the device."""
super().__init__(coordinator)
self.device_url = device_url
self.base_device_url, *_ = self.device_url.split("#")
self.executor = OverkizExecutor(device_url, coordinator)
self._attr_assumed_state = not self.device.states
self._attr_available = self.device.available
self._attr_unique_id = self.device.device_url
self._attr_name = self.device.label
self._attr_device_info = self.generate_device_info()
@property
def device(self) -> Device:
"""Return Overkiz device linked to this entity."""
return self.coordinator.data[self.device_url]
def generate_device_info(self) -> DeviceInfo:
"""Return device registry information for this entity."""
        # Some devices, such as the Smart Thermostat, have several devices in one physical device,
# with same device url, terminated by '#' and a number.
# In this case, we use the base device url as the device identifier.
if "#" in self.device_url and not self.device_url.endswith("#1"):
# Only return the url of the base device, to inherit device name and model from parent device.
return {
"identifiers": {(DOMAIN, self.executor.base_device_url)},
}
manufacturer = (
self.executor.select_attribute(OverkizAttribute.CORE_MANUFACTURER)
or self.executor.select_state(OverkizState.CORE_MANUFACTURER_NAME)
or self.coordinator.client.server.manufacturer
)
model = (
self.executor.select_state(
OverkizState.CORE_MODEL,
OverkizState.CORE_PRODUCT_MODEL_NAME,
OverkizState.IO_MODEL,
)
or self.device.widget
)
return DeviceInfo(
identifiers={(DOMAIN, self.executor.base_device_url)},
name=self.device.label,
manufacturer=manufacturer,
model=model,
sw_version=self.executor.select_attribute(
OverkizAttribute.CORE_FIRMWARE_REVISION
),
hw_version=self.device.controllable_name,
suggested_area=self.coordinator.areas[self.device.place_oid],
via_device=self.executor.get_gateway_id(),
configuration_url=self.coordinator.client.server.configuration_url,
)
@dataclass
class OverkizSensorDescription(SensorEntityDescription):
"""Class to describe an Overkiz sensor."""
native_value: Callable[
[str | int | float], str | int | float
] | None = lambda val: val
class OverkizDescriptiveEntity(OverkizEntity):
"""Representation of a Overkiz device entity based on a description."""
def __init__(
self,
device_url: str,
coordinator: OverkizDataUpdateCoordinator,
description: OverkizSensorDescription,
) -> None:
"""Initialize the device."""
super().__init__(device_url, coordinator)
self.entity_description = description
self._attr_name = f"{super().name} {self.entity_description.name}"
self._attr_unique_id = f"{super().unique_id}-{self.entity_description.key}"
|
py | 1a52a03f636fbed06f30b4468f205319d5196203 | #! /usr/bin/env python
# -*- coding:UTF-8 -*-
# Uses the semaphore mechanism; note how the Semaphore is used.
# A semaphore can manage a pool of instances of one kind of resource.
# In practice, the Queue module would be the best tool for this.
import threading
import time
import random
def numbergen(sem, queue, qlock):
while True:
time.sleep(2)
if random.randint(0,1):
value = random.randint(1,100)
            qlock.acquire()  # second gate: take exclusive ownership of the queue
try:
queue.append(value)
finally:
qlock.release()
print "Placed %d on the queue." % value
            sem.release()  # first gate: signal consumers that data is available (it may not be processed right away)
def numbercal(sem, queue, qlock):
while True:
sem.acquire()
qlock.acquire()
try:
value = queue.pop(0)
finally:
qlock.release()
print "%s: Got %d from the queue."%\
(threading.currentThread().getName(),value)
newvalue = value * 2
time.sleep(3)
# Main thread: shared state is passed to the worker threads as arguments
childthreads = []
sem = threading.Semaphore(0)
queue = []
qlock = threading.Lock()
# Create the producer thread
t = threading.Thread(target = numbergen, args = [sem, queue, qlock])
t.setDaemon(True)
t.start()
childthreads.append(t)
# Create the consumer threads
for i in range(1,3):
t = threading.Thread(target = numbercal, args= [sem,queue, qlock])
t.setDaemon(True)
t.start()
childthreads.append(t)
while True: #forever
time.sleep(300)
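# A minimal sketch of the Queue-based alternative suggested above (kept as
# comments because the loop above never returns): Queue.Queue provides both
# the locking and the producer/consumer signalling done by hand here with
# sem + qlock.
#
#   import Queue
#   q = Queue.Queue()
#   def producer():
#       while True:
#           time.sleep(2)
#           q.put(random.randint(1, 100))   # put() is thread-safe
#   def consumer():
#       while True:
#           value = q.get()                 # blocks until an item is ready
#           print "Got %d from the queue." % value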
|
py | 1a52a057a7f3184769531a2bf6eaa7534499ec88 | import torch
import os
import numpy as np
from tqdm import tqdm
from data_package.smoke_dataset import SmokeDataset
from model_package.mlp_mixer import MLPMixer
from model_package.resnet import resnet18, resnet34,resnext50_32x4d
from torch.utils.data import DataLoader
from data_package.data_transform import VideoTransform
root_dirs = [
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/base_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/DeAn_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/ZhangYe_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/XinXiang_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/HeNeng_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/TongHua_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/GuRun_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/YunJing_dataset",
# "/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/WanZai_dataset",
# "D:\data\smoke_car\MLP_data\\base_dataset",
# "D:\data\smoke_car\MLP_data\\DeAn_dataset",
# "D:\data\smoke_car\MLP_data\\ZhangYe_dataset",
# "D:\data\smoke_car\MLP_data\\XinXiang_dataset",
# "D:\data\smoke_car\MLP_data\\HeNeng_dataset",
# "D:\data\smoke_car\MLP_data\\TongHua_dataset",
# "D:\data\smoke_car\MLP_data\\GuRun_dataset",
# "D:\data\smoke_car\MLP_data\\YunJing_dataset"
]
test_dris = [
# "D:\data\smoke_car\MLP_data\\WanZai_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/WanZai_dataset",
# "/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/test_dataset",
]
load_weight = "weights/ResNet_C2_E60.snap"
save_model_name = "ResNet"
batch_size = 16
init_lr = 0.01
lr_steps = [50, 100, 150, 200, 250]
start_epoch = 0
max_epoch = 300
use_cuda = False
def train():
# model = MLPMixer(in_channels=96,
# num_patch=25 * 25,
# patch_size=25,
# num_classes=2,
# dim=512,
# depth=8,
# token_dim=256,
# channel_dim=2048
# )
model = resnext50_32x4d(
True if start_epoch > 0 else True,
num_classes=2)
if use_cuda:
model = model.cuda()
if len(load_weight) > 0 and start_epoch > 0:
print("|INFO|loading model:%s|" % load_weight)
static_dict = torch.load(load_weight)
model.load_state_dict(static_dict)
criterion = torch.nn.CrossEntropyLoss()
optim = torch.optim.Adam([{"params": model.parameters(), "initial_lr": init_lr}], lr=init_lr)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, lr_steps, 0.1, last_epoch=start_epoch)
dataset = SmokeDataset(root_dirs=root_dirs,
transform=VideoTransform(size=100, flip_p=0.5, std=255.,
use_bright_contrast=True,
horizontal_flip=True,
vertical_flip=True,
random_sample=False)
)
data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, collate_fn=SmokeDataset.collate)
test_dataset = SmokeDataset(root_dirs=test_dris,
transform=VideoTransform(size=100, flip_p=0.5, std=255.,
use_bright_contrast=False,
horizontal_flip=False,
vertical_flip=False,
random_sample=False)
)
test_data_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=SmokeDataset.collate)
for epoch in range(start_epoch, max_epoch):
process_bar = tqdm(data_loader, ncols=180)
total_loss = 0
total_acc = 0
model.eval()
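        # Quick accuracy check on the held-out test set before this epoch's training pass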
for idx, (data, label) in enumerate(test_data_loader):
if use_cuda:
data = data.cuda()
label = label.cuda()
logit = model(data)
pred = torch.argmax(logit, 1)
acc = pred == label
            acc = acc.sum() / label.size(0)  # use the actual batch size (the last batch may be smaller)
total_acc += acc
print("\n|INFO|acc:%.4f|" % (total_acc / (idx + 1)))
model.train()
for idx, (data, label) in enumerate(process_bar):
if use_cuda:
data = data.cuda()
label = label.cuda()
logit = model(data)
optim.zero_grad()
loss = criterion.forward(input=logit, target=label)
loss.backward()
optim.step()
total_loss += loss.data
process_bar.desc = "|INFO|epoch:%d|step:%d|loss:%.4f/%.4f|lr:%f|" % (
epoch, idx, loss.data, total_loss.data / (idx + 1), optim.param_groups[0]["lr"])
lr_scheduler.step()
# test
if (epoch % 10 == 0) and epoch > 0:
save_path = os.path.abspath('weights/%s_C2_E%d.snap' % (save_model_name,epoch))
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
torch.save(model.state_dict(), save_path)
print("\n|INFO|save model in %s|" % save_path)
if __name__ == '__main__':
train()
from torchvision.models import resnext50_32x4d
|
py | 1a52a0f305301f3e688f3624c0508d36a5489b48 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _Info(object):
def __init__(self, name, _type=None, entry_type=None):
self._name = name
self._type = _type
if entry_type is not None and self._type != 'GenericSet':
raise ValueError(
'entry_type should only be specified if _type is GenericSet')
self._entry_type = entry_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def entry_type(self):
return self._entry_type
ANGLE_REVISIONS = _Info('angleRevisions', 'GenericSet', str)
ARCHITECTURES = _Info('architectures', 'GenericSet', str)
BENCHMARKS = _Info('benchmarks', 'GenericSet', str)
BENCHMARK_START = _Info('benchmarkStart', 'DateRange')
BENCHMARK_DESCRIPTIONS = _Info('benchmarkDescriptions', 'GenericSet', str)
BOTS = _Info('bots', 'GenericSet', str)
BUG_COMPONENTS = _Info('bugComponents', 'GenericSet', str)
BUILD_URLS = _Info('buildUrls', 'GenericSet', str)
BUILDS = _Info('builds', 'GenericSet', int)
CATAPULT_REVISIONS = _Info('catapultRevisions', 'GenericSet', str)
CHROMIUM_COMMIT_POSITIONS = _Info('chromiumCommitPositions', 'GenericSet', int)
CHROMIUM_REVISIONS = _Info('chromiumRevisions', 'GenericSet', str)
DESCRIPTION = _Info('description', 'GenericSet', str)
DEVICE_IDS = _Info('deviceIds', 'GenericSet', str)
DOCUMENTATION_URLS = _Info('documentationLinks', 'GenericSet', str)
FUCHSIA_GARNET_REVISIONS = _Info('fuchsiaGarnetRevisions', 'GenericSet', str)
FUCHSIA_PERIDOT_REVISIONS = _Info('fuchsiaPeridotRevisions', 'GenericSet', str)
FUCHSIA_TOPAZ_REVISIONS = _Info('fuchsiaTopazRevisions', 'GenericSet', str)
FUCHSIA_ZIRCON_REVISIONS = _Info('fuchsiaZirconRevisions', 'GenericSet', str)
GPUS = _Info('gpus', 'GenericSet', str)
HAD_FAILURES = _Info('hadFailures', 'GenericSet', bool)
IS_REFERENCE_BUILD = _Info('isReferenceBuild', 'GenericSet', bool)
LABELS = _Info('labels', 'GenericSet', str)
LOG_URLS = _Info('logUrls', 'GenericSet', str)
MASTERS = _Info('masters', 'GenericSet', str)
MEMORY_AMOUNTS = _Info('memoryAmounts', 'GenericSet', int)
OS_NAMES = _Info('osNames', 'GenericSet', str)
OS_VERSIONS = _Info('osVersions', 'GenericSet', str)
OWNERS = _Info('owners', 'GenericSet', str)
POINT_ID = _Info('pointId', 'GenericSet', int)
PRODUCT_VERSIONS = _Info('productVersions', 'GenericSet', str)
REVISION_TIMESTAMPS = _Info('revisionTimestamps', 'DateRange')
SKIA_REVISIONS = _Info('skiaRevisions', 'GenericSet', str)
STATISTICS_NAMES = _Info('statisticsNames', 'GenericSet', str)
STORIES = _Info('stories', 'GenericSet', str)
STORYSET_REPEATS = _Info('storysetRepeats', 'GenericSet', int)
STORY_TAGS = _Info('storyTags', 'GenericSet', str)
SUMMARY_KEYS = _Info('summaryKeys', 'GenericSet', str)
TEST_PATH = _Info('testPath', 'GenericSet', str)
TRACE_START = _Info('traceStart', 'DateRange')
TRACE_URLS = _Info('traceUrls', 'GenericSet', str)
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
WEBRTC_INTERNAL_REVISIONS = _Info('webrtcInternalRevisions', 'GenericSet', str)
def _CreateCachedInfoTypes():
info_types = {}
for info in globals().values():
if isinstance(info, _Info):
info_types[info.name] = info
return info_types
_CACHED_INFO_TYPES = _CreateCachedInfoTypes()
def GetTypeForName(name):
info = _CACHED_INFO_TYPES.get(name)
if info:
return info.type
def AllInfos():
for info in _CACHED_INFO_TYPES.values():
yield info
def AllNames():
for info in AllInfos():
yield info.name
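# A small illustrative check (not part of the original module); it only uses
# the helpers defined above to show how a diagnostic name maps to its type.
if __name__ == '__main__':
  assert GetTypeForName('bots') == 'GenericSet'
  assert GetTypeForName('benchmarkStart') == 'DateRange'
  assert GetTypeForName('noSuchName') is None
  print(sorted(AllNames())[:3])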
|
py | 1a52a103d1856c66f88ae0087a7d8a0e8f0e8e1c | # coding=utf-8
from randgen import generate_random_grid, generate_random_separation_grid
|
py | 1a52a153193752ba83fdd4c2c05723b60cb4c3ca | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from uuid import uuid4
import pytest
import helpers
import upload_model_explain_tabular_managed_container_sample
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest"
ARTIFACT_URI = "gs://ucaip-samples-us-central1/model/boston_housing/"
DISPLAY_NAME = f"temp_upload_model_test_{uuid4()}"
INPUT_TENSOR_NAME = "dense_input"
OUTPUT_TENSOR_NAME = "dense_2"
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_model):
yield
@pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420")
def test_ucaip_generated_upload_model_explain_tabular_managed_constainer_sample(capsys, shared_state):
upload_model_explain_tabular_managed_container_sample.upload_model_explain_tabular_managed_container_sample(
display_name=DISPLAY_NAME,
artifact_uri=ARTIFACT_URI,
container_spec_image_uri=IMAGE_URI,
project=PROJECT_ID,
input_tensor_name=INPUT_TENSOR_NAME,
output_tensor_name=OUTPUT_TENSOR_NAME,
feature_names=["crim", "zn", "indus", "chas", "nox", "rm", "age",
"dis", "rad", "tax", "ptratio", "b", "lstat"]
)
out, _ = capsys.readouterr()
shared_state["model_name"] = helpers.get_name(out, key="model")
|
py | 1a52a16d8ed2041f5aa8b75ed7d54628e1d3389a | import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError, Module
class Glue(Module):
def __init__(self, modules=None, fwdGlue=None, bwdGlue=None, fwdShapeGlue=None, bwdShapeGlue=None, name=None):
super().__init__(name)
if modules is not None and not isinstance(modules, dict):
raise ModuleError("Modules object must be non-empty dictionary")
self.modules = modules
self.fwdGlue = fwdGlue
self.bwdGlue = bwdGlue
self.fwdShapeGlue = fwdShapeGlue
self.bwdShapeGlue = bwdShapeGlue
def updateData(self, data):
self.data = self.fwdGlue(data, self.modules)
def updateGrad(self, grad):
self.grad = self.bwdGlue(grad, self.modules)
def dataShapeFrom(self, shape):
if self.fwdShapeGlue is not None:
return self.fwdShapeGlue(shape)
else:
raise ModuleError("Forward shape glue hook is not installed")
def gradShapeFrom(self, shape):
if self.bwdShapeGlue is not None:
return self.bwdShapeGlue(shape)
else:
raise ModuleError("Backward shape glue hook is not installed")
def unittest():
data1 = gpuarray.to_gpu(np.random.randn(10, 2, 3, 3).astype(np.float32))
data2 = gpuarray.to_gpu(np.random.randn(10, 2, 3, 3).astype(np.float32))
data3 = gpuarray.to_gpu(np.random.randn(10, 10).astype(np.float32))
def fwdGlue(data, modules):
dat1, dat2, dat3 = data
split = modules["split"]
out1, out2 = split(data3)
return [dat1 + dat2, out1, out2]
def bwdGlue(grad, modules):
gr1, gr2, gr3 = grad
split = modules["split"]
split.backward([gr2, gr3])
return [gr1, gr1, split.grad]
from PuzzleLib.Modules.Split import Split
glue = Glue(fwdGlue=fwdGlue, bwdGlue=bwdGlue, modules={"split": Split(axis=1, sections=(5, 5))})
glue([data1, data2, data3])
grad1 = gpuarray.to_gpu(np.random.randn(*glue.data[0].shape).astype(np.float32))
grad2 = gpuarray.to_gpu(np.random.randn(*glue.data[1].shape).astype(np.float32))
grad3 = gpuarray.to_gpu(np.random.randn(*glue.data[2].shape).astype(np.float32))
glue.backward([grad1, grad2, grad3])
if __name__ == "__main__":
unittest()
|
py | 1a52a186f7af0813b85e88ff617ab8ee47bbb217 | import datetime as dt
from typing import Union
import numpy as np
import pandas as pd
# type aliases
Array = Union[pd.DataFrame, pd.Series, np.ndarray]
Ephemeris = Union[pd.DataFrame, "LHorizon"]
Timelike = Union[str, dt.datetime, float] |
py | 1a52a2c398b456bdf302bdb437286f591dabba31 | import os
import re
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
RECEIVED_FILE_CHAR_LIMIT = 50 * 1000
# The limit in number of characters of files to accept
def new_file(basename, content):
"""
The method creates a new File object or derived object from File based on the basename file extension. This should
be used to create Files instead of using the File object constructors. This also ensures that the file is not longer
than the file char limit.
:param basename:
The file name
:param content:
The file content
:return:
Returns a File or derived File object
"""
if len(content) > RECEIVED_FILE_CHAR_LIMIT:
raise Exception(f"File {basename} exceeds size limits")
fn, ext = os.path.splitext(basename)
if ext == ".adb" or ext == ".ads":
return AdaFile(basename, content)
elif ext == ".c" or ext == ".h":
return CFile(basename, content)
elif ext == ".cpp" or ext == ".hh":
return CPPFile(basename, content)
elif ext == ".gpr":
return ProjectFile(basename, content)
else:
return File(basename, content)
def find_mains(filelist):
"""
    This checks a list of files to find files that can be considered mains. For Ada files, the criterion is that the
adb file does not have a corresponding ads file. For C files, we use the CFile.is_main() method.
:param filelist:
The list of files to check for mains
:return:
The list of files that have mains
"""
mains = []
for f in filelist:
logger.debug(f"Checking {f.get_name()} for main")
if f.language() == "Ada":
filename = f.get_name()
base, ext = os.path.splitext(filename)
if ext == ".adb":
logger.debug(f"Looking for spec for {f.get_name()}")
if not next((x for x in filelist if x.get_name() == (base + ".ads")), None):
logger.debug(f"Found main in {f.get_name()}")
mains.append(filename)
else:
if f.is_main():
mains.append(f.get_name())
logger.debug(f"Found main in {f.get_name()}")
return mains
class File:
"""
This is the base File class used to represent generic Files.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
Returns the coding language for the file is any
is_main()
Checks if the file is a main
"""
def __init__(self, basename, content):
"""
Constructor for File. THIS SHOULD NOT BE CALLED DIRECTLY!! Use new_file method instead.
:param basename:
File name
:param content:
File content
"""
self.basename = basename
self.content = content
def get_name(self):
"""
Returns the name of the file
:return:
The file name
"""
return self.basename
def get_content(self):
"""
Returns the content of the file
:return:
The file content
"""
return self.content
def language(self):
"""
Returns the language for the file
:return:
Returns the file language or None
"""
return None
def is_main(self):
"""
Returns if the file is/has a main. Valid for C or CPP only now.
:return:
Returns True if the file has/is a main
"""
return False
class AdaFile(File):
"""
Class for an Ada file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
Returns the coding language for the file is any
is_main()
Checks if the file is a main
"""
def is_main(self):
"""
This should check if the Ada file is a main. This is unimplemented and shouldn't be used
"""
# TODO: figure out how to do this
raise NotImplementedError
def language(self):
"""
Returns "Ada"
:return:
The language string
"""
return "Ada"
class CFile(File):
"""
Class for a C file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
Returns the coding language for the file is any
is_main()
Checks if the file is a main
"""
def is_main(self):
"""
Uses a regex to compute if the C file has the right function layout/name for a main
:return:
True if the regex matches
"""
main_re = re.compile("^(?:void|int) +main\(.*\)(?: |\n)*{", re.MULTILINE)
return main_re.findall(self.content)
def language(self):
"""
Returns "c"
:return:
The language string
"""
return "c"
class CPPFile(CFile):
"""
Class for a CPP file. Inherits from CFile.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
Returns the coding language for the file is any
is_main()
Checks if the file is a main
"""
def language(self):
"""
Returns "c++"
:return:
The language string
"""
return "c++"
class ProjectFile(File):
"""
Class for a Project file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
    allowed_switches : dict
        The list of allowed switches to apply to gpr packages
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
Returns the coding language for the file is any
is_main()
Checks if the file is a main
insert_language(languages)
Inserts the languages for the project into the project file
define_mains(mains)
Inserts the mains for the project into the project file
"""
allowed_switches = {
'Builder': ['-g'],
'Compiler': ['-g', '-O0', '-gnata', '-gnatwa', '-gnato', '-gnato0', '-gnato11', '-gnato23',
'-gnato21', '-gnato22']
}
def insert_languages(self, languages):
"""
Inserts languages into the correct place in the project file
:param languages:
The list of languages to add to the project
"""
lang_list = [f'"{x}"' for x in languages]
to_insert = f"for Languages use ({', '.join(lang_list)});"
self.content = self.content.replace("--LANGUAGE_PLACEHOLDER--", to_insert)
def define_mains(self, mains):
"""
Inserts the mains into the correct place in the project file
:param mains:
The list of mains to add to the project
"""
main_list = [f'"{x}"' for x in mains]
to_insert = f"for Main use ({', '.join(main_list)});"
self.content = self.content.replace("--MAIN_PLACEHOLDER--", to_insert)
def insert_switches(self, switch_list):
sw_dict = {}
regex = re.compile(r'(Builder|Compiler)\((.+)\)')
for sec in switch_list:
match = regex.search(sec)
if match:
pkg_name = match.group(1)
switches = set(match.group(2).split(','))
if pkg_name in sw_dict.keys():
sw_dict[pkg_name] = sw_dict[pkg_name] | switches
else:
sw_dict[pkg_name] = switches
for pkg, unfiltered_switches in sw_dict.items():
filtered_switches = []
for switch in unfiltered_switches:
if switch in self.allowed_switches[pkg]:
filtered_switches.append('"' + switch + '"')
else:
logger.error(f"Illegal switch requested in pkg {pkg}: {switch}")
if filtered_switches:
placeholder_str = "--" + pkg.upper() + "_SWITCHES_PLACEHOLDER--"
switches_str = ', '.join(filtered_switches)
line_str = f'for Switches ("Ada") use ({switches_str});'
logger.debug(f"Adding line {line_str} to pkg {pkg}")
self.content = self.content.replace(placeholder_str, line_str)
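# A minimal illustrative sketch (not part of the original module): the file
# names, C body and gpr placeholder content below are invented; it only
# exercises new_file, find_mains and the ProjectFile placeholder helpers
# defined above.
if __name__ == "__main__":
    sources = [
        new_file("main.c", "int main(void) {\n    return 0;\n}\n"),
        new_file("pkg.ads", "package Pkg is\nend Pkg;\n"),
        new_file("pkg.adb", "package body Pkg is\nend Pkg;\n"),
    ]
    print(find_mains(sources))  # expected: ['main.c'] (pkg.adb has a matching .ads)
    gpr = new_file("main.gpr",
                   "project Main is\n"
                   "   --LANGUAGE_PLACEHOLDER--\n"
                   "   --MAIN_PLACEHOLDER--\n"
                   "end Main;\n")
    gpr.insert_languages(["c", "Ada"])
    gpr.define_mains(find_mains(sources))
    print(gpr.get_content())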
|
py | 1a52a3da2f353b10b690602a60718f4a53abd3a5 | """
Test basic functionality for loading sample datasets.
"""
import pandas as pd
from pygmt.datasets import (
load_fractures_compilation,
load_hotspots,
load_japan_quakes,
load_mars_shape,
load_ocean_ridge_points,
load_sample_bathymetry,
load_usgs_quakes,
)
def test_japan_quakes():
"""
Check that the dataset loads without errors.
"""
data = load_japan_quakes()
assert data.shape == (115, 7)
summary = data.describe()
assert summary.loc["min", "year"] == 1987
assert summary.loc["max", "year"] == 1988
assert summary.loc["min", "month"] == 1
assert summary.loc["max", "month"] == 12
assert summary.loc["min", "day"] == 1
assert summary.loc["max", "day"] == 31
def test_ocean_ridge_points():
"""
Check that the @ridge.txt dataset loads without errors.
"""
data = load_ocean_ridge_points()
assert data.shape == (4146, 2)
summary = data.describe()
assert summary.loc["min", "longitude"] == -179.9401
assert summary.loc["max", "longitude"] == 179.935
assert summary.loc["min", "latitude"] == -65.6182
assert summary.loc["max", "latitude"] == 86.8
def test_sample_bathymetry():
"""
Check that the @tut_ship.xyz dataset loads without errors.
"""
data = load_sample_bathymetry()
assert data.shape == (82970, 3)
summary = data.describe()
assert summary.loc["min", "longitude"] == 245.0
assert summary.loc["max", "longitude"] == 254.705
assert summary.loc["min", "latitude"] == 20.0
assert summary.loc["max", "latitude"] == 29.99131
assert summary.loc["min", "bathymetry"] == -7708.0
assert summary.loc["max", "bathymetry"] == -9.0
def test_usgs_quakes():
"""
Check that the dataset loads without errors.
"""
data = load_usgs_quakes()
assert data.shape == (1197, 22)
def test_fractures_compilation():
"""
Check that the @fractures_06.txt dataset loads without errors.
"""
data = load_fractures_compilation()
assert data.shape == (361, 2)
summary = data.describe()
assert summary.loc["min", "length"] == 98.6561
assert summary.loc["max", "length"] == 984.652
assert summary.loc["min", "azimuth"] == 0.0
assert summary.loc["max", "azimuth"] == 360.0
def test_mars_shape():
"""
Check that the @mars370d.txt dataset loads without errors.
"""
data = load_mars_shape()
assert data.shape == (370, 3)
summary = data.describe()
assert summary.loc["min", "longitude"] == 0.008
assert summary.loc["max", "longitude"] == 359.983
assert summary.loc["min", "latitude"] == -79.715
assert summary.loc["max", "latitude"] == 85.887
assert summary.loc["min", "radius(m)"] == -6930
assert summary.loc["max", "radius(m)"] == 15001
def test_hotspots():
"""
Check that the @hotspots.txt dataset loads without errors.
"""
data = load_hotspots()
assert data.shape == (55, 4)
assert data.columns.values.tolist() == [
"longitude",
"latitude",
"symbol_size",
"place_name",
]
assert isinstance(data, pd.DataFrame)
|
py | 1a52a3f86b4ecaf9a2d115c22f6e5f17149f9bf2 | import os
import pickle
import cv2
import numpy as np
from matplotlib import pyplot as plt
def load_data(filepath):
# Loading dataset
data = pickle.load(open(filepath, 'rb'))
# Plot data
for i in range(15):
plt.imshow(cv2.cvtColor(data[i+50], cv2.COLOR_BGR2RGB))
plt.show()
return
if __name__ == "__main__":
#data = pickle.load(open("data/",'rb'))
#X_test = data['X_test']
#pickle.dump(X_test[0:100], open("./ans_imgs",'wb'))
load_data("./ans_imgs")
|
py | 1a52a41bc82d0004c5d2a41fdc5f1ff118082802 | import unittest
from conans.client import tools
from conans.client.build.visual_environment import VisualStudioBuildEnvironment
from conans.test.utils.mocks import MockSettings, MockConanfile
from conans.test.utils.tools import TestClient
class VisualStudioBuildEnvironmentTest(unittest.TestCase):
def test_visual(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
conanfile.deps_cpp_info.include_paths.append("/one/include/path")
conanfile.deps_cpp_info.include_paths.append("/two/include/path")
conanfile.deps_cpp_info.lib_paths.append("/one/lib/path")
conanfile.deps_cpp_info.lib_paths.append("/two/lib/path")
conanfile.deps_cpp_info.cflags.append("-mycflag")
conanfile.deps_cpp_info.cflags.append("-mycflag2")
conanfile.deps_cpp_info.cxxflags.append("-mycxxflag")
conanfile.deps_cpp_info.cxxflags.append("-mycxxflag2")
conanfile.deps_cpp_info.exelinkflags.append("-myexelinkflag")
conanfile.deps_cpp_info.sharedlinkflags.append("-mysharedlinkflag")
conanfile.deps_cpp_info.libs.extend(['gdi32', 'user32.lib'])
tool = VisualStudioBuildEnvironment(conanfile)
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2'],
"LIB": ["/one/lib/path", "/two/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
tool.parallel = True
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2',
'/MP%s' % tools.cpu_count(output=conanfile.output)],
"LIB": ["/one/lib/path", "/two/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
tool.parallel = False
# Now alter the paths before the vars_dict call
tool.include_paths.append("/three/include/path")
tool.lib_paths.append("/three/lib/path")
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path",
"-I/two/include/path",
"-I/three/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2'],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
# Now try appending to environment
with tools.environment_append({"CL": "-I/four/include/path -I/five/include/path",
"LIB": "/four/lib/path;/five/lib/path"}):
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
"-I/three/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2',
"-I/four/include/path -I/five/include/path"],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path",
"/four/lib/path;/five/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
self.assertEqual(tool.vars, {
"CL": '-I"/one/include/path" -I"/two/include/path" -I"/three/include/path" -MDd '
'-mycflag -mycflag2 -Zi -Ob0 -Od '
'-mycxxflag -mycxxflag2 '
'-I/four/include/path -I/five/include/path',
"LIB": "/one/lib/path;/two/lib/path;/three/lib/path;/four/lib/path;/five/lib/path",
"UseEnv": "True",
"_LINK_": "-myexelinkflag -mysharedlinkflag gdi32.lib user32.lib"
})
def test_build_type_toolset(self):
profile = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
"""
profile_toolset = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
compiler.toolset=v141
build_type=Release
"""
profile_toolset_clang = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
compiler.toolset=v141_clang_c2
"""
conanfile = """
from conans import ConanFile, VisualStudioBuildEnvironment
class TestConan(ConanFile):
name = "testlib"
version = "1.0"
settings = "compiler", "build_type", "os"
def build(self):
env_build = VisualStudioBuildEnvironment(self)
self.output.info(env_build.flags)
"""
client = TestClient()
client.save({"profile": profile,
"profile_toolset": profile_toolset,
"profile_toolset_clang": profile_toolset_clang,
"conanfile.py": conanfile})
result = {"Debug": "['-Zi', '-Ob0', '-Od']",
"Release": "['-DNDEBUG', '-O2', '-Ob2']",
"RelWithDebInfo": "['-DNDEBUG', '-Zi', '-O2', '-Ob1']",
"MinSizeRel": "['-DNDEBUG', '-O1', '-Ob1']"}
result_toolset_clang = {"Debug": "['-gline-tables-only', '-fno-inline', '-O0']",
"Release": "['-DNDEBUG', '-O2']",
"RelWithDebInfo": "['-DNDEBUG', '-gline-tables-only', '-O2', '-fno-inline']",
"MinSizeRel": "['-DNDEBUG']"}
for build_type in ["Debug", "Release", "RelWithDebInfo", "MinSizeRel"]:
client.run("create . danimtb/testing -pr=profile -s build_type=%s" % build_type)
self.assertIn(result[build_type], client.out)
client.run("create . danimtb/testing -pr=profile_toolset -s build_type=%s" % build_type)
self.assertIn(result[build_type], client.out)
client.run("create . danimtb/testing -pr=profile_toolset_clang -s build_type=%s" %
build_type)
self.assertIn(result_toolset_clang[build_type], client.out)
|
py | 1a52a590128de906b425d052d12078c3b01e6b4d | import os
import getpass
import pyblish.api
from openpype.lib import get_openpype_username
class CollectCurrentUserPype(pyblish.api.ContextPlugin):
"""Inject the currently logged on user into the Context"""
# Order must be after default pyblish-base CollectCurrentUser
order = pyblish.api.CollectorOrder + 0.001
label = "Collect Pype User"
def process(self, context):
user = get_openpype_username()
context.data["user"] = user
self.log.debug("Collected user \"{}\"".format(user))
|
py | 1a52a6312837b54073ab96edad8ff61ff9b0d045 | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton
from PyQt5.QtGui import QIcon
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setGeometry(100, 200, 300, 400)
self.setWindowTitle("PyQt")
self.setWindowIcon(QIcon("icon/graph.png"))
btn = QPushButton("버튼1", self)
btn.move(10, 10)
btn2 = QPushButton("버튼2", self)
btn2.move(10, 40)
app = QApplication(sys.argv)
window = MyWindow()
window.show()
app.exec_()
|
py | 1a52a641c5a9ba8cd45bae7c2707977e8a6ff41f | from unittest import TestCase, main
from expects import expect, equal
from twin_sister.injection.dependency_context import DependencyContext
class TestSetEnv(TestCase):
def test_can_set_arbitrary_var(self):
key = 'some_key'
value = 'some_value'
context = DependencyContext(supply_env=True)
context.set_env(**{key: value})
expect(context.os.environ[key]).to(equal(value))
def test_can_set_multiple_vars(self):
k1 = 'doe'
v1 = 'a deer, a female deer'
k2 = 'ray'
v2 = 'a drop of golden sun'
context = DependencyContext(supply_env=True)
context.set_env(**{k1: v1, k2: v2})
expect(context.os.environ[k1]).to(equal(v1))
expect(context.os.environ[k2]).to(equal(v2))
def test_can_replace_existing_var(self):
key = 'quokka'
old = 'old value'
new = 'new value'
context = DependencyContext(supply_env=True)
context.set_env(**{key: old})
context.set_env(**{key: new})
expect(context.os.environ[key]).to(equal(new))
def test_does_not_affect_unspecified_var(self):
existing_key = 'dog_milk'
existing_value = 'Lasts longer than any other milk'
context = DependencyContext(supply_env=True)
context.os.environ[existing_key] = existing_value
context.set_env(goat_milk='and little lambs eat ivy')
expect(context.os.environ[existing_key]).to(
equal(existing_value))
def test_converts_number_to_string(self):
context = DependencyContext(supply_env=True)
context.set_env(n=13)
expect(context.os.environ['n']).to(equal(str(13)))
def test_converts_bool_to_string(self):
context = DependencyContext(supply_env=True)
context.set_env(n=False)
expect(context.os.environ['n']).to(equal(str(False)))
def test_converts_arbitrary_rubbish_to_string(self):
context = DependencyContext(supply_env=True)
rubbish = {'menu': ['spam', 'eggs', 'sausage', 'biggles']}
context.set_env(rubbish=rubbish)
expect(context.os.environ['rubbish']).to(
equal(str(rubbish)))
def test_complains_if_env_not_supplied(self):
context = DependencyContext()
try:
context.set_env(things='0')
assert False, 'No exception was raised'
except RuntimeError:
pass
if '__main__' == __name__:
main()
|
py | 1a52a673b9c0c06b75c4601c7d598cd6b09c3db2 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6647")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6647")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
py | 1a52a9ab9402369bf73f0e266c99df1895da8a08 | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
#We want a user profile to be created for each new user rather than going through the admin
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
#saves the new user profile |
py | 1a52a9ad7a9a9af0f1b058b82635b6e876d02e62 | import os
from libcity.config import ConfigParser
from libcity.data import get_dataset
from libcity.utils import get_executor, get_model
task = 'traj_loc_pred'
model = 'DeepMove'
dataset = 'foursquare_tky'
other_args = {
'batch_size': 1
}
config = ConfigParser(task, model, dataset, config_file=None, other_args=other_args)
dataset = get_dataset(config)
train_data, valid_data, test_data = dataset.get_data()
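# The imported helpers are typically used to finish the pipeline; the call
# signatures below are assumptions for illustration, not taken from this
# snippet:
#   model = get_model(config, dataset.get_data_feature())
#   executor = get_executor(config, model)
#   executor.train(train_data, valid_data)
#   executor.evaluate(test_data)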
|
py | 1a52ab32ec289106e07399ac9168aabff4f96134 | import os
import sys
from config import cfg
import argparse
import torch
from torch.backends import cudnn
import torchvision.transforms as T
from PIL import Image
sys.path.append('.')
from utils.logger import setup_logger
from model import make_model
import numpy as np
import cv2
from utils.metrics import cosine_similarity
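# Saves a horizontal strip made of the query image followed by its top_k
# gallery matches under OUTPUT_DIR/results/. Relies on the module-level
# query_img, indices and img_path variables prepared in the __main__ block.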
def visualizer(test_img, camid, top_k = 10, img_size=[128,128]):
figure = np.asarray(query_img.resize((img_size[1],img_size[0])))
for k in range(top_k):
name = str(indices[0][k]).zfill(6)
img = np.asarray(Image.open(img_path[indices[0][k]]).resize((img_size[1],img_size[0])))
figure = np.hstack((figure, img))
title=name
figure = cv2.cvtColor(figure,cv2.COLOR_BGR2RGB)
if not os.path.exists(cfg.OUTPUT_DIR+ "/results/"):
print('create a new folder named results in {}'.format(cfg.OUTPUT_DIR))
os.makedirs(cfg.OUTPUT_DIR+ "/results")
cv2.imwrite(cfg.OUTPUT_DIR+ "/results/{}-cam{}.png".format(test_img,camid),figure)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ReID Baseline Training")
parser.add_argument(
"--config_file", default="./configs/Market1501.yaml", help="path to config file", type=str
)
args = parser.parse_args()
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.freeze()
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
model = make_model(cfg, 255)
model.load_param(cfg.TEST.TEST_WEIGHT)
device = 'cuda'
model = model.to(device)
transform = T.Compose([
T.Resize(cfg.DATA.INPUT_SIZE),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
logger = setup_logger('{}.test'.format(cfg.PROJECT_NAME), cfg.OUTPUT_DIR, if_train=False)
model.eval()
for test_img in os.listdir(cfg.TEST.QUERY_DIR):
logger.info('Finding ID {} ...'.format(test_img))
gallery_feats = torch.load(cfg.OUTPUT_DIR + '/gfeats.pth')
img_path = np.load(cfg.OUTPUT_DIR +'/imgpath.npy')
print(gallery_feats.shape, len(img_path))
query_img = Image.open(cfg.TEST.QUERY_DIR + test_img)
input = torch.unsqueeze(transform(query_img), 0)
input = input.to(device)
with torch.no_grad():
query_feat = model(input)
dist_mat = cosine_similarity(query_feat, gallery_feats)
indices = np.argsort(dist_mat, axis=1)
visualizer(test_img, camid='mixed', top_k=10, img_size=cfg.DATA.INPUT_SIZE) |
py | 1a52abb692530859b59615f3243acb9f8012c9e7 | from django import template
register = template.Library()
@register.filter
def addclass(field, css):
return field.as_widget(attrs={'class': css})
@register.filter
def addattrs(field, attrs):
attr1, attr2 = attrs.split(',')
return field.as_widget(attrs={'class': attr1,
'placeholder': attr2})
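# Template usage sketch (assumes this tag library is loaded in the template
# with {% load %} under whatever name the app registers; field names are
# illustrative):
#   {{ form.title|addclass:"form-control" }}
#   {{ form.title|addattrs:"form-control,Enter a title" }}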
|
py | 1a52abf236b502cc72658d978e1cd51495266065 | #!/usr/bin/env python
import mmap
import posix_ipc
import rc_pkt
# see rc_remap_sample.py for usage
# must match rc_ipc.c
shm_name = "/rc_shm"
sem_name = "/rc_sem"
# must match sizeof(struct rc_pkt)
shm_size = rc_pkt.LENGTH
shm = None
sem = None
shm_file = None
def attach():
global shm
global sem
global shm_file
# create/attach shared memory
try:
shm = posix_ipc.SharedMemory(shm_name, posix_ipc.O_CREAT, size=shm_size)
except:
print "rc_shm.attach: ERROR creating shared memory", shm_name
return False
shm_file = mmap.mmap(shm.fd, shm.size)
shm.close_fd()
# create/attach semaphore
try:
sem = posix_ipc.Semaphore(sem_name, posix_ipc.O_CREAT)
except:
print "rc_shm.attach: ERROR creating semaphore", sem_name
return False
return True
# pkt is a tuple (timestamp, sequence, channels[])
def put(pkt):
if shm_file is None or sem is None:
#print "rc_shm.put: must attach first"
return False
# convert from tuple to string
p = rc_pkt.pack(pkt)
if p is None:
return False
# write to shared memory
sem.acquire()
shm_file.seek(0)
shm_file.write(p)
sem.release()
return True
# return pkt or None
# pkt is returned as a tuple (timestamp, sequence, channels[])
def get():
if shm_file is None or sem is None:
#print "rc_shm.get: must attach first"
return False
sem.acquire()
shm_file.seek(0)
s = shm_file.read(shm_size)
sem.release()
return rc_pkt.unpack(s)
def detach():
global shm
global sem
global shm_file
if shm_file:
shm_file.close()
shm_file = None
shm = None
sem = None
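# Minimal usage sketch (channel values are illustrative; the packet is a
# (timestamp, sequence, channels[]) tuple as noted above):
#   if attach():
#       put((0, 1, [1500] * 8))
#       print get()
#       detach()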
|
py | 1a52ac58316e08164118fc06fcd5abb299442e37 | """Create LM input function for TPUEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
# pylint: disable=g-import-not-at-top
try:
from google3.experimental.users.zihangd.pretrain.data_utils import type_cast
from google3.experimental.users.zihangd.pretrain.data_utils import sparse_to_dense
except ImportError as e:
from data_utils import type_cast
from data_utils import sparse_to_dense
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
def lm_process(dataset, seq_len, use_bfloat16):
"""Turn a dataset of doc tfrecords into a dataset of chunked seqeuences."""
# Flatten the original dataset into a continuous stream and then chunk the
# continuous stream into segments of fixed length `seq_len`
dataset = dataset.unbatch().repeat()
# Each window has one more element so that we can split inputs & target.
# Meanwhile, we only shift `seq_len` positions.
# Example:
# tf.data.Dataset.range(7).window(size=3, shift=2) produces
# { {0, 1, 2}, {2, 3, 4}, {4, 5, 6}}
window_size = seq_len + 1
dataset = dataset.window(size=window_size, shift=seq_len)
def window_to_tensor(example):
"""Converts a dataset of (nested) windows to one of (nested) tensors."""
new_example = {}
for k, v in example.items():
# Here, v is a "window", i.e. a finite sized dataset, that contains
# "window_size" tensors of shape [] tensors.
# Hence, `v.batch(window_size)` returns a new dataset `u` that contains
# "one single" tensor of shape [window_size].
# Then, `get_single_element` simply gets "the single tensor" out from `u`
u = v.batch(window_size)
element = tf.data.experimental.get_single_element(u)
new_example[k] = element
return new_example
dataset = dataset.map(window_to_tensor)
def split_inp_and_tgt(example):
"""Split inputs and target from the windowed seq and set shape & type."""
inputs = example.pop("inputs")
for k in example.keys():
example[k] = example[k][:seq_len]
example["inputs"] = inputs[:seq_len]
example["target"] = inputs[1:seq_len+1]
for k in example.keys():
example[k].set_shape((seq_len))
# type cast for example
type_cast(example, use_bfloat16)
return example
dataset = dataset.map(split_inp_and_tgt)
return dataset
def get_record_parser(offline_pos):
"""Config tfrecord parser."""
def parser(record):
"""function used to parse tfrecord."""
record_spec = {
"inputs": tf.VarLenFeature(tf.int64),
"type_id": tf.FixedLenFeature([1], tf.int64),
}
if offline_pos:
record_spec["pos_seq"] = tf.VarLenFeature(tf.int64)
# retrieve serialized example
example = tf.parse_single_example(
serialized=record,
features=record_spec)
inputs = example["inputs"]
inp_len = tf.shape(inputs)[0]
# expand type id to full length
example["type_id"] = tf.broadcast_to(example["type_id"], [inp_len])
if not offline_pos:
# generate position sequence online
example["pos_seq"] = tf.range(inp_len)
# convert all sparse example to dense
example = sparse_to_dense(example)
return example
return parser
def parse_record(dataset,
parser,
is_training,
num_threads=64,
file_shuffle_size=None,
record_shuffle_size=None):
"""Parse tfrecords in a dataset."""
if is_training:
# file-level shuffle
if file_shuffle_size and file_shuffle_size > 1:
tf.logging.info("File level shuffle with size %d", file_shuffle_size)
dataset = dataset.shuffle(file_shuffle_size)
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(8, file_shuffle_size)
tf.logging.info("Interleave %d files", cycle_length)
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=True,
cycle_length=cycle_length))
if record_shuffle_size and record_shuffle_size > 1:
tf.logging.info("Record level shuffle with size %d",
record_shuffle_size)
dataset = dataset.shuffle(buffer_size=record_shuffle_size)
dataset = dataset.map(parser, num_parallel_calls=num_threads)
dataset = dataset.cache().repeat()
else:
dataset = tf.data.TFRecordDataset(dataset)
dataset = dataset.map(parser)
return dataset
def sent_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=4096,
sequence_shuffle_size=2048):
"""Get sentence level LM dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(offline_pos=False),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = lm_process(dataset, seq_len, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
tf.logging.info("Seqeunce level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def semidoc_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256,
sequence_shuffle_size=2048):
# pylint: disable=g-doc-args
"""Get semi-doc level LM dataset.
Notes:
- Each sequence comes from the same document (except for boundary cases).
This is different from the standard sent-level LM dataset.
- No consecutivity is ensured across batches, which is different from the
standard doc-level LM dataset.
- Effectively, semi-doc dataset maintains short range (seq_len) dependency,
which is more random than doc-level and less random than sent-level.
Returns:
a tf.data.Dataset
"""
# pylint: enable=g-doc-args
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = lm_process(dataset, seq_len, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
tf.logging.info("Seqeunce level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def doc_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256):
"""Get document level LM dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Create dataset from file_paths
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
if len(file_paths) // bsz_per_core >= 2:
##### Enough input files, so do file-level sharding shard
tf.logging.info("Shard first")
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# Parse records
file_shuffle_size = (len(file_paths) + bsz_per_core - 1) // bsz_per_core
parse_shard = functools.partial(
parse_record,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=file_shuffle_size,
record_shuffle_size=record_shuffle_size)
shards = [parse_shard(dataset=shard) for shard in shards]
else:
##### Not enough input files, so do record-level sharding
tf.logging.info("Parse first")
# Parse records
dataset = parse_record(dataset,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_names),
record_shuffle_size=record_shuffle_size)
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# process each shard
process_shard = functools.partial(
lm_process, seq_len=seq_len, use_bfloat16=use_bfloat16)
shards = [process_shard(dataset=shard) for shard in shards]
# merge shards into a single batched dataset
def batch_zipped_dataset(*features):
"""Stack a list of homogeneous inputs from a zipped dataset into one."""
new_feature = {}
for key in features[0].keys():
tensor_list = [f[key] for f in features]
new_feature[key] = tf.stack(tensor_list, axis=0) # [sum bsz, length]
return new_feature
dataset = tf.data.Dataset.zip(tuple(shards))
dataset = dataset.map(batch_zipped_dataset)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def get_input_fn(
doc_dir,
semi_dir,
sent_dir,
split,
uncased,
seq_len,
bsz_per_host,
num_hosts=1,
num_core_per_host=1,
use_bfloat16=False,
**kwargs):
"""Create Estimator input function."""
def dir_to_paths(data_dir, data_type):
"""Get data file paths in the given dir."""
file_paths = []
if data_dir:
tf.logging.info("=" * 120)
case_str = "uncased." if uncased else ""
glob_base = "data.{}.{}.{}tfrecord*".format(split, data_type, case_str)
for idx, dir_path in enumerate(data_dir.split(",")):
glob = os.path.join(dir_path, glob_base)
cur_file_paths = sorted(tf.io.gfile.glob(glob))
file_paths += cur_file_paths
tf.logging.info("[%d] Data glob: %s", idx, glob)
tf.logging.info("[%d] Num of file path: %d", idx, len(cur_file_paths))
tf.logging.info("[%s] Total number of file path: %d", data_type,
len(file_paths))
return file_paths
doc_files = dir_to_paths(doc_dir, "doc")
semi_files = dir_to_paths(semi_dir, "doc")
sent_files = dir_to_paths(sent_dir, "sent")
file_list = [doc_files, semi_files, sent_files]
func_list = [doc_lm_dataset, semidoc_lm_dataset, sent_lm_dataset]
def input_fn(params):
"""Construct input function for TPUEstimator."""
assert params["batch_size"] * num_core_per_host == bsz_per_host
datasets = []
for files, func in zip(file_list, func_list):
if files:
cur_dataset = func(
params=params,
num_hosts=num_hosts,
num_core_per_host=num_core_per_host,
is_training=split == "train",
file_names=files,
seq_len=seq_len,
use_bfloat16=use_bfloat16,
**kwargs)
datasets.append(cur_dataset)
if len(datasets) > 1:
dataset = tf.data.experimental.sample_from_datasets(datasets)
elif len(datasets) == 1:
dataset = datasets[0]
return dataset
return input_fn
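# Hypothetical wiring sketch for TPUEstimator (paths and sizes below are
# illustrative placeholders, not real configuration values):
#   train_input_fn = get_input_fn(
#       doc_dir="gs://bucket/lm/doc", semi_dir=None, sent_dir=None,
#       split="train", uncased=True, seq_len=512, bsz_per_host=32)
#   estimator.train(input_fn=train_input_fn, max_steps=10000)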
|
py | 1a52ad772e16f4b3e5ac009c460742ca1c82be76 | # -*- coding: utf-8 -*-
"""Identity Services Engine updateNetworkAccessAuthorizationRuleById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorFd9E7E03A6056D1B6E9705E3096D946(object):
"""updateNetworkAccessAuthorizationRuleById request schema
definition."""
def __init__(self):
super(JSONSchemaValidatorFd9E7E03A6056D1B6E9705E3096D946, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"required": [
"href"
],
"type": "object"
},
"profile": {
"items": {
"type": "string"
},
"type": "array"
},
"rule": {
"properties": {
"condition": {
"properties": {
"attributeId": {
"type": "string"
},
"attributeName": {
"type": "string"
},
"attributeValue": {
"type": "string"
},
"children": {
"items": {
"properties": {
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"minItems": 2,
"type": "array"
},
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"description":
{
"default": "",
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"dictionaryValue": {
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"name": {
"type": "string"
},
"operator": {
"enum": [
"equals",
"notEquals",
"contains",
"notContains",
"matches",
"in",
"notIn",
"startsWith",
"notStartsWith",
"endsWith",
"notEndsWith",
"greaterThan",
"lessThan",
"greaterOrEquals",
"lessOrEquals",
"ipGreaterThan",
"ipLessThan",
"ipEquals",
"ipNotEquals"
],
"type": "string"
},
"weekDays": {
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"minItems": 1,
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"default": {
"default": false,
"type": "boolean"
},
"hitCounts": {
"type": "integer"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"rank": {
"type": "integer"
},
"state": {
"default": "enabled",
"enum": [
"enabled",
"disabled",
"monitor"
],
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"securityGroup": {
"type": "string"
}
},
"required": [
"rule"
],
"type": "object"
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
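# Hypothetical usage sketch: the payload below is a minimal request that
# satisfies the required "response"/"version" keys of the schema above.
#   JSONSchemaValidatorFd9E7E03A6056D1B6E9705E3096D946().validate({
#       "response": {"rule": {"name": "Authorization Rule 1"}},
#       "version": "1.0",
#   })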
|
py | 1a52ae17b187c78ad6d0dafc30179a2a3a14764c | """
File: exercise_6.6.py
Author: William Gatharia
This code demonstrates reducing data using lambda functions. The result is a single output.
Note: reduce runs from left to right
"""
#import reduce
from functools import reduce
#source data
source_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
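# reduce folds from the left, e.g. reduce(lambda x, y: x + y, [1, 2, 3])
# evaluates as ((1 + 2) + 3) == 6.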
print('----------------------Source data ----------------------------')
print(source_data)
print('----------------------Reduced data using addition ----------------------------')
print(reduce(lambda x, y: x + y, source_data))
print('----------------------Reduced data using multiplication ----------------------------')
print(reduce(lambda x, y: x * y, source_data)) |
py | 1a52aec1f848ecbb986a3dcc1c5886f2d23d09ae | import discord
import os
import datetime
from keep_alive import keep_alive
from discord.ext import commands
my_secret = os.environ['Token']
# client = discord.Client()
# test
bot = commands.Bot(command_prefix='!')
@bot.command(pass_context=True,
help="Update the role of user if you have Admin role eg: !updaterole 'xyz#0000' 'Admin'.",
brief="-Update the role of user."
)
@commands.has_role("Admin")
async def updaterole(ctx, user: discord.Member, role,help="This is role"):
member = user
print(member)
var = discord.utils.get(ctx.guild.roles, name = role)
print(var)
await member.add_roles(var)
await ctx.send(f'User `{member}` has been assigned the role {role}')
@bot.command(aliases=['make_role'], help="-Create a new role if you have the Manage Roles permission eg: !make_role 'XYZ' ",
brief="-Create a new role."
)
@commands.has_permissions(manage_roles=True) # Check if the user executing the command can manage roles
async def create_role(ctx, name):
guild = ctx.guild
await guild.create_role(name=name)
await ctx.send(f'Role `{name}` has been created')
@bot.command(name="Poro",help="-Type Poro with prefix of '!' to comunicate with me . ")
async def x(ctx):
emoji = '\N{THUMBS UP SIGN}'
# member = ctx.author
await ctx.send(f"Hello {ctx.author} am Poro and i like you {emoji}. :")
# guild = ctx.guild
# await guild.create_role(name="role name")
@bot.event
async def on_ready():
print(f"{bot.user.name} has connected With you !")
@bot.command(name="create_channel",help="-to create channel eg:!create_channel 'XYZ'")
@commands.has_role("Admin")
async def create_Channel(xx,channel_name):
guild=xx.guild
existing_channel=discord.utils.get(guild.channels, name=channel_name)
if not existing_channel:
print(f"created new channel:{channel_name}")
await guild.create_text_channel(channel_name)
await xx.send(f"i have create channel with name {channel_name} channel created by {xx.author} on {datetime.datetime.now()}")
# @client.event
# async def on_ready():
# print(f'{client.user.name} has connected to Discord!')
# @client.event
# async def on_member_join(member):
# await member.create_dm()
# await member.dm_channel.send(
# f'Hi {member.name}, welcome to my Discord server!'
# )
# # client.run(my_secret)
# @client.event
# async def on_message(message):
# if message.author== client.user:
# return
# if message.content.startswith("$hello"):
# await message.channel.send(f"Hello! {message.author}")
keep_alive()
bot.run(my_secret) |
py | 1a52aedc94214568a8c239b2aca80cb8c7428402 | # Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import testtools
import zuul.connection.gerrit
import zuul.connection.smtp
import zuul.reporter
import zuul.reporter.gerrit
import zuul.reporter.smtp
class TestSMTPReporter(testtools.TestCase):
log = logging.getLogger("zuul.test_reporter")
def setUp(self):
super(TestSMTPReporter, self).setUp()
def test_reporter_abc(self):
# We only need to instantiate a class for this
reporter = zuul.reporter.smtp.SMTPReporter({}) # noqa
def test_reporter_name(self):
self.assertEqual('smtp', zuul.reporter.smtp.SMTPReporter.name)
def test_repr(self):
smtp = zuul.connection.smtp.SMTPConnection('smtp.example.org', {})
self.assertEqual(
'<SMTPReporter connection: smtp://smtp.example.org>',
repr(zuul.reporter.smtp.SMTPReporter(connection=smtp)))
class TestGerritReporter(testtools.TestCase):
log = logging.getLogger("zuul.test_reporter")
def setUp(self):
super(TestGerritReporter, self).setUp()
def test_reporter_abc(self):
# We only need to instantiate a class for this
reporter = zuul.reporter.gerrit.GerritReporter(None) # noqa
def test_reporter_name(self):
self.assertEqual('gerrit', zuul.reporter.gerrit.GerritReporter.name)
def test_repr(self):
gerrit = zuul.connection.gerrit.GerritConnection(
'review.example.org',
{'server': 'review.example.org', 'user': 'zuul'})
self.assertEqual(
'<GerritReporter connection: gerrit://review.example.org>',
repr(zuul.reporter.gerrit.GerritReporter(connection=gerrit)))
|
py | 1a52af231cc5f9e5f78a4a40bd56231e225b70e8 | # Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
import mock
import pytest
def test_heartbeat_inactive_manager_active_rpc(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
manager.is_active = False
manager.heartbeat.return_value = True # because of active rpc
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
assert "Sent heartbeat" in caplog.text
assert "exiting" in caplog.text
def test_heartbeat_inactive_manager_inactive_rpc(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
manager.is_active = False
manager.heartbeat.return_value = False # because of inactive rpc
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
assert "Sent heartbeat" not in caplog.text
assert "exiting" in caplog.text
def test_heartbeat_stopped(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_.stop()
heartbeater_.heartbeat()
assert "Sent heartbeat" not in caplog.text
assert "exiting" in caplog.text
def make_sleep_mark_event_as_done(heartbeater):
# Make sleep actually trigger the done event so that heartbeat()
# exits at the end of the first run.
def trigger_done(timeout):
assert timeout
heartbeater._stop_event.set()
heartbeater._stop_event.wait = trigger_done
def test_heartbeat_once():
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
manager.heartbeat.assert_called_once()
@mock.patch("threading.Thread", autospec=True)
def test_start(thread):
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_.start()
thread.assert_called_once_with(
name=heartbeater._HEARTBEAT_WORKER_NAME, target=heartbeater_.heartbeat
)
thread.return_value.start.assert_called_once()
assert heartbeater_._thread is not None
@mock.patch("threading.Thread", autospec=True)
def test_start_already_started(thread):
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_._thread = mock.sentinel.thread
with pytest.raises(ValueError):
heartbeater_.start()
thread.assert_not_called()
def test_stop():
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
thread = mock.create_autospec(threading.Thread, instance=True)
heartbeater_._thread = thread
heartbeater_.stop()
assert heartbeater_._stop_event.is_set()
thread.join.assert_called_once()
assert heartbeater_._thread is None
def test_stop_no_join():
heartbeater_ = heartbeater.Heartbeater(mock.sentinel.manager)
heartbeater_.stop()
|
py | 1a52af3eaf13d854bcf5c12680aead8a0e96b6d8 | # Copyright (c) 2019 Toyota Research Institute. All rights reserved.
"""
This module provides objects related to the discovery of
new crystal structures using structural domains.
"""
import pandas as pd
import os
from datetime import datetime
from monty.serialization import dumpfn
from camd.domain import StructureDomain, heuristic_setup
from camd.agent.stability import AgentStabilityAdaBoost
from camd.agent.base import RandomAgent
from camd.experiment.base import ATFSampler
from camd.campaigns.base import Campaign
from camd import CAMD_TEST_FILES, CAMD_S3_BUCKET, __version__
from camd.utils.data import load_dataframe, s3_sync
from camd.analysis import StabilityAnalyzer
from camd.experiment.dft import OqmdDFTonMC1
from sklearn.neural_network import MLPRegressor
import pickle
class ProtoDFTCampaign(Campaign):
"""
Subclass of Campaign which implements custom methods
and factories for constructing prototype-generation
stability campaigns for materials discovery with DFT
experiments
"""
@classmethod
def from_chemsys(cls, chemsys, prefix="proto-dft-2/runs"):
"""
Class factory method for constructing campaign from
chemsys.
Args:
chemsys (str): chemical system for the campaign
prefix (str): prefix for s3
Returns:
(ProtoDFTCampaign): Standard proto-dft campaign from
the chemical system
"""
s3_prefix = "{}/{}".format(prefix, chemsys)
# Initialize s3
dumpfn({"started": datetime.now().isoformat(),
"version": __version__}, "start.json")
s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
# Get structure domain
element_list = chemsys.split('-')
max_coeff, charge_balanced = heuristic_setup(element_list)
domain = StructureDomain.from_bounds(
element_list, charge_balanced=charge_balanced,
n_max_atoms=20, **{'grid': range(1, max_coeff)})
candidate_data = domain.candidates()
# Dump structure/candidate data
with open('candidate_data.pickle', 'wb') as f:
pickle.dump(candidate_data, f)
s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
# Set up agents and loop parameters
agent = AgentStabilityAdaBoost(
model=MLPRegressor(hidden_layer_sizes=(84, 50)),
n_query=10,
hull_distance=0.2,
exploit_fraction=1.0,
uncertainty=True,
alpha=0.5,
diversify=True,
n_estimators=20
)
analyzer = StabilityAnalyzer(hull_distance=0.2)
experiment = OqmdDFTonMC1(timeout=30000)
seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
# Construct and start loop
return cls(
candidate_data=candidate_data, agent=agent, experiment=experiment,
analyzer=analyzer, seed_data=seed_data,
heuristic_stopper=5, s3_prefix=s3_prefix
)
def autorun(self):
"""
Method for running this campaign automatically
Returns:
None
"""
n_max_iter = n_max_iter_heuristics(
len(self.candidate_data), 10)
self.auto_loop(
n_iterations=n_max_iter, monitor=True,
initialize=True, save_iterations=True
)
class CloudATFCampaign(Campaign):
"""
Simple subclass for cloud-based ATF, mostly for testing
"""
@classmethod
def from_chemsys(cls, chemsys):
"""
Args:
chemsys:
Returns:
"""
s3_prefix = "oqmd-atf/runs/{}".format(chemsys)
df = pd.read_csv(os.path.join(CAMD_TEST_FILES, 'test_df.csv'))
n_seed = 200 # Starting sample size
n_query = 10 # This many new candidates are "calculated with DFT" (i.e. requested from Oracle -- DFT)
agent = RandomAgent(n_query=n_query)
analyzer = StabilityAnalyzer(hull_distance=0.05)
experiment = ATFSampler(dataframe=df)
candidate_data = df
return cls(candidate_data, agent, experiment, analyzer,
create_seed=n_seed, s3_prefix=s3_prefix)
def autorun(self):
"""
Runs campaign with standard parameters
Returns:
None
"""
self.auto_loop(initialize=True, n_iterations=3)
return True
def n_max_iter_heuristics(n_data, n_query, low_bound=5, up_bound=20):
"""
Helper method to define maximum number of iterations for
a given campaign. This is based on the empirical evidence
in various systems >90% of stable materials are identified
when 25% of candidates are tested. We also enforce upper
and lower bounds of 20 and 5 to avoid edge cases with too
many or too few calculations to run.
Args:
n_data (int): number of data points in candidate space
n_query (int): number of queries allowed in each iteration
low_bound (int): lower bound allowed for n_max_iter
up_bound (int): upper bound allowed for n_max_iter
Returns:
maximum number of iterations as integer
"""
_target = round(n_data * 0.25/n_query)
if _target < low_bound:
return low_bound
else:
return min(_target, up_bound)
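# Worked example of the heuristic: with n_data=1000 and n_query=10,
# round(1000 * 0.25 / 10) = 25, which is then capped at up_bound=20.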
|
py | 1a52afce8b75e3a344a5d50b35fa2bc039d90b19 | # Copyright (c) 2020, 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
import os
# version
VERSION_TAG = "8.0.28"
MIN_SUPPORTED_VERSION = "8.0.24"
MAX_SUPPORTED_VERSION = "8.0.28"
# image
IMAGE_REGISTRY = os.getenv(
"OPERATOR_TEST_REGISTRY", default=None)
IMAGE_REPOSITORY = os.getenv(
"OPERATOR_TEST_REPOSITORY", default="mysql")
# operator
OPERATOR_IMAGE_NAME = os.getenv(
"OPERATOR_TEST_IMAGE_NAME", default="mysql-operator")
OPERATOR_EE_IMAGE_NAME = os.getenv(
# "OPERATOR_TEST_EE_IMAGE_NAME", default="mysql-operator-commercial")
"OPERATOR_TEST_EE_IMAGE_NAME", default="mysql-operator")
OPERATOR_VERSION_TAG = os.getenv(
"OPERATOR_TEST_VERSION_TAG", default="8.0.28-2.0.3")
OPERATOR_PULL_POLICY = os.getenv(
"OPERATOR_TEST_PULL_POLICY", default="IfNotPresent")
OPERATOR_GR_IP_WHITELIST = os.getenv(
"OPERATOR_TEST_GR_IP_WHITELIST", default="172.17.0.0/8")
# server
SERVER_VERSION_TAG = VERSION_TAG
SERVER_IMAGE_NAME = "mysql-server"
SERVER_EE_IMAGE_NAME = "enterprise-server"
# SERVER_EE_IMAGE_NAME = "mysql-server"
# router
ROUTER_VERSION_TAG = VERSION_TAG
ROUTER_IMAGE_NAME = "mysql-router"
ROUTER_EE_IMAGE_NAME = "enterprise-router"
# ROUTER_EE_IMAGE_NAME = "mysql-router"
# oci
OCI_SKIP = os.getenv(
"OPERATOR_TEST_SKIP_OCI", default=False)
OCI_BACKUP_APIKEY_PATH = os.getenv(
"OPERATOR_TEST_BACKUP_OCI_APIKEY_PATH", default=None)
OCI_RESTORE_APIKEY_PATH = os.getenv(
"OPERATOR_TEST_RESTORE_OCI_APIKEY_PATH", default=None)
OCI_BACKUP_BUCKET = os.getenv(
"OPERATOR_TEST_BACKUP_OCI_BUCKET", default=None)
|
py | 1a52afd5c68ffc2b196402f0c9abc4d9281dcc56 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "prometheus", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-prometheus',
version=ABOUT["__version__"],
description='The prometheus check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent prometheus check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='New BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.prometheus'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
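# Packaging sketch (standard setuptools workflow, assumed rather than documented here):
# running `pip install -e .` in this directory installs the check in editable mode,
# pulling in datadog_checks_base as declared in install_requires.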
|
py | 1a52b181cc52685261d60c122f3af71f048ff8f4 | # -*- coding: utf-8 -*-
import logging, urllib.request, time
from django.utils.translation import gettext as _
from django.utils.timezone import now
from crontab_monitor.models import SelectOption, single_entry_point
def single_entry_point_of_crontab(*args, **kw):
lg = logging.getLogger('django-crontab-monitor')
kw['executed_from'] = kw.get('executed_from', 'crontab')
single_entry_point(*args, **kw)
message = 'Done from single_entry_point_of_crontab'
lg.info(message)
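# Usage sketch (hypothetical invocation; the real crontab wiring is configured elsewhere):
#   single_entry_point_of_crontab(executed_from='manual-run')
# simply forwards to crontab_monitor.models.single_entry_point and logs completion.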
def check_outside_web(alert_log, *args, web_urls='https://www.google.com/|https://www.ho600.com/', **kw):
lg = logging.getLogger('django-crontab-monitor')
lg.debug("alert_log id: {}".format(alert_log.id))
lg.debug("web_urls: {}".format(web_urls))
web_urls = web_urls.split('|')
title = _('No alarm, just logging')
status = SelectOption.objects.get(swarm='alert-log-status', value='LOG')
mail_body = "Executed from {}\n".format(kw.get('executed_from', '__none__'))
mail_body += "args: {}\n".format(args)
mail_body += "kw: {}\n".format(kw)
t0 = time.time()
for url in web_urls:
lg.debug("url: {}".format(url))
try:
res = urllib.request.urlopen(url)
except Exception as e:
status = SelectOption.objects.get(swarm='alert-log-status', value='ALARM')
title = _('Alarm on {url}').format(url=url)
mail_body += 'Exception: {}\n'.format(e)
else:
if res.status == 200:
t1 = time.time()
mail_body += 'Duration of {}: {} seconds\n'.format(url, t1-t0)
t0 = t1
else:
title = _('Alarm on {url}').format(url=url)
status = SelectOption.objects.get(swarm='alert-log-status', value='ALARM')
mail_body += '{} Error: {}\n'.format(res.status, res.read())
if status.value != 'LOG':
break
for receiver in alert_log.inspection.get_receive_notification_users():
alert_log.receivers.add(receiver)
alert_log.title = title
alert_log.mail_body = mail_body
alert_log.status = status
alert_log.executed_end_time = now()
alert_log.save()
lg.info("title: {}".format(alert_log.title))
lg.info("status: {}".format(alert_log.status)) |
py | 1a52b2259918116ecc4ef341615e1f8f83a6d564 | """
Django settings for carpediem project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g=g68l-%q4_1g_+_w*l*3-3*dw@873ma70=a$&3hmvh5269z2y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'home',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'carpediem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'static'),
BASE_DIR / "static",
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'carpediem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
] |
py | 1a52b2fb547952ad0324ad4721f284f1cc21ac06 | """Test the arraymodule.
Roger E. Masse
"""
import unittest
from test import support
from test.support import _2G
import weakref
import pickle
import operator
import struct
import sys
import warnings
import array
# from array import _array_reconstructor as array_reconstructor # XXX: RUSTPYTHON
# sizeof_wchar = array.array('u').itemsize # XXX: RUSTPYTHON
class ArraySubclass(array.array):
pass
class ArraySubclassWithKwargs(array.array):
def __init__(self, typecode, newarg=None):
array.array.__init__(self)
# TODO: RUSTPYTHON
# We do not support typecode 'u' for unicode yet
# typecodes = 'ubBhHiIlLfdqQ'
typecodes = 'bBhHiIlLfdqQ'
class MiscTest(unittest.TestCase):
def test_bad_constructor(self):
self.assertRaises(TypeError, array.array)
self.assertRaises(TypeError, array.array, spam=42)
self.assertRaises(TypeError, array.array, 'xx')
self.assertRaises(ValueError, array.array, 'x')
def test_empty(self):
# Exercise code for handling zero-length arrays
a = array.array('B')
a[:] = a
self.assertEqual(len(a), 0)
self.assertEqual(len(a + a), 0)
self.assertEqual(len(a * 3), 0)
a += a
self.assertEqual(len(a), 0)
# Machine format codes.
#
# Search for "enum machine_format_code" in Modules/arraymodule.c to get the
# authoritative values.
UNKNOWN_FORMAT = -1
UNSIGNED_INT8 = 0
SIGNED_INT8 = 1
UNSIGNED_INT16_LE = 2
UNSIGNED_INT16_BE = 3
SIGNED_INT16_LE = 4
SIGNED_INT16_BE = 5
UNSIGNED_INT32_LE = 6
UNSIGNED_INT32_BE = 7
SIGNED_INT32_LE = 8
SIGNED_INT32_BE = 9
UNSIGNED_INT64_LE = 10
UNSIGNED_INT64_BE = 11
SIGNED_INT64_LE = 12
SIGNED_INT64_BE = 13
IEEE_754_FLOAT_LE = 14
IEEE_754_FLOAT_BE = 15
IEEE_754_DOUBLE_LE = 16
IEEE_754_DOUBLE_BE = 17
UTF16_LE = 18
UTF16_BE = 19
UTF32_LE = 20
UTF32_BE = 21
class ArrayReconstructorTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_error(self):
self.assertRaises(TypeError, array_reconstructor,
"", "b", 0, b"")
self.assertRaises(TypeError, array_reconstructor,
str, "b", 0, b"")
self.assertRaises(TypeError, array_reconstructor,
array.array, "b", '', b"")
self.assertRaises(TypeError, array_reconstructor,
array.array, "b", 0, "")
self.assertRaises(ValueError, array_reconstructor,
array.array, "?", 0, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "b", UNKNOWN_FORMAT, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "b", 22, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "d", 16, b"a")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_numbers(self):
testcases = (
(['B', 'H', 'I', 'L'], UNSIGNED_INT8, '=BBBB',
[0x80, 0x7f, 0, 0xff]),
(['b', 'h', 'i', 'l'], SIGNED_INT8, '=bbb',
[-0x80, 0x7f, 0]),
(['H', 'I', 'L'], UNSIGNED_INT16_LE, '<HHHH',
[0x8000, 0x7fff, 0, 0xffff]),
(['H', 'I', 'L'], UNSIGNED_INT16_BE, '>HHHH',
[0x8000, 0x7fff, 0, 0xffff]),
(['h', 'i', 'l'], SIGNED_INT16_LE, '<hhh',
[-0x8000, 0x7fff, 0]),
(['h', 'i', 'l'], SIGNED_INT16_BE, '>hhh',
[-0x8000, 0x7fff, 0]),
(['I', 'L'], UNSIGNED_INT32_LE, '<IIII',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['I', 'L'], UNSIGNED_INT32_BE, '>IIII',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['i', 'l'], SIGNED_INT32_LE, '<iii',
[-1<<31, (1<<31)-1, 0]),
(['i', 'l'], SIGNED_INT32_BE, '>iii',
[-1<<31, (1<<31)-1, 0]),
(['L'], UNSIGNED_INT64_LE, '<QQQQ',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['L'], UNSIGNED_INT64_BE, '>QQQQ',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['l'], SIGNED_INT64_LE, '<qqq',
[-1<<31, (1<<31)-1, 0]),
(['l'], SIGNED_INT64_BE, '>qqq',
[-1<<31, (1<<31)-1, 0]),
# The following tests for INT64 will raise an OverflowError
# when run on a 32-bit machine. The tests are simply skipped
# in that case.
(['L'], UNSIGNED_INT64_LE, '<QQQQ',
[1<<63, (1<<63)-1, 0, (1<<64)-1]),
(['L'], UNSIGNED_INT64_BE, '>QQQQ',
[1<<63, (1<<63)-1, 0, (1<<64)-1]),
(['l'], SIGNED_INT64_LE, '<qqq',
[-1<<63, (1<<63)-1, 0]),
(['l'], SIGNED_INT64_BE, '>qqq',
[-1<<63, (1<<63)-1, 0]),
(['f'], IEEE_754_FLOAT_LE, '<ffff',
[16711938.0, float('inf'), float('-inf'), -0.0]),
(['f'], IEEE_754_FLOAT_BE, '>ffff',
[16711938.0, float('inf'), float('-inf'), -0.0]),
(['d'], IEEE_754_DOUBLE_LE, '<dddd',
[9006104071832581.0, float('inf'), float('-inf'), -0.0]),
(['d'], IEEE_754_DOUBLE_BE, '>dddd',
[9006104071832581.0, float('inf'), float('-inf'), -0.0])
)
for testcase in testcases:
valid_typecodes, mformat_code, struct_fmt, values = testcase
arraystr = struct.pack(struct_fmt, *values)
for typecode in valid_typecodes:
try:
a = array.array(typecode, values)
except OverflowError:
continue # Skip this test case.
b = array_reconstructor(
array.array, typecode, mformat_code, arraystr)
self.assertEqual(a, b,
msg="{0!r} != {1!r}; testcase={2!r}".format(a, b, testcase))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unicode(self):
teststr = "Bonne Journ\xe9e \U0002030a\U00020347"
testcases = (
(UTF16_LE, "UTF-16-LE"),
(UTF16_BE, "UTF-16-BE"),
(UTF32_LE, "UTF-32-LE"),
(UTF32_BE, "UTF-32-BE")
)
for testcase in testcases:
mformat_code, encoding = testcase
a = array.array('u', teststr)
b = array_reconstructor(
array.array, 'u', mformat_code, teststr.encode(encoding))
self.assertEqual(a, b,
msg="{0!r} != {1!r}; testcase={2!r}".format(a, b, testcase))
class BaseTest:
# Required class attributes (provided by subclasses):
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
def assertEntryEqual(self, entry1, entry2):
self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
self.assertGreaterEqual(a.itemsize, self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
def test_buffer_info(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assertIsInstance(bi, tuple)
self.assertEqual(len(bi), 2)
self.assertIsInstance(bi[0], int)
self.assertIsInstance(bi[1], int)
self.assertEqual(bi[1], len(a))
def test_byteswap(self):
if self.typecode == 'u':
example = '\U00100100'
else:
example = self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
b.byteswap()
self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reduce_ex(self):
a = array.array(self.typecode, self.example)
for protocol in range(3):
self.assertIs(a.__reduce_ex__(protocol)[0], array.array)
for protocol in range(3, pickle.HIGHEST_PROTOCOL + 1):
self.assertIs(a.__reduce_ex__(protocol)[0], array_reconstructor)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle(self):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode, self.example)
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode, self.example)
a.x = 10
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle_for_empty_array(self):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode)
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode)
a.x = 10
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iterator_pickle(self):
orig = array.array(self.typecode, self.example)
data = list(orig)
data2 = data[::-1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data + data2)
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[1:] + data2)
# empty iterator
for i in range(1, len(data)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data2)
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(list(it), [])
def test_exhausted_iterator(self):
a = array.array(self.typecode, self.example)
self.assertEqual(list(a), list(self.example))
exhit = iter(a)
empit = iter(a)
for x in exhit: # exhaust the iterator
next(empit) # not exhausted
a.append(self.outside)
self.assertEqual(list(exhit), [])
self.assertEqual(list(empit), [self.outside])
self.assertEqual(list(a), list(self.example) + [self.outside])
def test_insert(self):
a = array.array(self.typecode, self.example)
a.insert(0, self.example[0])
self.assertEqual(len(a), 1+len(self.example))
self.assertEqual(a[0], a[1])
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
self.assertRaises(TypeError, a.insert, 0, None)
a = array.array(self.typecode, self.example)
a.insert(-1, self.example[0])
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:-1] + self.example[:1] + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a.insert(-1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a.insert(1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[:1])
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromfile(self):
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.tofile)
support.unlink(support.TESTFN)
f = open(support.TESTFN, 'wb')
try:
a.tofile(f)
f.close()
b = array.array(self.typecode)
f = open(support.TESTFN, 'rb')
self.assertRaises(TypeError, b.fromfile)
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
self.assertRaises(EOFError, b.fromfile, f, len(self.example)+1)
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fromfile_ioerror(self):
# Issue #5395: Check if fromfile raises a proper OSError
# instead of EOFError.
a = array.array(self.typecode)
f = open(support.TESTFN, 'wb')
try:
self.assertRaises(OSError, a.fromfile, f, len(self.example))
finally:
f.close()
support.unlink(support.TESTFN)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filewrite(self):
a = array.array(self.typecode, 2*self.example)
f = open(support.TESTFN, 'wb')
try:
f.write(a)
f.close()
b = array.array(self.typecode)
f = open(support.TESTFN, 'rb')
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_tofromlist(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tolist, 42)
self.assertRaises(TypeError, b.fromlist)
self.assertRaises(TypeError, b.fromlist, 42)
self.assertRaises(TypeError, b.fromlist, [None])
b.fromlist(a.tolist())
self.assertEqual(a, b)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromstring(self):
# Warnings not raised when arguments are incorrect as Argument Clinic
# handles that before the warning can be raised.
nb_warnings = 2
with warnings.catch_warnings(record=True) as r:
warnings.filterwarnings("always",
message=r"(to|from)string\(\) is deprecated",
category=DeprecationWarning)
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tostring, 42)
self.assertRaises(TypeError, b.fromstring)
self.assertRaises(TypeError, b.fromstring, 42)
b.fromstring(a.tostring())
self.assertEqual(a, b)
if a.itemsize>1:
self.assertRaises(ValueError, b.fromstring, "x")
nb_warnings += 1
self.assertEqual(len(r), nb_warnings)
def test_tofrombytes(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tobytes, 42)
self.assertRaises(TypeError, b.frombytes)
self.assertRaises(TypeError, b.frombytes, 42)
b.frombytes(a.tobytes())
c = array.array(self.typecode, bytearray(a.tobytes()))
self.assertEqual(a, b)
self.assertEqual(a, c)
if a.itemsize>1:
self.assertRaises(ValueError, b.frombytes, b"x")
def test_fromarray(self):
a = array.array(self.typecode, self.example)
b = array.array(self.typecode, a)
self.assertEqual(a, b)
def test_repr(self):
a = array.array(self.typecode, 2*self.example)
self.assertEqual(a, eval(repr(a), {"array": array.array}))
a = array.array(self.typecode)
self.assertEqual(repr(a), "array('%s')" % self.typecode)
def test_str(self):
a = array.array(self.typecode, 2*self.example)
str(a)
def test_cmp(self):
a = array.array(self.typecode, self.example)
self.assertIs(a == 42, False)
self.assertIs(a != 42, True)
self.assertIs(a == a, True)
self.assertIs(a != a, False)
self.assertIs(a < a, False)
self.assertIs(a <= a, True)
self.assertIs(a > a, False)
self.assertIs(a >= a, True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
self.assertIs(a == 2*a, False)
self.assertIs(a != 2*a, True)
self.assertIs(a < 2*a, True)
self.assertIs(a <= 2*a, True)
self.assertIs(a > 2*a, False)
self.assertIs(a >= 2*a, False)
self.assertIs(a == al, False)
self.assertIs(a != al, True)
self.assertIs(a < al, False)
self.assertIs(a <= al, False)
self.assertIs(a > al, True)
self.assertIs(a >= al, True)
self.assertIs(a == ab, False)
self.assertIs(a != ab, True)
self.assertIs(a < ab, True)
self.assertIs(a <= ab, True)
self.assertIs(a > ab, False)
self.assertIs(a >= ab, False)
def test_add(self):
a = array.array(self.typecode, self.example) \
+ array.array(self.typecode, self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[::-1])
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__add__, "bad")
def test_iadd(self):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
)
a = array.array(self.typecode, self.example)
a += a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__iadd__, "bad")
def test_mul(self):
a = 5*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a = array.array(self.typecode, self.example)*5
self.assertEqual(
a,
array.array(self.typecode, self.example*5)
)
a = 0*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = (-1)*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = 5 * array.array(self.typecode, self.example[:1])
self.assertEqual(
a,
array.array(self.typecode, [a[0]] * 5)
)
self.assertRaises(TypeError, a.__mul__, "bad")
def test_imul(self):
a = array.array(self.typecode, self.example)
b = a
a *= 5
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
a *= -1
self.assertEqual(a, array.array(self.typecode))
self.assertRaises(TypeError, a.__imul__, "bad")
def test_getitem(self):
a = array.array(self.typecode, self.example)
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
self.assertEntryEqual(a[-len(self.example)], self.example[0])
self.assertRaises(TypeError, a.__getitem__)
self.assertRaises(IndexError, a.__getitem__, len(self.example))
self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
def test_setitem(self):
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[len(self.example)-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-len(self.example)] = a[-1]
self.assertEntryEqual(a[0], a[-1])
self.assertRaises(TypeError, a.__setitem__)
self.assertRaises(TypeError, a.__setitem__, None)
self.assertRaises(TypeError, a.__setitem__, 0, None)
self.assertRaises(
IndexError,
a.__setitem__,
len(self.example), self.example[0]
)
self.assertRaises(
IndexError,
a.__setitem__,
-len(self.example)-1, self.example[0]
)
def test_delitem(self):
a = array.array(self.typecode, self.example)
del a[0]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
a = array.array(self.typecode, self.example)
del a[-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[len(self.example)-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[-len(self.example)]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
self.assertRaises(TypeError, a.__delitem__)
self.assertRaises(TypeError, a.__delitem__, None)
self.assertRaises(IndexError, a.__delitem__, len(self.example))
self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
def test_getslice(self):
a = array.array(self.typecode, self.example)
self.assertEqual(a[:], a)
self.assertEqual(
a[1:],
array.array(self.typecode, self.example[1:])
)
self.assertEqual(
a[:1],
array.array(self.typecode, self.example[:1])
)
self.assertEqual(
a[:-1],
array.array(self.typecode, self.example[:-1])
)
self.assertEqual(
a[-1:],
array.array(self.typecode, self.example[-1:])
)
self.assertEqual(
a[-1:-1],
array.array(self.typecode)
)
self.assertEqual(
a[2:1],
array.array(self.typecode)
)
self.assertEqual(
a[1000:],
array.array(self.typecode)
)
self.assertEqual(a[-1000:], a)
self.assertEqual(a[:1000], a)
self.assertEqual(
a[:-1000],
array.array(self.typecode)
)
self.assertEqual(a[-1000:1000], a)
self.assertEqual(
a[2000:1000],
array.array(self.typecode)
)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing
# (Assumes list conversion works correctly, too)
a = array.array(self.typecode, self.example)
indices = (0, None, 1, 3, 19, 100, sys.maxsize, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
self.assertEqual(list(a[start:stop:step]),
list(a)[start:stop:step])
def test_setslice(self):
a = array.array(self.typecode, self.example)
a[:1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[:-1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[-1:])
)
a = array.array(self.typecode, self.example)
a[-1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:-1] = a
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:1] + self.example + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a[1000:] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[-1000:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:1000] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:-1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[1:0] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[2000:1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, sys.maxsize, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
a = array.array(self.typecode, self.example)
L = list(a)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
a[start:stop:step] = array.array(self.typecode, data)
self.assertEqual(a, array.array(self.typecode, L))
del L[start:stop:step]
del a[start:stop:step]
self.assertEqual(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
def test_remove(self):
for x in self.example:
example = 2*self.example
a = array.array(self.typecode, example)
pos = example.index(x)
example2 = example[:pos] + example[pos+1:]
a.remove(x)
self.assertEqual(a, array.array(self.typecode, example2))
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.remove, self.outside)
self.assertRaises(ValueError, a.remove, None)
def test_pop(self):
a = array.array(self.typecode)
self.assertRaises(IndexError, a.pop)
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.pop, 42, 42)
self.assertRaises(TypeError, a.pop, None)
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
self.assertEntryEqual(a.pop(0), self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:]+self.example)
)
self.assertEntryEqual(a.pop(1), self.example[2])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
)
self.assertEntryEqual(a.pop(0), self.example[1])
self.assertEntryEqual(a.pop(), self.example[-1])
self.assertEqual(
a,
array.array(self.typecode, self.example[3:]+self.example[:-1])
)
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
def test_extend(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.extend)
a.extend(array.array(self.typecode, self.example[::-1]))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
a = array.array(self.typecode, self.example)
a.extend(a)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.extend, b)
a = array.array(self.typecode, self.example)
a.extend(self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
def test_constructor_with_iterable_argument(self):
a = array.array(self.typecode, iter(self.example))
b = array.array(self.typecode, self.example)
self.assertEqual(a, b)
# non-iterable argument
self.assertRaises(TypeError, array.array, self.typecode, 10)
# pass through errors raised in __iter__
class A:
def __iter__(self):
raise UnicodeError
self.assertRaises(UnicodeError, array.array, self.typecode, A())
# pass through errors raised in next()
def B():
raise UnicodeError
yield None
self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
try:
import gc
except ImportError:
self.skipTest('gc module not available')
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
gc.collect()
def test_buffer(self):
a = array.array(self.typecode, self.example)
m = memoryview(a)
expected = m.tobytes()
self.assertEqual(a.tobytes(), expected)
self.assertEqual(a.tobytes()[0], expected[0])
# Resizing is forbidden when there are buffer exports.
# For issue 4509, we also check after each error that
# the array was not modified.
self.assertRaises(BufferError, a.append, a[0])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.extend, a[0:1])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.remove, a[0])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.pop, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.fromlist, a.tolist())
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.frombytes, a.tobytes())
self.assertEqual(m.tobytes(), expected)
if self.typecode == 'u':
self.assertRaises(BufferError, a.fromunicode, a.tounicode())
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.imul, a, 2)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.imul, a, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.setitem, a, slice(0, 0), a)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.delitem, a, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.delitem, a, slice(0, 1))
self.assertEqual(m.tobytes(), expected)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_weakref(self):
s = array.array(self.typecode, self.example)
p = weakref.proxy(s)
self.assertEqual(p.tobytes(), s.tobytes())
s = None
self.assertRaises(ReferenceError, len, p)
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def test_bug_782369(self):
for i in range(10):
b = array.array('B', range(64))
rc = sys.getrefcount(10)
for i in range(10):
b = array.array('B', range(64))
self.assertEqual(rc, sys.getrefcount(10))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
ArraySubclassWithKwargs('b', newarg=1)
def test_create_from_bytes(self):
# XXX This test probably needs to be moved in a subclass or
# generalized to use self.typecode.
a = array.array('H', b"1234")
self.assertEqual(len(a) * a.itemsize, 4)
@support.cpython_only
def test_sizeof_with_buffer(self):
a = array.array(self.typecode, self.example)
basesize = support.calcvobjsize('Pn2Pi')
buffer_size = a.buffer_info()[1] * a.itemsize
support.check_sizeof(self, a, basesize + buffer_size)
@support.cpython_only
def test_sizeof_without_buffer(self):
a = array.array(self.typecode)
basesize = support.calcvobjsize('Pn2Pi')
support.check_sizeof(self, a, basesize)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialize_with_unicode(self):
if self.typecode != 'u':
with self.assertRaises(TypeError) as cm:
a = array.array(self.typecode, 'foo')
self.assertIn("cannot use a str", str(cm.exception))
with self.assertRaises(TypeError) as cm:
a = array.array(self.typecode, array.array('u', 'foo'))
self.assertIn("cannot use a unicode array", str(cm.exception))
else:
a = array.array(self.typecode, "foo")
a = array.array(self.typecode, array.array('u', 'foo'))
@support.cpython_only
def test_obsolete_write_lock(self):
from _testcapi import getbuffer_with_null_view
a = array.array('B', b"")
self.assertRaises(BufferError, getbuffer_with_null_view, a)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, array.array,
(self.typecode,))
support.check_free_after_iterating(self, reversed, array.array,
(self.typecode,))
class StringTest(BaseTest):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setitem(self):
super().test_setitem()
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, 0, self.example[:2])
class UnicodeTest(StringTest, unittest.TestCase):
typecode = 'u'
example = '\x01\u263a\x00\ufeff'
smallerexample = '\x01\u263a\x00\ufefe'
biggerexample = '\x01\u263a\x01\ufeff'
outside = str('\x33')
minitemsize = 2
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_add(self):
super().test_add()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_buffer(self):
super().test_buffer()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_buffer_info(self):
super().test_buffer_info()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_byteswap(self):
super().test_byteswap()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp(self):
super().test_cmp()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
super().test_constructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor_with_iterable_argument(self):
super().test_constructor_with_iterable_argument()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_copy(self):
super().test_copy()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count(self):
super().test_count()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_coveritertraverse(self):
super().test_coveritertraverse()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_deepcopy(self):
super().test_deepcopy()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_delitem(self):
super().test_delitem()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exhausted_iterator(self):
super().test_exhausted_iterator()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extend(self):
super().test_extend()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extended_getslice(self):
super().test_extended_getslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extended_set_del_slice(self):
super().test_extended_set_del_slice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fromarray(self):
super().test_fromarray()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_getitem(self):
super().test_getitem()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_getslice(self):
super().test_getslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iadd(self):
super().test_iadd()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_imul(self):
super().test_imul()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_index(self):
super().test_index()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_insert(self):
super().test_insert()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_len(self):
super().test_len()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mul(self):
super().test_mul()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pop(self):
super().test_pop()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_remove(self):
super().test_remove()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reverse(self):
super().test_reverse()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setslice(self):
super().test_setslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_str(self):
super().test_str()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofrombytes(self):
super().test_tofrombytes()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromlist(self):
super().test_tofromlist()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unicode(self):
self.assertRaises(TypeError, array.array, 'b', 'foo')
a = array.array('u', '\xa0\xc2\u1234')
a.fromunicode(' ')
a.fromunicode('')
a.fromunicode('')
a.fromunicode('\x11abc\xff\u1234')
s = a.tounicode()
self.assertEqual(s, '\xa0\xc2\u1234 \x11abc\xff\u1234')
self.assertEqual(a.itemsize, sizeof_wchar)
s = '\x00="\'a\\b\x80\xff\u0000\u0001\u1234'
a = array.array('u', s)
self.assertEqual(
repr(a),
"array('u', '\\x00=\"\\'a\\\\b\\x80\xff\\x00\\x01\u1234')")
self.assertRaises(TypeError, a.fromunicode)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue17223(self):
# this used to crash
if sizeof_wchar == 4:
# U+FFFFFFFF is an invalid code point in Unicode 6.0
invalid_str = b'\xff\xff\xff\xff'
else:
# PyUnicode_FromUnicode() cannot fail with 16-bit wchar_t
self.skipTest("specific to 32-bit wchar_t")
a = array.array('u', invalid_str)
self.assertRaises(ValueError, a.tounicode)
self.assertRaises(ValueError, str, a)
class NumberTest(BaseTest):
def test_extslice(self):
a = array.array(self.typecode, range(5))
self.assertEqual(a[::], a)
self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100:100:2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
def test_delslice(self):
a = array.array(self.typecode, range(5))
del a[::2]
self.assertEqual(a, array.array(self.typecode, [1,3]))
a = array.array(self.typecode, range(5))
del a[1::2]
self.assertEqual(a, array.array(self.typecode, [0,2,4]))
a = array.array(self.typecode, range(5))
del a[1::-2]
self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
a = array.array(self.typecode, range(10))
del a[::1000]
self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
# test issue7788
a = array.array(self.typecode, range(10))
del a[9::1<<333]
def test_assignment(self):
a = array.array(self.typecode, range(10))
a[::2] = array.array(self.typecode, [42]*5)
self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
a = array.array(self.typecode, range(10))
a[::-4] = array.array(self.typecode, [10]*3)
self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = array.array(self.typecode, range(4))
a[::-1] = a
self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
a = array.array(self.typecode, range(10))
b = a[:]
c = a[:]
ins = array.array(self.typecode, range(2))
a[2:3] = ins
b[slice(2,3)] = ins
c[2:3:] = ins
def test_iterationcontains(self):
a = array.array(self.typecode, range(10))
self.assertEqual(list(a), list(range(10)))
b = array.array(self.typecode, [20])
self.assertEqual(a[-1] in a, True)
self.assertEqual(b[0] not in a, True)
def check_overflow(self, lower, upper):
# method to be used by subclasses
# should not overflow assigning lower limit
a = array.array(self.typecode, [lower])
a[0] = lower
# should overflow assigning less than lower limit
self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
# should not overflow assigning upper limit
a = array.array(self.typecode, [upper])
a[0] = upper
# should overflow assigning more than upper limit
self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclassing(self):
typecode = self.typecode
class ExaggeratingArray(array.array):
__slots__ = ['offset']
def __new__(cls, typecode, data, offset):
return array.array.__new__(cls, typecode, data)
def __init__(self, typecode, data, offset):
self.offset = offset
def __getitem__(self, i):
return array.array.__getitem__(self, i) + self.offset
a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
self.assertEntryEqual(a[0], 7)
self.assertRaises(AttributeError, setattr, a, "color", "blue")
def test_frombytearray(self):
a = array.array('b', range(10))
b = array.array(self.typecode, a)
self.assertEqual(a, b)
class IntegerNumberTest(NumberTest):
def test_type_error(self):
a = array.array(self.typecode)
a.append(42)
with self.assertRaises(TypeError):
a.append(42.0)
with self.assertRaises(TypeError):
a[0] = 42.0
class Intable:
def __init__(self, num):
self._num = num
def __index__(self):
return self._num
def __int__(self):
return self._num
def __sub__(self, other):
return Intable(int(self) - int(other))
def __add__(self, other):
return Intable(int(self) + int(other))
class SignedNumberTest(IntegerNumberTest):
example = [-1, 0, 1, 42, 0x7f]
smallerexample = [-1, 0, 1, 42, 0x7e]
biggerexample = [-1, 0, 1, 43, 0x7f]
outside = 23
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_overflow(self):
a = array.array(self.typecode)
lower = -1 * int(pow(2, a.itemsize * 8 - 1))
upper = int(pow(2, a.itemsize * 8 - 1)) - 1
self.check_overflow(lower, upper)
self.check_overflow(Intable(lower), Intable(upper))
class UnsignedNumberTest(IntegerNumberTest):
example = [0, 1, 17, 23, 42, 0xff]
smallerexample = [0, 1, 17, 23, 42, 0xfe]
biggerexample = [0, 1, 17, 23, 43, 0xff]
outside = 0xaa
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_overflow(self):
a = array.array(self.typecode)
lower = 0
upper = int(pow(2, a.itemsize * 8)) - 1
self.check_overflow(lower, upper)
self.check_overflow(Intable(lower), Intable(upper))
def test_bytes_extend(self):
s = bytes(self.example)
a = array.array(self.typecode, self.example)
a.extend(s)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
a = array.array(self.typecode, self.example)
a.extend(bytearray(reversed(s)))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
class ByteTest(SignedNumberTest, unittest.TestCase):
typecode = 'b'
minitemsize = 1
class UnsignedByteTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'B'
minitemsize = 1
class ShortTest(SignedNumberTest, unittest.TestCase):
typecode = 'h'
minitemsize = 2
class UnsignedShortTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'H'
minitemsize = 2
class IntTest(SignedNumberTest, unittest.TestCase):
typecode = 'i'
minitemsize = 2
class UnsignedIntTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'I'
minitemsize = 2
class LongTest(SignedNumberTest, unittest.TestCase):
typecode = 'l'
minitemsize = 4
class UnsignedLongTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'L'
minitemsize = 4
class LongLongTest(SignedNumberTest, unittest.TestCase):
typecode = 'q'
minitemsize = 8
class UnsignedLongLongTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'Q'
minitemsize = 8
class FPTest(NumberTest):
example = [-42.0, 0, 42, 1e5, -1e10]
smallerexample = [-42.0, 0, 42, 1e5, -2e10]
biggerexample = [-42.0, 0, 42, 1e5, 1e10]
outside = 23
def assertEntryEqual(self, entry1, entry2):
self.assertAlmostEqual(entry1, entry2)
def test_nan(self):
a = array.array(self.typecode, [float('nan')])
b = array.array(self.typecode, [float('nan')])
self.assertIs(a != b, True)
self.assertIs(a == b, False)
self.assertIs(a > b, False)
self.assertIs(a >= b, False)
self.assertIs(a < b, False)
self.assertIs(a <= b, False)
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
# On alphas treating the byte swapped bit patterns as
# floats/doubles results in floating point exceptions
# => compare the 8bit string values instead
self.assertNotEqual(a.tobytes(), b.tobytes())
b.byteswap()
self.assertEqual(a, b)
class FloatTest(FPTest, unittest.TestCase):
typecode = 'f'
minitemsize = 4
class DoubleTest(FPTest, unittest.TestCase):
typecode = 'd'
minitemsize = 8
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'capacity overflow'")
def test_alloc_overflow(self):
from sys import maxsize
a = array.array('d', [-1]*65536)
try:
a *= maxsize//65536 + 1
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
b = array.array('d', [ 2.71828183, 3.14159265, -1])
try:
b * (maxsize//3 + 1)
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
class LargeArrayTest(unittest.TestCase):
typecode = 'b'
def example(self, size):
# We assess a base memuse of <=2.125 for constructing this array
base = array.array(self.typecode, [0, 1, 2, 3, 4, 5, 6, 7]) * (size // 8)
base += array.array(self.typecode, [99]*(size % 8) + [8, 9, 10, 11])
return base
@support.bigmemtest(_2G, memuse=2.125)
def test_example_data(self, size):
example = self.example(size)
self.assertEqual(len(example), size+4)
@support.bigmemtest(_2G, memuse=2.125)
def test_access(self, size):
example = self.example(size)
self.assertEqual(example[0], 0)
self.assertEqual(example[-(size+4)], 0)
self.assertEqual(example[size], 8)
self.assertEqual(example[-4], 8)
self.assertEqual(example[size+3], 11)
self.assertEqual(example[-1], 11)
@support.bigmemtest(_2G, memuse=2.125+1)
def test_slice(self, size):
example = self.example(size)
self.assertEqual(list(example[:4]), [0, 1, 2, 3])
self.assertEqual(list(example[-4:]), [8, 9, 10, 11])
part = example[1:-1]
self.assertEqual(len(part), size+2)
self.assertEqual(part[0], 1)
self.assertEqual(part[-1], 10)
del part
part = example[::2]
self.assertEqual(len(part), (size+5)//2)
self.assertEqual(list(part[:4]), [0, 2, 4, 6])
if size % 2:
self.assertEqual(list(part[-2:]), [9, 11])
else:
self.assertEqual(list(part[-2:]), [8, 10])
@support.bigmemtest(_2G, memuse=2.125)
def test_count(self, size):
example = self.example(size)
self.assertEqual(example.count(0), size//8)
self.assertEqual(example.count(11), 1)
@support.bigmemtest(_2G, memuse=2.125)
def test_append(self, size):
example = self.example(size)
example.append(12)
self.assertEqual(example[-1], 12)
@support.bigmemtest(_2G, memuse=2.125)
def test_extend(self, size):
example = self.example(size)
example.extend(iter([12, 13, 14, 15]))
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11, 12, 13, 14, 15])
@support.bigmemtest(_2G, memuse=2.125)
def test_frombytes(self, size):
example = self.example(size)
example.frombytes(b'abcd')
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11] + list(b'abcd'))
@support.bigmemtest(_2G, memuse=2.125)
def test_fromlist(self, size):
example = self.example(size)
example.fromlist([12, 13, 14, 15])
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11, 12, 13, 14, 15])
@support.bigmemtest(_2G, memuse=2.125)
def test_index(self, size):
example = self.example(size)
self.assertEqual(example.index(0), 0)
self.assertEqual(example.index(1), 1)
self.assertEqual(example.index(7), 7)
self.assertEqual(example.index(11), size+3)
@support.bigmemtest(_2G, memuse=2.125)
def test_insert(self, size):
example = self.example(size)
example.insert(0, 12)
example.insert(10, 13)
example.insert(size+1, 14)
self.assertEqual(len(example), size+7)
self.assertEqual(example[0], 12)
self.assertEqual(example[10], 13)
self.assertEqual(example[size+1], 14)
@support.bigmemtest(_2G, memuse=2.125)
def test_pop(self, size):
example = self.example(size)
self.assertEqual(example.pop(0), 0)
self.assertEqual(example[0], 1)
self.assertEqual(example.pop(size+1), 10)
self.assertEqual(example[size+1], 11)
self.assertEqual(example.pop(1), 2)
self.assertEqual(example[1], 3)
self.assertEqual(len(example), size+1)
self.assertEqual(example.pop(), 11)
self.assertEqual(len(example), size)
@support.bigmemtest(_2G, memuse=2.125)
def test_remove(self, size):
example = self.example(size)
example.remove(0)
self.assertEqual(len(example), size+3)
self.assertEqual(example[0], 1)
example.remove(10)
self.assertEqual(len(example), size+2)
self.assertEqual(example[size], 9)
self.assertEqual(example[size+1], 11)
@support.bigmemtest(_2G, memuse=2.125)
def test_reverse(self, size):
example = self.example(size)
example.reverse()
self.assertEqual(len(example), size+4)
self.assertEqual(example[0], 11)
self.assertEqual(example[3], 8)
self.assertEqual(example[-1], 0)
example.reverse()
self.assertEqual(len(example), size+4)
self.assertEqual(list(example[:4]), [0, 1, 2, 3])
self.assertEqual(list(example[-4:]), [8, 9, 10, 11])
# list takes about 9 bytes per element
@support.bigmemtest(_2G, memuse=2.125+9)
def test_tolist(self, size):
example = self.example(size)
ls = example.tolist()
self.assertEqual(len(ls), len(example))
self.assertEqual(ls[:8], list(example[:8]))
self.assertEqual(ls[-8:], list(example[-8:]))
if __name__ == "__main__":
unittest.main()
|
py | 1a52b3cccbf27e4b7388281826775fa4eedb4f8d | from base import BaseTrain
from tqdm import tqdm
import numpy as np
class TemplateTrainer(BaseTrain):
def __init__(self,name,sess,model,data,epochs,iter_per_epoch,batch_size):
super(TemplateTrainer,self).__init__(sess,model,data,epochs,iter_per_epoch,batch_size)
self.save_train_details(name)
def train(self):
# Template stub: typically overridden to loop over the configured epochs, calling train_epoch().
pass
def train_epoch(self):
# Template stub: typically overridden to run iter_per_epoch calls to train_step() and aggregate metrics.
pass
def train_step(self):
# Template stub: typically overridden to run one batch through the model and return the loss.
pass
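# Instantiation sketch (illustrative values only, derived from the constructor signature):
#   trainer = TemplateTrainer("experiment-1", sess, model, data,
#                             epochs=10, iter_per_epoch=100, batch_size=32)
#   trainer.train()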
|
py | 1a52b71a8c1abc73da1d85d5301a90b799e72632 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineGroupArgs', 'MachineGroup']
@pulumi.input_type
class MachineGroupArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
kind: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
count: Optional[pulumi.Input[int]] = None,
etag: Optional[pulumi.Input[str]] = None,
group_type: Optional[pulumi.Input[Union[str, 'MachineGroupType']]] = None,
machine_group_name: Optional[pulumi.Input[str]] = None,
machines: Optional[pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]]] = None):
"""
The set of arguments for constructing a MachineGroup resource.
:param pulumi.Input[str] display_name: User defined name for the group
:param pulumi.Input[str] kind: Additional resource type qualifier.
Expected value is 'machineGroup'.
:param pulumi.Input[str] resource_group_name: Resource group name within the specified subscriptionId.
:param pulumi.Input[str] workspace_name: OMS workspace containing the resources of interest.
:param pulumi.Input[int] count: Count of machines in this group. The value of count may be bigger than the number of machines in case the group has been truncated due to exceeding the max number of machines a group can handle.
:param pulumi.Input[str] etag: Resource ETAG.
:param pulumi.Input[Union[str, 'MachineGroupType']] group_type: Type of the machine group
:param pulumi.Input[str] machine_group_name: Machine Group resource name.
:param pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]] machines: References of the machines in this group. The hints within each reference do not represent the current value of the corresponding fields. They are a snapshot created during the last time the machine group was updated.
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "kind", 'machineGroup')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if count is not None:
pulumi.set(__self__, "count", count)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if group_type is not None:
pulumi.set(__self__, "group_type", group_type)
if machine_group_name is not None:
pulumi.set(__self__, "machine_group_name", machine_group_name)
if machines is not None:
pulumi.set(__self__, "machines", machines)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
User defined name for the group
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Additional resource type qualifier.
Expected value is 'machineGroup'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Resource group name within the specified subscriptionId.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
OMS workspace containing the resources of interest.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
"""
Count of machines in this group. The value of count may be bigger than the number of machines in case the group has been truncated due to exceeding the max number of machines a group can handle.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Resource ETAG.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="groupType")
def group_type(self) -> Optional[pulumi.Input[Union[str, 'MachineGroupType']]]:
"""
Type of the machine group
"""
return pulumi.get(self, "group_type")
@group_type.setter
def group_type(self, value: Optional[pulumi.Input[Union[str, 'MachineGroupType']]]):
pulumi.set(self, "group_type", value)
@property
@pulumi.getter(name="machineGroupName")
def machine_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Machine Group resource name.
"""
return pulumi.get(self, "machine_group_name")
@machine_group_name.setter
def machine_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "machine_group_name", value)
@property
@pulumi.getter
def machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]]]:
"""
References of the machines in this group. The hints within each reference do not represent the current value of the corresponding fields. They are a snapshot created during the last time the machine group was updated.
"""
return pulumi.get(self, "machines")
@machines.setter
def machines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]]]):
pulumi.set(self, "machines", value)
class MachineGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
count: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
group_type: Optional[pulumi.Input[Union[str, 'MachineGroupType']]] = None,
kind: Optional[pulumi.Input[str]] = None,
machine_group_name: Optional[pulumi.Input[str]] = None,
machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MachineReferenceWithHintsArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A user-defined logical grouping of machines.
API Version: 2015-11-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] count: Count of machines in this group. The value of count may be bigger than the number of machines in case the group has been truncated due to exceeding the max number of machines a group can handle.
:param pulumi.Input[str] display_name: User defined name for the group
:param pulumi.Input[str] etag: Resource ETAG.
:param pulumi.Input[Union[str, 'MachineGroupType']] group_type: Type of the machine group
:param pulumi.Input[str] kind: Additional resource type qualifier.
Expected value is 'machineGroup'.
:param pulumi.Input[str] machine_group_name: Machine Group resource name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MachineReferenceWithHintsArgs']]]] machines: References of the machines in this group. The hints within each reference do not represent the current value of the corresponding fields. They are a snapshot created during the last time the machine group was updated.
:param pulumi.Input[str] resource_group_name: Resource group name within the specified subscriptionId.
:param pulumi.Input[str] workspace_name: OMS workspace containing the resources of interest.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MachineGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A user-defined logical grouping of machines.
API Version: 2015-11-01-preview.
:param str resource_name: The name of the resource.
:param MachineGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MachineGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
count: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
group_type: Optional[pulumi.Input[Union[str, 'MachineGroupType']]] = None,
kind: Optional[pulumi.Input[str]] = None,
machine_group_name: Optional[pulumi.Input[str]] = None,
machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MachineReferenceWithHintsArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MachineGroupArgs.__new__(MachineGroupArgs)
__props__.__dict__["count"] = count
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["etag"] = etag
__props__.__dict__["group_type"] = group_type
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'machineGroup'
__props__.__dict__["machine_group_name"] = machine_group_name
__props__.__dict__["machines"] = machines
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:operationalinsights:MachineGroup"), pulumi.Alias(type_="azure-native:operationalinsights/v20151101preview:MachineGroup"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20151101preview:MachineGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineGroup, __self__).__init__(
'azure-native:operationalinsights:MachineGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineGroup':
"""
Get an existing MachineGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = MachineGroupArgs.__new__(MachineGroupArgs)
__props__.__dict__["count"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["group_type"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["machines"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return MachineGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def count(self) -> pulumi.Output[Optional[int]]:
"""
        Count of machines in this group. The value of count may be bigger than the number of machines in case the group has been truncated due to exceeding the max number of machines a group can handle.
"""
return pulumi.get(self, "count")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
User defined name for the group
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Resource ETAG.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="groupType")
def group_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of the machine group
"""
return pulumi.get(self, "group_type")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Additional resource type qualifier.
Expected value is 'machineGroup'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def machines(self) -> pulumi.Output[Optional[Sequence['outputs.MachineReferenceWithHintsResponse']]]:
"""
References of the machines in this group. The hints within each reference do not represent the current value of the corresponding fields. They are a snapshot created during the last time the machine group was updated.
"""
return pulumi.get(self, "machines")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|
py | 1a52b8c93c7104c11edfc060aed568255823ca5a | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 19:33
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.utils.migrations import ensure_schemas, remove_schemas
logger = logging.getLogger(__file__)
class Migration(migrations.Migration):
dependencies = [
('osf', '0076_action_rename'),
]
operations = [
migrations.RunPython(ensure_schemas, remove_schemas),
]
|
py | 1a52b8f9bbac1c068730f6c038e53b4276a0356a | from abjad import Duration
from abjad import show
from SegmentMaker import SegmentMaker
durations = [Duration(4, 4)] * 4
segment_maker = SegmentMaker(durations)
if __name__ == '__main__':
show(segment_maker)
|
py | 1a52bb4533c616e4422afa007fb2627b105b55e9 | """
Utilities for input/output operations.
It is important to have DFS as a global variable in this module to take advantage of singletons
Info: https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
All pages can import this file and retrieve data by:
> from data_loader import DFS
> df_xx = DFS[xx] # xx is the name of the dataframe
"""
import io
import dropbox
import pandas as pd
import oyaml as yaml
import constants as c
import utilities as u
DBX = dropbox.Dropbox(u.get_secret(c.io.VAR_DROPBOX_TOKEN))
DFS = {}
YML = {}
def get_config():
""" retrives config yaml as ordered dict """
_, res = DBX.files_download(c.io.FILE_CONFIG)
return yaml.load(io.BytesIO(res.content), Loader=yaml.SafeLoader)
def get_money_lover_filename():
""" gets the name of the money lover excel file """
names = []
# Explore all files and save all that are valid
for x in DBX.files_list_folder(c.io.PATH_MONEY_LOVER).entries:
try:
# Try to parse date, if possible if a money lover file
pd.to_datetime(x.name.split(".")[0])
names.append(x.name)
except (TypeError, ValueError):
pass
return max(names)
def get_df_transactions():
"""
    Retrieves the df with transactions. It will read the newest money lover excel file
Returns:
raw dataframe with transactions
"""
_, res = DBX.files_download(c.io.FILE_TRANSACTIONS)
return pd.read_excel(io.BytesIO(res.content), index_col=0)
def get_data_without_transactions():
"""
    Retrieves all dataframes from the data.xlsx file
Returns:
dict with raw dataframes from data.xlsx file
"""
_, res = DBX.files_download(c.io.FILE_DATA)
dfs = {x: pd.read_excel(io.BytesIO(res.content), sheet_name=x) for x in c.dfs.ALL_FROM_DATA}
return dfs
def sync():
""" Retrives all dataframes and update DFS global var """
DFS.update(get_data_without_transactions())
DFS[c.dfs.TRANS] = get_df_transactions()
YML.update(get_config())
# Do one sync when it is imported!
sync()
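# Example consumer (a sketch): any page module can read the dataframes that sync() above
# has already loaded, e.g.
# from data_loader import DFS, YML
# df_trans = DFS[c.dfs.TRANS]  # transactions dataframe (requires `import constants as c`)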
|
py | 1a52bc53fc7185e885a928b95bc87efc445e2bc9 | import sys
import hashlib
if len(sys.argv) <= 1:
print("Provide string argument")
exit(-1)
else:
s = sys.argv[1]
result = ""
for i in s:
result = result + hashlib.sha256(i.encode("utf-8")).hexdigest() + "\n"
print(result)
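# Example (script name is illustrative): `python hash_chars.py abc` prints three lines,
# the sha256 hex digest of "a", "b" and "c" respectively.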
|
py | 1a52bd4d18e4b5021cf5f3f226099013bd35358f | import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import numpy
a = numpy.random.randn(4,4)
a = a.astype(numpy.int32)
a_gpu = cuda.mem_alloc(a.nbytes)
cuda.memcpy_htod(a_gpu, a)
mod = SourceModule("""
#include <stdio.h>
__global__ void doublify(int *a) {
int idx = threadIdx.x + threadIdx.y*4; a[idx] = 1;
  // a[threadIdx.x][threadIdx.y] = 1;  // invalid: 'a' is a flat int*, so 2D indexing does not compile; a[idx] above already writes this element
printf("( threadIdx.x , %i ) | ( threadIdx.y , %i ) | ( idx , %i ) \\n" , threadIdx.x , threadIdx.y , idx);
} """)
func = mod.get_function("doublify")
func(a_gpu, block=(4,4,1))
a_doubled = numpy.empty_like(a)
cuda.memcpy_dtoh(a_doubled, a_gpu)
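# After the kernel runs, each of the 16 threads has written 1 to one element, so
# a_doubled should come back as a 4x4 array of ones while a keeps its original values.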
print('\n')
print(a_doubled)
print(a)
|
py | 1a52bda1bfe92913a0251b27036cad2bb03a3602 | import configparser
import json
def write_config_to_file(config_dict, ini_fpath):
""" Writes a configuration to an ini file.
:param config_dict: (Dict) config to write
:param ini_fpath: (str) fpath to ini file
:return: (str) ini_file written to
"""
config = configparser.ConfigParser()
config["DEFAULT"] = {key: json.dumps(value) for key, value in config_dict.items()}
with open(ini_fpath, "w") as ini:
config.write(ini)
return ini_fpath
def read_config_from_file(ini_fpath):
"""
Reads a config file
:param ini_fpath:
:return: a dictionary of config parameters
"""
config = configparser.ConfigParser()
config.read(ini_fpath)
result = {}
for key in config["DEFAULT"]:
result[key] = json.loads(config["DEFAULT"][key])
return result
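# Example round trip (a sketch; the "example.ini" path and the dict contents are illustrative):
if __name__ == "__main__":
    path = write_config_to_file({"lr": 0.01, "layers": [64, 64]}, "example.ini")
    print(read_config_from_file(path))  # -> {'lr': 0.01, 'layers': [64, 64]}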
|
py | 1a52bea0f55716d8ea222bd7459097390b3349e3 | """show_l2vpn.py
show l2vpn parser class
"""
import re
from netaddr import EUI
from ipaddress import ip_address
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any
from genie.libs.parser.base import *
class ShowL2vpnMacLearning(MetaParser):
"""Parser for show l2vpn mac-learning <mac_type> all location <location>"""
# TODO schema
def __init__(self, mac_type='mac', location='local', **kwargs):
self.location = location
self.mac_type = mac_type
super().__init__(**kwargs)
cli_command = 'show l2vpn mac-learning {mac_type} all location {location}'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command.format(
mac_type=self.mac_type,
location=self.location))
else:
out = output
result = {
'entries': [],
}
for line in out.splitlines():
line = line.rstrip()
# Topo ID Producer Next Hop(s) Mac Address IP Address
# ------- -------- ----------- -------------- ----------
# 1 0/0/CPU0 BE1.7 7777.7777.0002
# 0 0/0/CPU0 BV1 fc00.0001.0006 192.0.3.3
m = re.match(r'^(?P<topo_id>\d+)'
r' +(?P<producer>\S+)'
r' +(?:none|(?P<next_hop>\S+))'
r' +(?P<mac>[A-Za-z0-9]+\.[A-Za-z0-9]+\.[A-Za-z0-9]+)'
r'(?: +(?P<ip_address>\d+\.\d+\.\d+\.\d+|[A-Za-z0-9:]+))?$', line)
if m:
entry = {
                    'topo_id': int(m.group('topo_id')),
'producer': m.group('producer'),
'next_hop': m.group('next_hop'),
'mac': EUI(m.group('mac')),
'ip_address': m.group('ip_address') \
and ip_address(m.group('ip_address')),
}
result['entries'].append(entry)
continue
return result
class ShowL2vpnForwardingBridgeDomainMacAddress(MetaParser):
"""Parser for:
show l2vpn forwarding bridge-domain mac-address location <location>
show l2vpn forwarding bridge-domain <bridge_domain> mac-address location <location>
"""
# TODO schema
def __init__(self,location=None,bridge_domain=None,**kwargs) :
assert location is not None
self.location = location
self.bridge_domain = bridge_domain
super().__init__(**kwargs)
cli_command = ['show l2vpn forwarding bridge-domain mac-address location {location}', \
'show l2vpn forwarding bridge-domain {bridge_domain} mac-address location {location}']
def cli(self,output=None):
if output is None:
if self.bridge_domain is None:
cmd = self.cli_command[0].format(location=self.location)
else:
cmd = self.cli_command[1].format(bridge_domain=self.bridge_domain,location=self.location)
out = self.device.execute(cmd)
else:
out = output
result = {
'entries' : []
}
## Sample Output
# To Resynchronize MAC table from the Network Processors, use the command...
# l2vpn resynchronize forwarding mac-address-table location <r/s/i>
#
# Mac Address Type Learned from/Filtered on LC learned Resync Age/Last Change Mapped to
# -------------- ------- --------------------------- ---------- ---------------------- --------------
# 0021.0001.0001 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0003 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0004 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0005 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0001 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0002 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0003 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0004 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0002 dynamic (40.40.40.40, 10007) N/A 14 Mar 12:46:04 N/A
# 1234.0001.0005 static (40.40.40.40, 10007) N/A N/A N/A
# 0021.0002.0005 dynamic BE1.2 N/A 14 Mar 12:46:04 N/A
# 1234.0002.0004 static BE1.2 N/A N/A N/A
title_found = False
header_processed = False
field_indice = []
def _retrieve_fields(line,field_indice):
res = []
for idx,(start,end) in enumerate(field_indice):
if idx == len(field_indice) - 1:
res.append(line[start:].strip())
else:
res.append(line[start:end].strip())
return res
lines = out.splitlines()
for idx,line in enumerate(lines):
if idx == len(lines) - 1:
break
line = line.rstrip()
if not header_processed:
# 1. check proper title header exist
if re.match(r"^Mac Address\s+Type\s+Learned from/Filtered on\s+LC learned\s+Resync Age/Last Change\s+Mapped to",line):
title_found = True
continue
# 2. get dash header line
if title_found and re.match(r"^(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)",line):
match = re.match(r"^(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)",line)
start = 0
for field in match.groups():
if '-' in field:
end = start + len(field)
field_indice.append((start,end))
start = end
else:
start += len(field)
end += len(field)
header_processed = True
continue
else:
mac,mac_type,learned_from,lc_learned,resync_age,mapped_to = _retrieve_fields(line,field_indice)
result['entries'].append({
'mac' : mac,
'mac_type' : mac_type,
'learned_from' : learned_from,
'lc_learned' : lc_learned,
'resync_age' : resync_age,
'mapped_to' : mapped_to,
})
return result
class ShowL2vpnForwardingProtectionMainInterface(MetaParser):
"""Parser for show l2vpn forwarding protection main-interface location <location>"""
# TODO schema
def __init__(self,location=None,**kwargs):
assert location is not None
self.location = location
super().__init__(**kwargs)
cli_command = 'show l2vpn forwarding protection main-interface location {location}'
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command.format(location=self.location))
else:
out = output
result = {
'entries' : []
}
## Sample Output
# Main Interface ID Instance State
# -------------------------------- ---------- ------------
# VFI:ves-vfi-1 0 FORWARDING
# VFI:ves-vfi-1 1 BLOCKED
# VFI:ves-vfi-2 0 FORWARDING
# VFI:ves-vfi-2 1 FORWARDING
# VFI:ves-vfi-3 0 FORWARDING
# VFI:ves-vfi-3 1 BLOCKED
# VFI:ves-vfi-4 0 FORWARDING
# VFI:ves-vfi-4 1 FORWARDING
# PW:40.40.40.40,10001 0 FORWARDING
# PW:40.40.40.40,10001 1 BLOCKED
# PW:40.40.40.40,10007 0 FORWARDING
# PW:40.40.40.40,10007 1 FORWARDING
# PW:40.40.40.40,10011 0 FORWARDING
# PW:40.40.40.40,10011 1 FORWARDING
# PW:40.40.40.40,10017 0 FORWARDING
title_found = False
header_processed = False
field_indice = []
def _retrieve_fields(line,field_indice):
res = []
for idx,(start,end) in enumerate(field_indice):
if idx == len(field_indice) - 1:
res.append(line[start:].strip())
else:
res.append(line[start:end].strip())
return res
lines = out.splitlines()
for idx,line in enumerate(lines):
if idx == len(lines) - 1:
break
line = line.rstrip()
if not header_processed:
# 1. check proper title header exist
if re.match(r"^Main Interface ID\s+Instance\s+State",line):
title_found = True
continue
# 2. get dash header line
if title_found and re.match(r"^(-+)( +)(-+)( +)(-+)",line):
match = re.match(r"^(-+)( +)(-+)( +)(-+)",line)
start = 0
for field in match.groups():
if '-' in field:
end = start + len(field)
field_indice.append((start,end))
start = end
else:
start += len(field)
end += len(field)
header_processed = True
continue
else:
interface,instance_id,state = _retrieve_fields(line,field_indice)
result['entries'].append({
'interface' : interface,
'instance_id' : instance_id,
'state' : state,
})
return result
# vim: ft=python ts=8 sw=4 et
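# Usage sketch (assumes a connected pyATS/Genie device object named `device`; values illustrative):
# parser = ShowL2vpnMacLearning(device=device, mac_type='mac', location='0/0/CPU0')
# output = parser.parse()  # -> {'entries': [...]} as built by cli() above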
|
py | 1a52c02817bc6c29f7fa5b0d5b62ebe824397a31 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: radial_resample.py
#
# Tests: mesh - 2D rectilinear, single domain,
# 3D rectilinear, single domain
# 3D unstructured, multiple domain
# plots - pseudocolor
#
# Defect ID: 1827
#
# Programmer: Kevin Griffin
# Date: Tue Jun 3 11:00:41 EST 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
# 2D, Rectilinear
ds = silo_data_path("rect2d.silo")
OpenDatabase(ds)
# clean-up 1's
AddPlot("Mesh", "quadmesh2d", 1, 1)
AddOperator("RadialResample")
RadialResampleAttrs = RadialResampleAttributes()
RadialResampleAttrs.isFast = 0
RadialResampleAttrs.minTheta = -45
RadialResampleAttrs.maxTheta = 90
RadialResampleAttrs.deltaTheta = 5
RadialResampleAttrs.radius = 0.5
RadialResampleAttrs.deltaRadius = 0.05
RadialResampleAttrs.center = (0.5, 0.5, 0.5)
RadialResampleAttrs.is3D = 0
SetOperatorOptions(RadialResampleAttrs)
AddPlot("Pseudocolor", "t", 1, 1)
DrawPlots()
Test("ops_radialresampleop_rect2d")
DeleteAllPlots()
CloseDatabase(ds)
#3D, Rectilinear
ds = silo_data_path("rect3d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "quadmesh3d", 1, 1)
AddOperator("RadialResample")
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = -90
RadialResampleAtts.maxTheta = 90
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 0.5
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.5, 0.5, 0.5)
RadialResampleAtts.is3D = 1
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 360
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts, 1)
AddPlot("Pseudocolor", "w", 1, 1)
DrawPlots()
Test("ops_radialresampleop_rect3d")
DeleteAllPlots()
CloseDatabase(ds)
#2D, Rectilinear, Multiple Domains
ds = silo_data_path("multi_rect2d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "mesh1", 1, 1)
AddOperator("RadialResample", 1)
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = 0
RadialResampleAtts.maxTheta = 360
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 1
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.3, 0, 0)
RadialResampleAtts.is3D = 0
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 180
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts, 1)
AddPlot("Pseudocolor", "vec_magnitude", 1, 1)
DrawPlots()
Test("ops_radialresampleop_multi_rect2d")
DeleteAllPlots()
CloseDatabase(ds)
# 3D, Rectilinear, Multiple Domains
ds = silo_data_path("multi_rect3d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "mesh1", 1, 1)
AddOperator("RadialResample", 1)
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = -90
RadialResampleAtts.maxTheta = 90
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 0.5
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.5, 0.5, 0.5)
RadialResampleAtts.is3D = 1
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 360
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts)
AddPlot("Pseudocolor", "w")
DrawPlots()
Test("ops_radialresampleop_multi_rect3d")
DeleteAllPlots()
CloseDatabase(ds)
Exit()
|
py | 1a52c31cf478040f9ef0806e34ab630ad44ee553 | # SPDX-FileCopyrightText: 2020 Bryan Siepert, written for Adafruit Industries
# SPDX-License-Identifier: MIT
from time import sleep
import board
from adafruit_as7341 import AS7341
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = AS7341(i2c)
def bar_graph(read_value):
scaled = int(read_value / 1000)
return "[%5d] " % read_value + (scaled * "*")
while True:
print("F1 - 415nm/Violet %s" % bar_graph(sensor.channel_415nm))
print("F2 - 445nm//Indigo %s" % bar_graph(sensor.channel_445nm))
print("F3 - 480nm//Blue %s" % bar_graph(sensor.channel_480nm))
print("F4 - 515nm//Cyan %s" % bar_graph(sensor.channel_515nm))
print("F5 - 555nm/Green %s" % bar_graph(sensor.channel_555nm))
print("F6 - 590nm/Yellow %s" % bar_graph(sensor.channel_590nm))
print("F7 - 630nm/Orange %s" % bar_graph(sensor.channel_630nm))
print("F8 - 680nm/Red %s" % bar_graph(sensor.channel_680nm))
print("\n------------------------------------------------")
sleep(1)
|
py | 1a52c4037c8d32008c89ffc90a8eec6fe9a4d49b | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Optional, Any
import torch
from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.distributed.dist import LightningDistributed
class GPUAccelerator(Accelerator):
amp_backend: AMPType
def __init__(self, trainer, cluster_environment=None):
"""
Runs training using a single GPU
Example::
# default
trainer = Trainer(accelerator=GPUAccelerator())
"""
super().__init__(trainer, cluster_environment)
self.dist = LightningDistributed()
self.nickname = None
def setup(self, model):
# call setup
self.trainer.call_setup_hook(model)
torch.cuda.set_device(self.trainer.root_gpu)
model.cuda(self.trainer.root_gpu)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.setup_optimizers(model)
# 16-bit
model = self.trainer.precision_connector.connect(model)
self.trainer.model = model
def train(self):
model = self.trainer.model
# set up training routine
self.trainer.train_loop.setup_training(model)
# train or test
results = self.train_or_test()
return results
def training_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__training_step(args)
else:
output = self.__training_step(args)
return output
def __training_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.training_step(*args)
return output
def validation_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__validation_step(args)
else:
output = self.__validation_step(args)
return output
def __validation_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.validation_step(*args)
return output
def test_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__test_step(args)
else:
output = self.__test_step(args)
return output
def __test_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.test_step(*args)
return output
def to_device(self, batch):
gpu_id = 0
if isinstance(self.trainer.data_parallel_device_ids, list):
gpu_id = self.trainer.data_parallel_device_ids[0]
# Don't copy the batch since there is a single gpu that the batch could
# be referenced from and if there are multiple optimizers the batch will
# wind up copying it to the same device repeatedly.
return self.batch_to_device(batch, gpu_id)
def sync_tensor(self,
tensor: Union[torch.Tensor],
group: Optional[Any] = None,
reduce_op: Optional[Union[ReduceOp, str]] = None) -> torch.Tensor:
return tensor
|
py | 1a52c54a9e40f61a4da6ba4abfb45c9832912f98 | from .parser import parse # noqa: F401
|
py | 1a52c63e2da5c35dca642bcb61af1e1fe0cb276e | # Save public open-data parking lot information to the DB (14,417 records in total)
import json
from math import fsum
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.get_database('parking_lot')
def get_data():
with open('static/seoul_park_lot.json', encoding='UTF8') as json_file:
result = json.load(json_file)
datas = result["DATA"]
count = 0
for data in datas:
        # Define the required fields (lot name, address, paid/free type, free-at-night flag, base fee, base time, extra fee, weekday opening time, weekday closing time, latitude, longitude)
park_id = count
name = data['parking_name']
tel = data['tel']
address = data['addr']
free = data['pay_nm']
night_free = data['night_free_open']
basic_cost = data['rates']
basic_time = data['time_rate']
add_cost = data['add_rates']
wbt = data['weekday_begin_time']
wet = data['weekday_end_time']
        # Convert the time notation, e.g. 1200 -> 12:00
weekday_begin_time = wbt[:2] + ":" + wbt[2:]
weekday_end_time = wet[:2] + ":" + wet[2:]
lat = data['lat']
lng = data['lng']
doc = {
"park_id": park_id,
"Name": name,
"Tel": tel,
"Address": address,
"Free": free,
"Night free": night_free,
"Basic_cost": basic_cost,
"Basic_time": basic_time,
"Add cost": add_cost,
"Weekday begin time": weekday_begin_time,
"Weekday end time": weekday_end_time,
"location": {
"type": 'Point',
"coordinates": [lng, lat] # [경도,위도] 순서
},
}
count += 1
        # Insert the document
db.park_info.insert_one(doc)
print(db.park_info.count())
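# Note (assumption, not in the original script): geospatial queries on the 'location' field
# would normally need a 2dsphere index, created once with e.g.
# db.park_info.create_index([('location', '2dsphere')])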
# Collapse entries with duplicate names in the DB into one, storing the averaged latitude/longitude (compressed to 921 entries)
def remove_dup_name():
db_list = list(db.park_info.find({}, {'_id': False}))
count = db.park_info.count()
names = []
lngs = []
lats = []
    # Extract only name, lng, lat and store them in lists
for i in db_list:
names.append(i["Name"])
lngs.append(i["location"]["coordinates"][0])
lats.append(i["location"]["coordinates"][1])
nll = [[''] * 3 for i in range(len(names))] # nll means name, lng, lat
for i in range(0, count):
nll[i][0] = names[i]
nll[i][1] = lngs[i]
nll[i][2] = lats[i]
# for i in range(2309, 2326):
# print(nll[i][0], nll[i][1], nll[i][2])
temp = 0
for i in range(0, count):
tmp_lng = [nll[i][1]]
tmp_lat = [nll[i][2]]
for j in range(i + 1, count):
if nll[i][0] != nll[j][0]:
continue
elif nll[i][0] == '':
continue
else:
temp += 1
                # print(nll[j][0], 'is a duplicate; deleting the', j, 'th entry')
tmp_lng.append(nll[j][1])
tmp_lat.append(nll[j][2])
                # For duplicated names, reset every occurrence except the first to ''
nll[j][0] = ''
nll[j][1] = 0
nll[j][2] = 0
mean_lng = round(fsum(tmp_lng) / len(tmp_lng), 8)
mean_lat = round(fsum(tmp_lat) / len(tmp_lat), 8)
nll[i][1] = mean_lng
nll[i][2] = mean_lat
tmp_count = 0
for i in range(0, count):
if nll[i][0] == '':
tmp_count += 1
            # print(i, 'th entry deleted')
continue
print(i, nll[i][0], nll[i][1], nll[i][2])
    print('Deleting', tmp_count, 'entries in total')
    print(db.park_info.count())
for i in range(0, count):
if nll[i][0] == '':
db.park_info.delete_one({'park_id': i})
            # print(i, 'th entry deleted')
print(temp, count, db.park_info.count())
while True:
if (db.park_info.count() == 0):
get_data()
else:
remove_dup_name()
break
|
py | 1a52c7def3066c57df47fd85de4fe652a6cf8617 | """pokerFace URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app import views as vs
urlpatterns = [
path('app/', vs.index),
path('admin/', admin.site.urls),
path('app/index/', vs.index),
path('app/register/', vs.createUser),
path('app/login/', vs.myLogin),
path('app/logout/', vs.myLogout),
path('app/modify/', vs.modify),
path('app/Camera/', vs.CMR),
path('app/Camera2Server/', vs.CMR2server),
path('app/text2audio/', vs.text2audio),
path('app/rank/', vs.rank),
path('app/history/', vs.history),
path('app/deleteHistory/', vs.delete_history),
path('app/updateHistory/', vs.update_history),
path('app/thumbsUp/', vs.thumbs_up),
]
|
py | 1a52c809a49bde005f0ca07c9b5e8d0a2803f08b | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag'
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
DEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )
#Set hosts to allow any app on Heroku and the local testing URL
ALLOWED_HOSTS = ['.herokuapp.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Add our new application
'catalog.apps.CatalogConfig', #This object was created for us in /catalog/apps.py
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
# Add to test email:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
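# For reference, dj_database_url reads the DATABASE_URL environment variable in the form
# postgres://USER:PASSWORD@HOST:PORT/NAME (values illustrative).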
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
py | 1a52c8cbaff5ae5a506fff4153af0d847488e446 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import AppsOperations, BindingsOperations, CertificatesOperations, ConfigServersOperations, CustomDomainsOperations, DeploymentsOperations, MonitoringSettingsOperations, Operations, RuntimeVersionsOperations, ServicesOperations, SkusOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient:
"""REST API for Azure Spring Cloud.
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
:ivar config_servers: ConfigServersOperations operations
:vartype config_servers:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
:ivar monitoring_settings: MonitoringSettingsOperations operations
:vartype monitoring_settings:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
:ivar apps: AppsOperations operations
:vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
:ivar bindings: BindingsOperations operations
:vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
:ivar runtime_versions: RuntimeVersionsOperations operations
:vartype runtime_versions:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppPlatformManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
self.monitoring_settings = MonitoringSettingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.runtime_versions = RuntimeVersionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AppPlatformManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
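# Construction sketch (assumes the azure-identity package is available; the subscription id is illustrative):
# from azure.identity.aio import DefaultAzureCredential
# client = AppPlatformManagementClient(credential=DefaultAzureCredential(),
#                                      subscription_id="<subscription-id>")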
|
py | 1a52c9e49b601e39010e5d44e139d2415ba7293e | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence
from torchmetrics import SSIM as _SSIM
from pytorch_lightning.metrics.utils import deprecated_metrics, void
class SSIM(_SSIM):
@deprecated_metrics(target=_SSIM)
def __init__(
self,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: str = "elementwise_mean",
data_range: Optional[float] = None,
k1: float = 0.01,
k2: float = 0.03,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
"""
This implementation refers to :class:`~torchmetrics.SSIM`.
.. deprecated::
Use :class:`~torchmetrics.SSIM`. Will be removed in v1.5.0.
"""
void(kernel_size, sigma, reduction, data_range, k1, k2, compute_on_step, dist_sync_on_step, process_group)
|
py | 1a52ca99ebc0e1925dc98c1f9119212a66b6a62c | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='sprofiler',
version='0.1.0',
author='Bryan Brzycki',
author_email='[email protected]',
description='Lightweight profiler with checkpoints',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/bbrzycki/sprofiler',
project_urls={
'Source': 'https://github.com/bbrzycki/sprofiler'
},
packages=setuptools.find_packages(),
# include_package_data=True,
install_requires=[
'numpy>=1.18.1',
# 'scipy>=1.4.1',
# 'astropy>=4.0',
# 'blimpy>=2.0.0',
# 'matplotlib>=3.1.3',
# 'tqdm>=4.47.0',
# 'sphinx-rtd-theme>=0.4.3'
],
classifiers=(
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
),
)
|
py | 1a52cacc39710a073808298a78ae8c0a5700f038 | # coding=utf-8
"""Search SoundCloud playlists for audio."""
from __future__ import absolute_import
import os
import string
import sys
import requests
import soundcloud
from tqdm import tqdm
def sanitize(s):
return ''.join(
c for c in s
if c in '-_.() {}{}'.format(string.ascii_letters, string.digits))
if 'SOUNDCLOUD_API_KEY' in os.environ:
API_KEY = os.environ['SOUNDCLOUD_API_KEY']
else:
API_KEY = "81f430860ad96d8170e3bf1639d4e072"
def scrape(query, include, exclude, quiet, overwrite):
"""Search SoundCloud and download audio from discovered playlists."""
# Launch SoundCloud client.
client = soundcloud.Client(client_id=API_KEY)
# Generator for yielding all results pages.
def pagination(x):
yield x
while x.next_href:
x = client.get(x.next_href)
yield x
# Search SoundCloud for playlists.
for playlists in pagination(
client.get('/playlists',
q=query,
tags=','.join(include) if include else '',
linked_partitioning=1,
representation='compact')):
# Download playlists.
for playlist in playlists.collection:
# Skip playlists containing filter terms.
haystack = (playlist.title +
(' ' + playlist.description
if playlist.description else '')).lower()
if any(needle in haystack for needle in exclude):
continue
# Create directory for playlist.
directory = sanitize(playlist.title)
if directory == '':
continue
if not os.path.exists(directory):
os.mkdir(directory)
# Download tracks in playlist.
for track in client.get(playlist.tracks_uri):
file = os.path.join(directory, sanitize(track.title) + '.mp3')
# Skip existing files.
if os.path.exists(file) and not overwrite:
continue
# Skip tracks that are not allowed to be streamed.
if not track.streamable:
continue
# Skip tracks named with filter terms.
haystack = (track.title + ' ' + track.description + ' ' +
track.tag_list).lower()
if any(needle in haystack for needle in exclude):
continue
# Download track.
r = requests.get(client.get(track.stream_url,
allow_redirects=False).location,
stream=True)
total_size = int(r.headers['content-length'])
chunk_size = 1000000 # 1 MB chunks
with open(file, 'wb') as f:
for data in tqdm(
r.iter_content(chunk_size),
desc=track.title,
total=total_size / chunk_size,
unit='MB',
file=sys.stdout):
f.write(data)
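# Example invocation (argument values are illustrative):
# scrape(query="lofi", include=["chill"], exclude=["nightcore"], quiet=True, overwrite=False)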
|
py | 1a52cc0f0b79b789559eec73efd09a980aa12da5 | from django import forms
from .models import Department, Province, District
class DepartmentForm(forms.Form):
department = forms.ModelChoiceField(
queryset=Department.objects.all()
)
class ProvinceForm(DepartmentForm):
province = forms.ModelChoiceField(
queryset=Province.objects.none()
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.is_bound:
department = self._get_field_value('department')
if department:
self.fields['province'].queryset = Province.objects.filter(
parent=department
)
def _get_field_value(self, name):
field = self.fields[name]
value = field.widget.value_from_datadict(
self.data,
self.files,
self.add_prefix(name)
)
try:
return field.clean(value)
except:
return None
class DistrictForm(ProvinceForm):
district = forms.ModelChoiceField(
queryset=District.objects.none()
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.is_bound:
province = self._get_field_value('province')
if province:
self.fields['district'].queryset = District.objects.filter(
parent=province
)
UbigeoForm = DistrictForm
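# Minimal usage sketch (view-side code; the posted ids are illustrative). Because each form
# re-filters its child queryset from the submitted parent value, binding all three ids at once
# validates the full cascade:
# form = UbigeoForm(data={'department': dep_id, 'province': prov_id, 'district': dist_id})
# if form.is_valid():
#     district = form.cleaned_data['district']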
|
py | 1a52cc29dcf7a6e11a89e597047043ed4fc8eda2 | import unittest
from katas.beta.how_many_stairs_will_suzuki_climb_in_20_years import \
stairs_in_20
class StairsIn20YearsTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(stairs_in_20([
[[6737, 7244, 5776, 9826, 7057, 9247, 5842, 5484, 6543, 5153,
6832, 8274, 7148, 6152, 5940, 8040, 9174, 7555, 7682, 5252,
8793, 8837, 7320, 8478, 6063, 5751, 9716, 5085, 7315, 7859,
6628, 5425, 6331, 7097, 6249, 8381, 5936, 8496, 6934, 8347,
7036, 6421, 6510, 5821, 8602, 5312, 7836, 8032, 9871, 5990,
6309, 7825]],
[[9175, 7883, 7596, 8635, 9274, 9675, 5603, 6863, 6442, 9500,
7468, 9719, 6648, 8180, 7944, 5190, 6209, 7175, 5984, 9737,
5548, 6803, 9254, 5932, 7360, 9221, 5702, 5252, 7041, 7287,
5185, 9139, 7187, 8855, 9310, 9105, 9769, 9679, 7842, 7466,
7321, 6785, 8770, 8108, 7985, 5186, 9021, 9098, 6099, 5828,
7217, 9387]],
[[8646, 6945, 6364, 9563, 5627, 5068, 9157, 9439, 5681, 8674,
6379, 8292, 7552, 5370, 7579, 9851, 8520, 5881, 7138, 7890,
6016, 5630, 5985, 9758, 8415, 7313, 7761, 9853, 7937, 9268,
7888, 6589, 9366, 9867, 5093, 6684, 8793, 8116, 8493, 5265,
5815, 7191, 9515, 7825, 9508, 6878, 7180, 8756, 5717, 7555,
9447, 7703]],
[[6353, 9605, 5464, 9752, 9915, 7446, 9419, 6520, 7438, 6512,
7102, 5047, 6601, 8303, 9118, 5093, 8463, 7116, 7378, 9738,
9998, 7125, 6445, 6031, 8710, 5182, 9142, 9415, 9710, 7342,
9425, 7927, 9030, 7742, 8394, 9652, 5783, 7698, 9492, 6973,
6531, 7698, 8994, 8058, 6406, 5738, 7500, 8357, 7378, 9598,
5405, 9493]],
[[6149, 6439, 9899, 5897, 8589, 7627, 6348, 9625, 9490, 5502,
5723, 8197, 9866, 6609, 6308, 7163, 9726, 7222, 7549, 6203,
5876, 8836, 6442, 6752, 8695, 8402, 9638, 9925, 5508, 8636,
5226, 9941, 8936, 5047, 6445, 8063, 6083, 7383, 7548, 5066,
7107, 6911, 9302, 5202, 7487, 5593, 8620, 8858, 5360, 6638,
8012, 8701]],
[[5000, 5642, 9143, 7731, 8477, 8000, 7411, 8813, 8288, 5637,
6244, 6589, 6362, 6200, 6781, 8371, 7082, 5348, 8842, 9513,
5896, 6628, 8164, 8473, 5663, 9501, 9177, 8384, 8229, 8781,
9160, 6955, 9407, 7443, 8934, 8072, 8942, 6859, 5617, 5078,
8910, 6732, 9848, 8951, 9407, 6699, 9842, 7455, 8720, 5725,
6960, 5127]],
[[5448, 8041, 6573, 8104, 6208, 5912, 7927, 8909, 7000, 5059,
6412, 6354, 8943, 5460, 9979, 5379, 8501, 6831, 7022, 7575,
5828, 5354, 5115, 9625, 7795, 7003, 5524, 9870, 6591, 8616,
5163, 6656, 8150, 8826, 6875, 5242, 9585, 9649, 9838, 7150,
6567, 8524, 7613, 7809, 5562, 7799, 7179, 5184, 7960, 9455,
5633, 9085]]
]), 54636040)
|
py | 1a52cc48d4c3f560fc750a2e002203be4870bdbc | ##### EXERCISE 70 #######
print('-'*50)
print('{^} LOJA DO BARATÃO')
print('-'*50)
menor = cont = caro = total = 0
barato = 'a'
while True:
    produto = str(input('ENTER THE PRODUCT NAME : '))
cont += 1
    preço = float(input('Enter the product price : '))
total += preço
if preço >=1000:
caro += 1
if cont == 1 or preço < menor:
menor = preço
barato = produto
resp = '1'
while resp not in "SN":
resp = str(input('Quer continuar ? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('-'*50)
print(f'{caro} products cost R$ 1000 or more')
print(f'The total of the purchase was {total:10.2f}')
print(f'The cheapest product is {barato} and it cost {menor:10.2f}')
print('END OF PROGRAM') |
py | 1a52cc945ad09e62098a7924f970901b90551304 | from datetime import date, datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas._testing as tm
class TestPeriodConstruction:
def test_construction(self):
i1 = Period("1/1/2005", freq="M")
i2 = Period("Jan 2005")
assert i1 == i2
i1 = Period("2005", freq="A")
i2 = Period("2005")
i3 = Period("2005", freq="a")
assert i1 == i2
assert i1 == i3
i4 = Period("2005", freq="M")
i5 = Period("2005", freq="m")
assert i1 != i4
assert i4 == i5
i1 = Period.now("Q")
i2 = Period(datetime.now(), freq="Q")
i3 = Period.now("q")
assert i1 == i2
assert i1 == i3
i1 = Period("1982", freq="min")
i2 = Period("1982", freq="MIN")
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq="D")
i2 = Period("3/1/2005", freq="D")
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq="d")
assert i1 == i3
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
msg = "Must supply freq for ordinal value"
with pytest.raises(ValueError, match=msg):
Period(ordinal=200701)
msg = "Invalid frequency: X"
with pytest.raises(ValueError, match=msg):
Period("2007-1-1", freq="X")
# GH#34703 tuple freq disallowed
with pytest.raises(TypeError, match="pass as a string instead"):
Period("1982", freq=("Min", 1))
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period("3/10/12", freq="B")
i2 = Period("3/10/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/11/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/12/12", freq="D")
assert i1 == i2.asfreq("B")
i3 = Period("3/10/12", freq="b")
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq="B")
i2 = Period("3/12/12", freq="B")
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq="Q")
i2 = Period("1/1/2005", freq="Q")
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq="Q")
i2 = Period("9/1/2005", freq="Q")
assert i1 == i2
i1 = Period("2005Q1")
i2 = Period(year=2005, quarter=1, freq="Q")
i3 = Period("2005q1")
assert i1 == i2
assert i1 == i3
i1 = Period("05Q1")
assert i1 == i2
lower = Period("05q1")
assert i1 == lower
i1 = Period("1Q2005")
assert i1 == i2
lower = Period("1q2005")
assert i1 == lower
i1 = Period("1Q05")
assert i1 == i2
lower = Period("1q05")
assert i1 == lower
i1 = Period("4Q1984")
assert i1.year == 1984
lower = Period("4q1984")
assert i1 == lower
def test_construction_month(self):
expected = Period("2007-01", freq="M")
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period(200701, freq="M")
assert i1 == expected
i1 = Period(ordinal=200701, freq="M")
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
"1/1/2005", freq="M"
)
assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A")
assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
"3/10/12", freq="B"
)
assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
assert Period(
year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=1, freq="Q")
assert Period(
year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=2, freq="Q")
assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
year=2005, month=3, day=1, freq="D"
)
assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
year=2012, month=3, day=10, freq="B"
)
expected = Period("2005-03-01", freq="3D")
assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
assert Period(year=2005, month=3, day=1, freq="3D") == expected
assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
year=2012, month=3, day=10, freq="3B"
)
assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq="M")
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
def test_invalid_arguments(self):
msg = "Must supply freq for datetime value"
with pytest.raises(ValueError, match=msg):
Period(datetime.now())
with pytest.raises(ValueError, match=msg):
Period(datetime.now().date())
msg = "Value must be Period, string, integer, or datetime"
with pytest.raises(ValueError, match=msg):
Period(1.6, freq="D")
msg = "Ordinal must be an integer"
with pytest.raises(ValueError, match=msg):
Period(ordinal=1.6, freq="D")
msg = "Only value or ordinal but not both should be given but not both"
with pytest.raises(ValueError, match=msg):
Period(ordinal=2, value=1, freq="D")
msg = "If value is None, freq cannot be None"
with pytest.raises(ValueError, match=msg):
Period(month=1)
msg = "Given date string not likely a datetime"
with pytest.raises(ValueError, match=msg):
Period("-2000", "A")
msg = "day is out of range for month"
with pytest.raises(DateParseError, match=msg):
Period("0", "A")
msg = "Unknown datetime string format, unable to parse"
with pytest.raises(DateParseError, match=msg):
Period("1/1/-2000", "A")
def test_constructor_corner(self):
expected = Period("2007-01", freq="2M")
assert Period(year=2007, month=1, freq="2M") == expected
assert Period(None) is NaT
p = Period("2007-01-01", freq="D")
result = Period(p, freq="A")
exp = Period("2007", freq="A")
assert result == exp
def test_constructor_infer_freq(self):
p = Period("2007-01-01")
assert p.freq == "D"
p = Period("2007-01-01 07")
assert p.freq == "H"
p = Period("2007-01-01 07:10")
assert p.freq == "T"
p = Period("2007-01-01 07:10:15")
assert p.freq == "S"
p = Period("2007-01-01 07:10:15.123")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123000")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123400")
assert p.freq == "U"
def test_multiples(self):
result1 = Period("1989", freq="2A")
result2 = Period("1989", freq="A")
assert result1.ordinal == result2.ordinal
assert result1.freqstr == "2A-DEC"
assert result2.freqstr == "A-DEC"
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
freq = f"Q-{month}"
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp("3D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
freq = f"A-{month}"
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
daystr = f"2011-02-{num}"
freq = f"W-{day}"
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period("2011-01", freq="M")
res = Period._from_ordinal(p.ordinal, freq="M")
assert p == res
assert isinstance(res, Period)
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_construct_from_nat_string_and_freq(self, freq):
per = Period("NaT", freq=freq)
assert per is NaT
per = Period("NaT", freq="2" + freq)
assert per is NaT
per = Period("NaT", freq="3" + freq)
assert per is NaT
def test_period_cons_nat(self):
p = Period("nat", freq="W-SUN")
assert p is NaT
p = Period(iNaT, freq="D")
assert p is NaT
p = Period(iNaT, freq="3D")
assert p is NaT
p = Period(iNaT, freq="1D1H")
assert p is NaT
p = Period("NaT")
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
p1 = Period("2011-01", freq="3M")
p2 = Period("2011-01", freq="M")
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == "3M"
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == "M"
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -3M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-3M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0M")
def test_period_cons_combined(self):
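# Combined frequencies such as "1D1H" are normalized to their total span in the
# base unit, so "1D1H" and "1H1D" both behave as a 25-hour offset.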
p = [
(
Period("2011-01", freq="1D1H"),
Period("2011-01", freq="1H1D"),
Period("2011-01", freq="H"),
),
(
Period(ordinal=1, freq="1D1H"),
Period(ordinal=1, freq="1H1D"),
Period(ordinal=1, freq="H"),
),
]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == "25H"
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == "25H"
assert p3.freq == offsets.Hour()
assert p3.freqstr == "H"
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
msg = "Frequency must be positive, because it represents span: -25H"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1H1D")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1H1D")
msg = "Frequency must be positive, because it represents span: 0D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0D0H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="0D0H")
# Only day and intraday offsets can be combined
msg = "Invalid frequency: 1W1D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1W1D")
msg = "Invalid frequency: 1D1W"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
@pytest.mark.parametrize("day", ["1970/01/01 ", "2020-12-31 ", "1981/09/13 "])
@pytest.mark.parametrize("hour", ["00:00:00", "00:00:01", "23:59:59", "12:00:59"])
@pytest.mark.parametrize(
"sec_float, expected",
[
(".000000001", 1),
(".000000999", 999),
(".123456789", 789),
(".999999999", 999),
],
)
def test_period_constructor_nanosecond(self, day, hour, sec_float, expected):
# GH 34621
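# start_time.nanosecond is the sub-microsecond remainder of the fractional
# seconds, e.g. ".123456789" seconds -> 123456789 ns -> nanosecond == 789.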
assert Period(day + hour + sec_float).start_time.nanosecond == expected
@pytest.mark.parametrize("hour", range(24))
def test_period_large_ordinal(self, hour):
# Issue #36430
# Integer overflow for Period over the maximum timestamp
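# ordinal=2562048 hours (~106752 days) after the epoch falls around 2262-04-11,
# i.e. the neighborhood of Timestamp.max, so extracting .hour must not overflow.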
p = Period(ordinal=2562048 + hour, freq="1H")
assert p.hour == hour
class TestPeriodMethods:
def test_round_trip(self):
p = Period("2000Q1")
new_p = tm.round_trip_pickle(p)
assert new_p == p
def test_hash(self):
assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
# --------------------------------------------------------------
# to_timestamp
@pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"])
def test_to_timestamp_tz_arg(self, tzstr):
# GH#34522 tz kwarg deprecated
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr)
exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
@pytest.mark.parametrize(
"tzstr",
["dateutil/Europe/Brussels", "dateutil/Asia/Tokyo", "dateutil/US/Pacific"],
)
def test_to_timestamp_tz_arg_dateutil(self, tzstr):
tz = maybe_get_tz(tzstr)
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
def test_to_timestamp_tz_arg_dateutil_from_string(self):
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels")
assert p.tz == dateutil_gettz("Europe/Brussels")
def test_to_timestamp_mult(self):
p = Period("2011-01", freq="M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
p = Period("2011-01", freq="3M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
def test_to_timestamp(self):
p = Period("1982", freq="A")
start_ts = p.to_timestamp(how="S")
aliases = ["s", "StarT", "BEGIn"]
for a in aliases:
assert start_ts == p.to_timestamp("D", how=a)
# freq with a multiple should not affect the result
assert start_ts == p.to_timestamp("3D", how=a)
end_ts = p.to_timestamp(how="E")
aliases = ["e", "end", "FINIsH"]
for a in aliases:
assert end_ts == p.to_timestamp("D", how=a)
assert end_ts == p.to_timestamp("3D", how=a)
from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"]
def _ex(p):
if p.freq == "B":
return p.start_time + Timedelta(days=1, nanoseconds=-1)
return Timestamp((p + p.freq).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period("1982", freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how="S")
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period("1985", freq="A")
result = p.to_timestamp("H", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("3H", how="end")
assert result == expected
result = p.to_timestamp("T", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("2T", how="end")
assert result == expected
result = p.to_timestamp(how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp("H", how="start")
assert result == expected
result = p.to_timestamp("T", how="start")
assert result == expected
result = p.to_timestamp("S", how="start")
assert result == expected
result = p.to_timestamp("3H", how="start")
assert result == expected
result = p.to_timestamp("5S", how="start")
assert result == expected
def test_to_timestamp_business_end(self):
per = Period("1990-01-05", "B") # Friday
result = per.to_timestamp("B", how="E")
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize(
"ts, expected",
[
("1970-01-01 00:00:00", 0),
("1970-01-01 00:00:00.000001", 1),
("1970-01-01 00:00:00.00001", 10),
("1970-01-01 00:00:00.499", 499000),
("1999-12-31 23:59:59.999", 999000),
("1999-12-31 23:59:59.999999", 999999),
("2050-12-31 23:59:59.5", 500000),
("2050-12-31 23:59:59.500001", 500001),
("2050-12-31 23:59:59.123456", 123456),
],
)
@pytest.mark.parametrize("freq", [None, "us", "ns"])
def test_to_timestamp_microsecond(self, ts, expected, freq):
# GH 24444
result = Period(ts).to_timestamp(freq=freq).microsecond
assert result == expected
# --------------------------------------------------------------
# Rendering: __repr__, strftime, etc
def test_repr(self):
p = Period("Jan-2000")
assert "2000-01" in repr(p)
p = Period("2000-12-15")
assert "2000-12-15" in repr(p)
def test_repr_nat(self):
p = Period("nat", freq="M")
assert repr(NaT) in repr(p)
def test_millisecond_repr(self):
p = Period("2000-01-01 12:15:02.123")
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period("2000-01-01 12:15:02.123567")
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
# GH#3363
p = Period("2000-1-1 12:34:12", freq="S")
res = p.strftime("%Y-%m-%d %H:%M:%S")
assert res == "2000-01-01 12:34:12"
assert isinstance(res, str)
class TestPeriodProperties:
"""Test properties such as year, month, weekday, etc...."""
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_is_leap_year(self, freq):
# GH 13727
p = Period("2000-01-01 00:00:00", freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period("1999-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
p = Period("2004-01-01 00:00:00", freq=freq)
assert p.is_leap_year
p = Period("2100-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="M")
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_freq_str(self):
i1 = Period("1982", freq="Min")
assert i1.freq == offsets.Minute()
assert i1.freqstr == "T"
def test_period_deprecated_freq(self):
cases = {
"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
}
msg = INVALID_FREQ_ERR_MSG
for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period("2016-03-01 09:00", freq=freq)
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq=freq)
# check supported freq-aliases still works
p1 = Period("2016-03-01 09:00", freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
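# Helper: build a microsecond-frequency Period one second beyond (or within)
# the given Timestamp bound, shared by the bounds tests below.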
def _period_constructor(bound, offset):
return Period(
year=bound.year,
month=bound.month,
day=bound.day,
hour=bound.hour,
minute=bound.minute,
second=bound.second + offset,
freq="us",
)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
def test_outter_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, offset)
with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond"):
getattr(period, period_property)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
def test_inner_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, -offset)
expected = period.to_timestamp().round(freq="S")
assert getattr(period, period_property).round(freq="S") == expected
expected = (bound - offset * Timedelta(1, unit="S")).floor("S")
assert getattr(period, period_property).floor("S") == expected
def test_start_time(self):
freq_lst = ["A", "Q", "M", "D", "H", "T", "S"]
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period("2012", freq=f)
assert p.start_time == xp
assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="Q")
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period("2012", freq="M")
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="D")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period("2012", freq="H")
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="B")
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period("2012", freq="W")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period("2012", freq="15D")
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period("2012", freq="1D1H")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="1H1D")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_end_time_business_friday(self):
# GH#34449
per = Period("1990-01-05", "B")
result = per.end_time
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
# Test properties on Periods with annual frequency.
a_date = Period(freq="A", year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq="M", year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq="W", year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq="B", year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
d_date = Period(freq="D", year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq="H", year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq="2H", year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert (
Period(freq="H", year=2012, month=2, day=1, hour=0).days_in_month == 29
)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert (
Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
== 29
)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert (
Period(
freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
).days_in_month
== 29
)
class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"
with pytest.raises(ValueError, match=msg):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestPeriodComparisons:
def test_comparison_same_period_different_object(self):
# Separate Period objects for the same period
left = Period("2000-01", "M")
right = Period("2000-01", "M")
assert left == right
assert left >= right
assert left <= right
assert not left < right
assert not left > right
def test_comparison_same_freq(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
assert not jan == feb
assert jan != feb
assert jan < feb
assert jan <= feb
assert not jan > feb
assert not jan >= feb
def test_comparison_mismatched_freq(self):
jan = Period("2000-01", "M")
day = Period("2012-01-01", "D")
assert not jan == day
assert jan != day
msg = r"Input has different freq=D from Period\(freq=M\)"
with pytest.raises(IncompatibleFrequency, match=msg):
jan < day
with pytest.raises(IncompatibleFrequency, match=msg):
jan <= day
with pytest.raises(IncompatibleFrequency, match=msg):
jan > day
with pytest.raises(IncompatibleFrequency, match=msg):
jan >= day
def test_comparison_invalid_type(self):
jan = Period("2000-01", "M")
assert not jan == 1
assert jan != 1
int_or_per = "'(Period|int)'"
msg = f"not supported between instances of {int_or_per} and {int_or_per}"
for left, right in [(jan, 1), (1, jan)]:
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
def test_sort_periods(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
mar = Period("2000-03", "M")
periods = [mar, jan, feb]
correctPeriods = [jan, feb, mar]
assert sorted(periods) == correctPeriods
def test_period_cmp_nat(self):
p = Period("2011-01-01", freq="D")
t = Timestamp("2011-01-01")
# confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [
(NaT, p),
(p, NaT),
(NaT, t),
(t, NaT),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestArithmetic:
def test_sub_delta(self):
left, right = Period("2011", freq="A"), Period("2007", freq="A")
result = left - right
assert result == 4 * right.freq
msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
left - Period("2007-01", freq="M")
def test_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_add_sub_nat(self):
# GH#13071
p = Period("2011-01", freq="M")
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
def test_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = "|".join(
[
r"unsupported operand type\(s\)",
"can only concatenate str",
"must be str, not Period",
]
)
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
ids = ["identity", "Series", "Index"]
@pytest.mark.parametrize("lbox", boxes, ids=ids)
@pytest.mark.parametrize("rbox", boxes, ids=ids)
def test_add_timestamp_raises(self, rbox, lbox):
# GH#17983
ts = Timestamp("2017")
per = Period("2017", freq="M")
# We may get a different message depending on which class raises
# the error.
msg = "|".join(
[
"cannot add",
"unsupported operand",
"can only operate on a",
"incompatible type",
"ufunc add cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
lbox(ts) + rbox(per)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(ts)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(per)
def test_sub(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
def test_add_offset(self):
# freq is DateOffset
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
exp = Period("2013", freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
exp = Period("2011-05", freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period("2012-03", freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
exp = Period("2011-04-06", freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period("2011-04-02", freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + np.timedelta64(2, "D") == exp
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
np.timedelta64(2, "D") + p
exp = Period("2011-04-02", freq=freq)
assert p + np.timedelta64(3600 * 24, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600 * 24, "s") + p
exp = Period("2011-03-30", freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
exp = Period("2011-04-03 09:00", freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period("2011-04-01 12:00", freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert p + np.timedelta64(3, "h") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3, "h") + p
exp = Period("2011-04-01 10:00", freq=freq)
assert p + np.timedelta64(3600, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600, "s") + p
exp = Period("2011-04-01 11:00", freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period("2011-04-05 12:00", freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
def test_sub_offset(self):
# freq is DateOffset
msg = "Input has different freq|Input cannot be converted to Period"
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
assert p - offsets.YearEnd(2) == Period("2009", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
assert p - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
assert p - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
assert p - offsets.Day(5) == Period("2011-03-27", freq=freq)
assert p - offsets.Hour(24) == Period("2011-03-31", freq=freq)
assert p - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
assert p - np.timedelta64(3600 * 24, "s") == Period("2011-03-31", freq=freq)
assert p - timedelta(-2) == Period("2011-04-03", freq=freq)
assert p - timedelta(hours=48) == Period("2011-03-30", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
assert p - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
assert p - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3600, "s") == Period(
"2011-04-01 08:00", freq=freq
)
assert p - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
"2011-03-28 06:00", freq=freq
)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_period_addsub_nat(self, freq):
per = Period("2011-01", freq=freq)
# For subtraction, NaT is treated as another Period object
assert NaT - per is NaT
assert per - NaT is NaT
# For addition, NaT is treated as offset-like
assert NaT + per is NaT
assert per + NaT is NaT
def test_period_ops_offset(self):
p = Period("2011-04-01", freq="D")
result = p + offsets.Day()
exp = Period("2011-04-02", freq="D")
assert result == exp
result = p - offsets.Day(2)
exp = Period("2011-03-30", freq="D")
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
p + offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
p - offsets.Hour(2)
def test_period_immutable():
# see gh-17116
msg = "not writable"
per = Period("2014Q1")
with pytest.raises(AttributeError, match=msg):
per.ordinal = 14
freq = per.freq
with pytest.raises(AttributeError, match=msg):
per.freq = 2 * freq
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
assert per1.day == 7
def test_negone_ordinals():
freqs = ["A", "M", "Q", "D", "H", "T", "S"]
period = Period(ordinal=-1, freq="D")
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
assert period.year == 1969
period = Period(ordinal=-1, freq="B")
repr(period)
period = Period(ordinal=-1, freq="W")
repr(period)
def test_invalid_frequency_error_message():
msg = "Invalid frequency: <WeekOfMonth: week=0, weekday=0>"
with pytest.raises(ValueError, match=msg):
Period("2012-01-02", freq="WOM-1MON")
|
py | 1a52ccac93615e78e1260692c8a05510835644d1 | # -*- coding: utf8 -*-
import sys
import unittest
import platform
IS_PYPY = "PyPy" == platform.python_implementation()
try:
from pygame.tests.test_utils import arrinter
except NameError:
pass
import pygame
init_called = quit_called = 0
def __PYGAMEinit__(): # called automatically by pygame.init()
global init_called
init_called = init_called + 1
pygame.register_quit(pygame_quit)
# Returning False indicates that the initialization has failed. It is
# purposely done here to test that failing modules are reported.
return False
def pygame_quit():
global quit_called
quit_called = quit_called + 1
quit_hook_ran = 0
def quit_hook():
global quit_hook_ran
quit_hook_ran = 1
class BaseModuleTest(unittest.TestCase):
def tearDown(self):
# Clean up after each test method.
pygame.quit()
def testAutoInit(self):
pygame.init()
pygame.quit()
self.assertEqual(init_called, 1)
self.assertEqual(quit_called, 1)
def test_get_sdl_byteorder(self):
"""Ensure the SDL byte order is valid"""
byte_order = pygame.get_sdl_byteorder()
expected_options = (pygame.LIL_ENDIAN, pygame.BIG_ENDIAN)
self.assertIn(byte_order, expected_options)
def test_get_sdl_version(self):
"""Ensure the SDL version is valid"""
self.assertEqual(len(pygame.get_sdl_version()), 3)
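# Minimal array-interface exporter used by the buffer tests below: it allocates
# a ctypes byte buffer and advertises a matching shape/strides/typestr for it.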
class ExporterBase(object):
def __init__(self, shape, typechar, itemsize):
import ctypes
ndim = len(shape)
self.ndim = ndim
self.shape = tuple(shape)
array_len = 1
for d in shape:
array_len *= d
self.size = itemsize * array_len
self.parent = ctypes.create_string_buffer(self.size)
self.itemsize = itemsize
strides = [itemsize] * ndim
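# Row-major (C-contiguous) strides: each dimension's stride is the element
# size times the number of elements in all faster-varying dimensions.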
for i in range(ndim - 1, 0, -1):
strides[i - 1] = strides[i] * shape[i]
self.strides = tuple(strides)
self.data = ctypes.addressof(self.parent), False
if self.itemsize == 1:
byteorder = "|"
elif sys.byteorder == "big":
byteorder = ">"
else:
byteorder = "<"
self.typestr = byteorder + typechar + str(self.itemsize)
def assertSame(self, proxy, obj):
self.assertEqual(proxy.length, obj.size)
iface = proxy.__array_interface__
self.assertEqual(iface["typestr"], obj.typestr)
self.assertEqual(iface["shape"], obj.shape)
self.assertEqual(iface["strides"], obj.strides)
self.assertEqual(iface["data"], obj.data)
def test_PgObject_GetBuffer_array_interface(self):
from pygame.bufferproxy import BufferProxy
class Exporter(self.ExporterBase):
def get__array_interface__(self):
return {
"version": 3,
"typestr": self.typestr,
"shape": self.shape,
"strides": self.strides,
"data": self.data,
}
__array_interface__ = property(get__array_interface__)
# Should be ignored by PgObject_GetBuffer
__array_struct__ = property(lambda self: None)
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "i", 2)
v = BufferProxy(o)
self.assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for typechar in ("i", "u"):
for itemsize in (1, 2, 4, 8):
o = Exporter(shape, typechar, itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
for itemsize in (4, 8):
o = Exporter(shape, "f", itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
# Is the dict received from an exporting object properly released?
# The dict should be freed before PgObject_GetBuffer returns.
# When the BufferProxy v's length property is referenced, v calls
# PgObject_GetBuffer, which in turn references Exporter2 o's
# __array_interface__ property. The Exporter2 instance o returns a
# dict subclass for which it keeps both a regular reference and a
# weak reference. The regular reference should be the only
# remaining reference when PgObject_GetBuffer returns. This is
# verified by first checking the weak reference both before and
# after the regular reference held by o is removed.
import weakref, gc
class NoDictError(RuntimeError):
pass
class WRDict(dict):
"""Weak referenceable dict"""
pass
class Exporter2(Exporter):
def get__array_interface__2(self):
self.d = WRDict(Exporter.get__array_interface__(self))
self.dict_ref = weakref.ref(self.d)
return self.d
__array_interface__ = property(get__array_interface__2)
def free_dict(self):
self.d = None
def is_dict_alive(self):
try:
return self.dict_ref() is not None
except AttributeError:
raise NoDictError("__array_interface__ is unread")
o = Exporter2((2, 4), "u", 4)
v = BufferProxy(o)
self.assertRaises(NoDictError, o.is_dict_alive)
length = v.length
self.assertTrue(o.is_dict_alive())
o.free_dict()
gc.collect()
self.assertFalse(o.is_dict_alive())
def test_GetView_array_struct(self):
from pygame.bufferproxy import BufferProxy
class Exporter(self.ExporterBase):
def __init__(self, shape, typechar, itemsize):
super(Exporter, self).__init__(shape, typechar, itemsize)
self.view = BufferProxy(self.__dict__)
def get__array_struct__(self):
return self.view.__array_struct__
__array_struct__ = property(get__array_struct__)
# Should not cause PgObject_GetBuffer to fail
__array_interface__ = property(lambda self: None)
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "i", 2)
v = BufferProxy(o)
self.assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for typechar in ("i", "u"):
for itemsize in (1, 2, 4, 8):
o = Exporter(shape, typechar, itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
for itemsize in (4, 8):
o = Exporter(shape, "f", itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
# Check returned cobject/capsule reference count
try:
from sys import getrefcount
except ImportError:
# PyPy: no reference counting
pass
else:
o = Exporter(shape, typechar, itemsize)
self.assertEqual(getrefcount(o.__array_struct__), 1)
if pygame.HAVE_NEWBUF:
from pygame.tests.test_utils import buftools
def NEWBUF_assertSame(self, proxy, exp):
buftools = self.buftools
Importer = buftools.Importer
self.assertEqual(proxy.length, exp.len)
imp = Importer(proxy, buftools.PyBUF_RECORDS_RO)
self.assertEqual(imp.readonly, exp.readonly)
self.assertEqual(imp.format, exp.format)
self.assertEqual(imp.itemsize, exp.itemsize)
self.assertEqual(imp.ndim, exp.ndim)
self.assertEqual(imp.shape, exp.shape)
self.assertEqual(imp.strides, exp.strides)
self.assertTrue(imp.suboffsets is None)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
@unittest.skipIf(IS_PYPY, "pypy2 no likey")
def test_newbuf(self):
from pygame.bufferproxy import BufferProxy
Exporter = self.buftools.Exporter
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "=h")
v = BufferProxy(o)
self.NEWBUF_assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for format in [
"b",
"B",
"=h",
"=H",
"=i",
"=I",
"=q",
"=Q",
"f",
"d",
"1h",
"=1h",
"x",
"1x",
"2x",
"3x",
"4x",
"5x",
"6x",
"7x",
"8x",
"9x",
]:
o = Exporter(shape, format)
v = BufferProxy(o)
self.NEWBUF_assertSame(v, o)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_bad_format(self):
from pygame.bufferproxy import BufferProxy
from pygame.newbuffer import BufferMixin
from ctypes import create_string_buffer, addressof
buftools = self.buftools
Exporter = buftools.Exporter
Importer = buftools.Importer
PyBUF_FORMAT = buftools.PyBUF_FORMAT
for format in [
"",
"=",
"1",
" ",
"2h",
"=2h",
"0x",
"11x",
"=!",
"h ",
" h",
"hh",
"?",
]:
exp = Exporter((1,), format, itemsize=2)
b = BufferProxy(exp)
self.assertRaises(ValueError, Importer, b, PyBUF_FORMAT)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
@unittest.skipIf(IS_PYPY, "fails on pypy")
def test_PgDict_AsBuffer_PyBUF_flags(self):
from pygame.bufferproxy import BufferProxy
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
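# fsys/frev are the struct format prefixes for the machine's native byte order
# and the reversed (foreign) byte order, respectively.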
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
buftools = self.buftools
Importer = buftools.Importer
a = BufferProxy(
{"typestr": "|u4", "shape": (10, 2), "data": (9, False)}
) # 9? No data accesses.
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertEqual(b.shape, (10, 2))
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
a = BufferProxy(
{
"typestr": fsys + "i2",
"shape": (5, 10),
"strides": (24, 2),
"data": (42, False),
}
) # 42? No data accesses.
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, 100)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (5, 10))
self.assertEqual(b.strides, (24, 2))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 42)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, "=h")
self.assertEqual(b.len, 100)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (5, 10))
self.assertEqual(b.strides, (24, 2))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 42)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
a = BufferProxy(
{
"typestr": frev + "i2",
"shape": (3, 5, 10),
"strides": (120, 24, 2),
"data": (1000000, True),
}
) # 1000000? No data accesses.
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, 3)
self.assertEqual(b.format, frev + "h")
self.assertEqual(b.len, 300)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (3, 5, 10))
self.assertEqual(b.strides, (120, 24, 2))
self.assertTrue(b.suboffsets is None)
self.assertTrue(b.readonly)
self.assertEqual(b.buf, 1000000)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL)
@unittest.skipIf(IS_PYPY or (not pygame.HAVE_NEWBUF), "newbuf with ctypes")
def test_PgObject_AsBuffer_PyBUF_flags(self):
from pygame.bufferproxy import BufferProxy
import ctypes
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
buftools = self.buftools
Importer = buftools.Importer
e = arrinter.Exporter(
(10, 2), typekind="f", itemsize=ctypes.sizeof(ctypes.c_double)
)
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, e.nd)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
e = arrinter.Exporter((5, 10), typekind="i", itemsize=2, strides=(24, 2))
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, e.nd)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, e.nd)
self.assertEqual(b.format, "=h")
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
e = arrinter.Exporter(
(3, 5, 10),
typekind="i",
itemsize=2,
strides=(120, 24, 2),
flags=arrinter.PAI_ALIGNED,
)
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, e.nd)
self.assertEqual(b.format, frev + "h")
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertTrue(b.readonly)
self.assertEqual(b.buf, e.data)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL)
def test_PgObject_GetBuffer_exception(self):
# For consistency with surfarray
from pygame.bufferproxy import BufferProxy
bp = BufferProxy(1)
self.assertRaises(ValueError, getattr, bp, "length")
def not_init_assertions(self):
self.assertFalse(pygame.get_init(), "pygame shouldn't be initialized")
self.assertFalse(pygame.display.get_init(), "display shouldn't be initialized")
if "pygame.mixer" in sys.modules:
self.assertFalse(pygame.mixer.get_init(), "mixer shouldn't be initialized")
if "pygame.font" in sys.modules:
self.assertFalse(pygame.font.get_init(), "font shouldn't be initialized")
## !!! TODO : Remove when scrap works for OS X
import platform
if platform.system().startswith("Darwin"):
return
try:
self.assertRaises(pygame.error, pygame.scrap.get)
except NotImplementedError:
# Scrap is optional.
pass
# pygame.cdrom
# pygame.joystick
def init_assertions(self):
self.assertTrue(pygame.get_init())
self.assertTrue(pygame.display.get_init())
if "pygame.mixer" in sys.modules:
self.assertTrue(pygame.mixer.get_init())
if "pygame.font" in sys.modules:
self.assertTrue(pygame.font.get_init())
def test_quit__and_init(self):
# __doc__ (as of 2008-06-25) for pygame.base.quit:
# pygame.quit(): return None
# uninitialize all pygame modules
# Make sure everything is not init
self.not_init_assertions()
# Initiate it
pygame.init()
# Check
self.init_assertions()
# Quit
pygame.quit()
# All modules have quit
self.not_init_assertions()
def test_register_quit(self):
"""Ensure that a registered function is called on quit()"""
self.assertFalse(quit_hook_ran)
pygame.init()
pygame.register_quit(quit_hook)
pygame.quit()
self.assertTrue(quit_hook_ran)
def test_get_error(self):
# __doc__ (as of 2008-08-02) for pygame.base.get_error:
# pygame.get_error(): return errorstr
# get the current error message
#
# SDL maintains an internal error message. This message will usually
# be given to you when pygame.error is raised. You will rarely need to
# call this function.
#
# The first error could be all sorts of nonsense or empty.
e = pygame.get_error()
pygame.set_error("hi")
self.assertEqual(pygame.get_error(), "hi")
pygame.set_error("")
self.assertEqual(pygame.get_error(), "")
def test_set_error(self):
# The first error could be all sorts of nonsense or empty.
e = pygame.get_error()
pygame.set_error("hi")
self.assertEqual(pygame.get_error(), "hi")
pygame.set_error("")
self.assertEqual(pygame.get_error(), "")
def test_unicode_error(self):
if sys.version_info.major > 2:
pygame.set_error(u"你好")
self.assertEqual(u"你好", pygame.get_error())
else:
# no unicode objects for now
pygame.set_error(u"你好")
encstr = u"你好".encode("utf8")
self.assertEqual(encstr, pygame.get_error())
def test_init(self):
"""Ensures init() works properly."""
# Make sure nothing initialized.
self.not_init_assertions()
# The exact number of modules can change, but it should never be < 0.
expected_min_passes = 0
# The __PYGAMEinit__ function in this module returns False, so this
# should give a fail count of 1. All other modules should pass.
expected_fails = 1
passes, fails = pygame.init()
self.init_assertions()
self.assertGreaterEqual(passes, expected_min_passes)
self.assertEqual(fails, expected_fails)
def test_get_init(self):
# Test if get_init() gets the init state.
self.assertFalse(pygame.get_init())
def test_get_init__after_init(self):
# Test if get_init() gets the init state after pygame.init() called.
pygame.init()
self.assertTrue(pygame.get_init())
def test_get_init__after_quit(self):
# Test if get_init() gets the init state after pygame.quit() called.
pygame.init()
pygame.quit()
self.assertFalse(pygame.get_init())
def todo_test_segfault(self):
# __doc__ (as of 2008-08-02) for pygame.base.segfault:
# crash
self.fail()
if __name__ == "__main__":
unittest.main()
|
py | 1a52cd42073d8be6e7b47f3bb3fab5f025439d5f | #!/usr/bin/env python3
from Bio import SeqIO
def count(fasta):
"""Counts sequences in an open FASTA file handle.
Iterates file and counts header lines. Then, seeks to start
of the file and returns the count.
Parameters:
fasta (file pointer): An open file handle corresponding to a FASTA file.
Returns:
count (int): Total number of sequences in the file.
"""
count = 0
for line in fasta:
if line.startswith(">"):
count += 1
fasta.seek(0)
return count
def wrap(sequence, limit=80):
"""Wraps sequences to `limit` characters per line.
Parameters:
sequence (str): Sequence to be wrapped.
limit (int): Total characters per line.
Returns:
(str): Sequence wrapped to maximum `limit` characters per line.
"""
return "\n".join(sequence[i: i + limit] for i in range(0, len(sequence), limit))
def create(header, sequence, limit=80):
"""Creates a FASTA format string from a header and sequence.
For example:
>>> fasta = create('header', 'AAAAABBBBBCCCCC', limit=5)
>>> print(fasta)
>header
AAAAA
BBBBB
CCCCC
Parameters:
header (str): Name to use in FASTA definition line (i.e. >header).
sequence (str): The sequence corresponding to the `header`.
limit (int): Total characters per line before sequence is wrapped.
Returns:
(str): FASTA format string.
"""
return ">{}\n{}".format(header, wrap(sequence, limit=limit))
def parse(handle):
return {
record.name: str(record.seq)
for record in SeqIO.parse(handle, 'fasta')
}
|
py | 1a52ce684c838019498a7d86ce2a9e0a026f98f8 | from datetime import datetime, timedelta
import fundamental_analysis.financial_statements_entries as fi
import fundamental_analysis.supporting_metrics as me
from fundamental_analysis.metrics_helpers import FundamentalMetricsHelpers
from matilda.quantitative_analysis.risk_factor_modeling import asset_pricing_model
import macroeconomic_analysis.macroeconomic_analysis as macro
import numpy as np
from functools import partial
def cost_of_preferred_stock(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY'):
preferred_dividends = fi.preferred_dividends(stock=stock, date=date, lookback_period=lookback_period, period=period)
market_price_of_preferred = fi.preferred_stock_value(stock=stock, date=date, lookback_period=lookback_period,
period=period)
return preferred_dividends / market_price_of_preferred
def cost_of_debt(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY'):
interest_rate = fi.interest_expense(stock=stock, date=date, lookback_period=lookback_period,
period=period) / fi.total_long_term_debt(stock=stock, date=date,
lookback_period=lookback_period,
period=period)
tax_rate = fi.income_tax_expense(stock=stock, date=date, lookback_period=lookback_period,
period=period) / me.earnings_before_taxes(stock=stock, date=date,
lookback_period=lookback_period,
period=period)
return abs(interest_rate * (1 - tax_rate))
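# Worked illustration of the after-tax cost of debt above (hypothetical figures,
# not taken from any real filing): 40 of interest expense on 1000 of long-term
# debt gives a 4% pre-tax rate; at a 25% effective tax rate the after-tax cost
# is 0.04 * (1 - 0.25) = 0.03, i.e. 3%.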
def cost_of_equity_capm(stock: str, from_date: datetime = datetime.now() - timedelta(days=365 * 5),
to_date: datetime = datetime.now(),
beta_period='Monthly',
benchmark: str = '^GSPC'):
beta = asset_pricing_model.asset_pricing_wrapper(model='CAPM', portfolio=stock, benchmark=benchmark,
period=beta_period, from_date=from_date, to_date=to_date).params[1]
risk_free_rate = macro.cumulative_risk_free_rate(from_date=to_date - timedelta(days=365), to_date=to_date)
risk_premium = macro.cumulative_market_premium(from_date=to_date - timedelta(days=365), to_date=to_date)
return risk_free_rate + beta * risk_premium
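# Worked illustration of the CAPM estimate above (hypothetical inputs): with a
# trailing risk-free rate of 2%, a beta of 1.2 and a market risk premium of 5%,
# the cost of equity is 0.02 + 1.2 * 0.05 = 0.08, i.e. 8%.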
def cost_of_equity_ddm(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY',
diluted_shares=True):
stock_price = me.market_price(stock=stock, date=date, lookback_period=lookback_period)
this_period_dividend = me.dividend_per_share(stock=stock, date=date, lookback_period=lookback_period, period=period,
diluted_shares=diluted_shares)
growth_rate = FundamentalMetricsHelpers(metric=partial(me.dividend_per_share), stock=stock, date=date) \
.metric_growth_rate(lookback_period=lookback_period, period=period)
next_period_dividend = this_period_dividend * (1 + growth_rate)
return (next_period_dividend / stock_price) + growth_rate
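# Worked illustration of the dividend-discount estimate above (hypothetical
# inputs): a current dividend per share of 2.00 growing at 5% implies a
# next-period dividend of 2.10; on a 70.00 share price the cost of equity is
# 2.10 / 70.00 + 0.05 = 0.08, i.e. 8%.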
"""
Calculates the cost of equity for WACC using the bond yield plus risk premium method.
Parameters:
bond_yield (float): The company's interest rate on long-term debt.
risk_premium (float): The company's equity risk premium, usually 3% to 5%.
"""
def cost_of_equity_byprp(bond_yield: float, risk_premium: float):
return bond_yield + risk_premium
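# For example (hypothetical figures): a 6% yield on the company's long-term
# bonds plus a 4% risk premium implies a cost of equity of roughly 10%.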
def weighted_average_cost_of_capital(stock, date=datetime.now(), lookback_period: timedelta = timedelta(days=0),
lookback_lookback_period: timedelta = timedelta(days=365 * 5),
period: str = 'FY', beta_period='Monthly', benchmark: str = '^GSPC'):
from_date = date - lookback_period - lookback_lookback_period
to_date = date - lookback_period
dictio = {'Common Equity': (cost_of_equity_capm(stock=stock, from_date=from_date, to_date=to_date,
beta_period=beta_period,
benchmark=benchmark),
fi.total_shareholders_equity(stock=stock, date=date, lookback_period=lookback_period,
period=period)),
'Preferred Equity': (
cost_of_preferred_stock(stock=stock, date=date, lookback_period=lookback_period, period=period),
fi.preferred_stock_value(stock=stock, date=date, lookback_period=lookback_period,
period=period)),
'Debt': (cost_of_debt(stock=stock, date=date, lookback_period=lookback_period, period=period),
fi.total_long_term_debt(stock=stock, date=date, lookback_period=lookback_period, period=period))}
capitals = [np.nan_to_num(v[1]) for k, v in dictio.items()]
weights = [part / sum(capitals) for part in capitals]
costs = [np.nan_to_num(v[0]) for k, v in dictio.items()]
return np.sum([weight * cost for weight, cost in zip(weights, costs)])
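# Worked illustration of the weighting above (hypothetical capital structure):
# common equity of 700 at a 9% cost, preferred equity of 50 at 6% and debt of
# 250 at 3% give weights of 0.70, 0.05 and 0.25, so
# WACC = 0.70 * 0.09 + 0.05 * 0.06 + 0.25 * 0.03 = 0.0735, i.e. about 7.4%.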
if __name__ == '__main__':
print(weighted_average_cost_of_capital('AAPL'))
|
py | 1a52cee932043c5ba1860b0d1e0e65bf312c009d | # coding: utf-8
# %% [markdown]
# # 📃 Solution for Exercise M2.01
#
# The aim of this exercise is to make the following experiments:
#
# * train and test a support vector machine classifier through
# cross-validation;
# * study the effect of the parameter gamma of this classifier using a
# validation curve;
# * use a learning curve to determine the usefulness of adding new
# samples to the dataset when building a classifier.
#
# To make these experiments we will first load the blood transfusion dataset.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
import pandas as pd
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
data = blood_transfusion.drop(columns="Class")
target = blood_transfusion["Class"]
# %% [markdown]
# We will use a support vector machine classifier (SVM). In its simplest
# form, an SVM classifier is a linear classifier behaving similarly to a
# logistic regression. Indeed, the optimization used to find the optimal
# weights of the linear model is different, but we don't need to know these
# details for the exercise.
#
# Also, this classifier can become more flexible/expressive by using a
# so-called kernel that makes the model non-linear. Again, no knowledge of
# the underlying mathematics is required to accomplish this exercise.
#
# We will use an RBF kernel where a parameter `gamma` allows us to tune the
# flexibility of the model.
#
# First let's create a predictive pipeline made of:
#
# * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# with default parameter;
# * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# where the parameter `kernel` could be set to `"rbf"`. Note that this is the
# default.
# %%
# solution
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
model = make_pipeline(StandardScaler(), SVC())
# %% [markdown]
# Evaluate the generalization performance of your model by cross-validation with a
# `ShuffleSplit` scheme. To do so, you can use
# [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
# and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit`
# and leave the other parameters at their defaults.
# %%
# solution
from sklearn.model_selection import cross_validate, ShuffleSplit
cv = ShuffleSplit(random_state=0)
cv_results = cross_validate(model, data, target, cv=cv, n_jobs=2)
cv_results = pd.DataFrame(cv_results)
cv_results
# %% tags=["solution"]
print(
f"Accuracy score of our model:\n"
f"{cv_results['test_score'].mean():.3f} +/- "
f"{cv_results['test_score'].std():.3f}"
)
# %% [markdown]
# As previously mentioned, the parameter `gamma` is one of the parameters
# controlling under/over-fitting in a support vector machine with an RBF kernel.
#
# Evaluate the effect of the parameter `gamma` by using the
# [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html) function.
# You can leave the default `scoring=None` which is equivalent to
# `scoring="accuracy"` for classification problems. You can vary `gamma`
# between `1e-3` and `1e2` by generating samples on a logarithmic scale
# with the help of `np.logspace(-3, 2, num=30)`.
#
# Since we are manipulating a `Pipeline` the parameter name will be set to
# `svc__gamma` instead of only `gamma`. You can retrieve the parameter name
# using `model.get_params().keys()`. We will go into more detail regarding
# accessing and setting hyperparameters in the next section.
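# %% [markdown]
# As a quick check before computing the validation curve, we can list the
# parameter names exposed by the pipeline and confirm that `svc__gamma` is among
# them (the exact set of keys depends on the scikit-learn version installed).
# %%
print(sorted(model.get_params().keys()))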
# %%
# solution
import numpy as np
from sklearn.model_selection import validation_curve
gammas = np.logspace(-3, 2, num=30)
param_name = "svc__gamma"
train_scores, test_scores = validation_curve(
model, data, target, param_name=param_name, param_range=gammas, cv=cv,
n_jobs=2)
# %% [markdown]
# Plot the validation curve for the train and test scores.
# %%
# solution
import matplotlib.pyplot as plt
plt.errorbar(gammas, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training score')
plt.errorbar(gammas, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label='Testing score')
plt.legend()
plt.xscale("log")
plt.xlabel(r"Value of hyperparameter $\gamma$")
plt.ylabel("Accuracy score")
_ = plt.title("Validation score of support vector machine")
# %% [markdown] tags=["solution"]
# Looking at the curve, we can clearly identify the over-fitting regime of
# the SVC classifier when `gamma > 1`.
# The best setting is around `gamma = 1`; for `gamma < 1`, it is not entirely
# clear whether the classifier is under-fitting, but the testing score is
# worse than for `gamma = 1`.
# %% [markdown]
# Now, you can perform an analysis to check whether adding new samples to the
# dataset could help our model to better generalize. Compute the learning curve
# (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html))
# by computing the train and test scores for different training dataset size.
# Plot the train and test scores with respect to the number of samples.
# %%
# solution
from sklearn.model_selection import learning_curve
train_sizes = np.linspace(0.1, 1, num=10)
results = learning_curve(
model, data, target, train_sizes=train_sizes, cv=cv, n_jobs=2)
train_size, train_scores, test_scores = results[:3]
# %% tags=["solution"]
plt.errorbar(train_size, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training score')
plt.errorbar(train_size, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label='Testing score')
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
plt.xlabel("Number of samples in the training set")
plt.ylabel("Accuracy")
_ = plt.title("Learning curve for support vector machine")
# %% [markdown] tags=["solution"]
# We observe that adding new samples to the dataset does not improve the
# testing score. We can only conclude that the standard deviation of
# the training error decreases as more samples are added, which is not a
# surprise.
|
py | 1a52cf901acd959109a4c9d0b3e4b7bf5f65da7b | #!/usr/bin/env python3
import ssl
from pwncat.channel import ChannelError
from pwncat.channel.connect import Connect
class SSLConnect(Connect):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _socket_connected(self, client):
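# Upgrade the plain TCP socket to TLS as soon as it is connected. Hostname
# checking and certificate verification are disabled because the remote end
# typically presents a self-signed certificate (an assumption about common
# usage of this channel, not a requirement of the ssl module itself).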
try:
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.context.check_hostname = False
self.context.verify_mode = ssl.VerifyMode.CERT_NONE
client = self.context.wrap_socket(client)
except ssl.SSLError as exc:
raise ChannelError(self, str(exc))
super()._socket_connected(client)
|
py | 1a52d02d68becd4398cf147be06145bd2c118528 | import logging
def derive_initial_state_model(max_repeats,
num_symbols,
max_extra_states=15,
start_symbol=1000,
end_symbol=1001,
num_random_starts=20
):
"""derives initial state model using Expectation-Maximization (E-M) algorithm
Parameters
----------
max_repeats : dict
Dictionary where each key is a symbol and the corresponding value is
the maximum number of consecutive repeats of that symbol found in any
of the sequences.
num_symbols : int
Number of unique symbols, i.e., len(symbols).
max_extra_states : int
Maximum number of states to allow for each symbol.
Default is 15.
start_symbol : int
Numerical symbol for the start state. Default is 1000.
end_symbol : int
Numerical symbol for the end state. Default is 1001.
num_random_starts : int
Number of random starts to derive the best model. Default is 20.
Returns
-------
"""
for num_extra_states in range(1, max_extra_states+1):
logging.info(f'Trying model with {num_extra_states} extra_states.')
state_symbols = [start_symbol, end_symbol]
max_repeat_nums = [0, 0]
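# Illustration of the expected inputs (hypothetical, not computed by this
# function): for the sequences [0, 0, 1] and [1, 1, 1, 0], max_repeats would be
# {0: 2, 1: 3} and num_symbols would be 2, so each symbol is given
# 1 + num_extra_states duplicated states in the loop below.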
for symbol in range(0, num_symbols):
number_states = 1 + num_extra_states
state_symbols.extend([symbol] * number_states)
max_repeat_nums.extend([max_repeats[symbol]] * number_states)
|