id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes) |
---|---|---|
21469 | '''
Preorder Binary Tree
For a given Binary Tree of integers, print the pre-order traversal.
Input Format:
The first and the only line of input will contain the nodes data, all separated by a single space. Since -1 is used to indicate whether the left or right child of a node exists, it will not be a part of the node data.
Output Format:
The only line of output prints the pre-order traversal of the given binary tree.
Constraints:
1 <= N <= 10^6
Where N is the total number of nodes in the binary tree.
Time Limit: 1 sec
Sample Input 1:
5 6 10 2 3 -1 -1 -1 -1 -1 9 -1 -1
Sample Output 1:
5 6 2 3 9 10
Sample Input 2:
1 2 3 4 5 6 7 -1 -1 -1 -1 -1 -1 -1 -1
Sample Output 2:
1 2 4 5 3 6 7
'''
from sys import stdin, setrecursionlimit
import queue

setrecursionlimit(10 ** 6)


# Following the structure used for Binary Tree
class BinaryTreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def preOrder(root):
    # Print the root, then recurse on the left and right subtrees
    if root is None:
        return
    print(root.data, end=' ')
    preOrder(root.left)
    preOrder(root.right)


# Taking level-order input using fast I/O method
def takeInput():
    levelOrder = list(map(int, stdin.readline().strip().split(" ")))
    start = 0
    length = len(levelOrder)
    root = BinaryTreeNode(levelOrder[start])
    start += 1
    q = queue.Queue()
    q.put(root)
    while not q.empty():
        currentNode = q.get()
        leftChild = levelOrder[start]
        start += 1
        if leftChild != -1:
            leftNode = BinaryTreeNode(leftChild)
            currentNode.left = leftNode
            q.put(leftNode)
        rightChild = levelOrder[start]
        start += 1
        if rightChild != -1:
            rightNode = BinaryTreeNode(rightChild)
            currentNode.right = rightNode
            q.put(rightNode)
    return root


# Main
root = takeInput()
preOrder(root) | StarcoderdataPython |
1666686 | <filename>exercicios.py/ex053.py<gh_stars>1-10
frase = str(input('Digite uma frase: ')).strip()
# fc = len(frase) - frase.count(' ')
fs = frase.replace(' ', '')
cont = 0
# Count how many positions match their mirrored position (palindrome check)
for c in range(0, len(fs)):
    if fs[c] == fs[-c-1]:
        cont += 1
if cont == len(fs):
    print('O inverso de {} é {}'.format(frase, frase[::-1]))
    print('Temos um PALINDROMO')
else:
    print('O inverso de {} é {}'.format(frase, frase[::-1]))
    print('A frase digitada não é um PALINDROMO')
| StarcoderdataPython |
4811262 | <reponame>t27/adversarial-detection<gh_stars>0
import torch
import torch.nn as nn


class SkipBlock2(nn.Module):
    """Skip block from resnet paper (Figure 2) with additional 1x1 conv on input (to "select" and weight good channels)

    Args:
        nn ([type]): [description]
    """

    def __init__(
        self, in_channels, out_channels,
    ):
        super().__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.BatchNorm2d(out_channels),
        )
        self.input_conv = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels, kernel_size=1, stride=1
        )
        self.final_activation = nn.Sequential(nn.ReLU())

    def forward(self, x):
        x = self.block1(x)
        # since our kernels are 3x3, stride=1 and pad=1, the size of the feature map should be the same
        x = self.block2(x) + self.input_conv(x)
        x = self.final_activation(x)
        return x


class BaseModel(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        # fmt: off
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(16), nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            SkipBlock2(16, 16),
            SkipBlock2(16, 16),
            SkipBlock2(16, 16),
            SkipBlock2(16, 16),
            SkipBlock2(16, 16),
            # nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),nn.BatchNorm2d(128),nn.ReLU(),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(32), nn.ReLU(),
            SkipBlock2(32, 32),
            SkipBlock2(32, 32),
            SkipBlock2(32, 32),
            SkipBlock2(32, 32),
            SkipBlock2(32, 32),
            # nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),nn.BatchNorm2d(128),nn.ReLU(),
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
            SkipBlock2(64, 64),
            SkipBlock2(64, 64),
            SkipBlock2(64, 64),
            SkipBlock2(64, 64),
            SkipBlock2(64, 64),
            # nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),nn.BatchNorm2d(128),nn.ReLU(),
        )
        self.gap = nn.AvgPool2d(kernel_size=8)
        self.linear = nn.Linear(64, num_classes)
        # self.convlayers3 = nn.Sequential(
        #     SkipBlock2(128,128),
        #     SkipBlock2(128,128),
        #     nn.MaxPool2d(2,2),
        #     nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),nn.BatchNorm2d(256),nn.ReLU(),
        #     SkipBlock2(256,256),
        #     SkipBlock2(256,256),
        #     nn.Dropout2d(0.2),
        #     nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1),nn.BatchNorm2d(256),nn.ReLU(),
        #     nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1),nn.BatchNorm2d(256),nn.ReLU(),
        #     nn.MaxPool2d(2,2)
        # )
        # fmt: on

    def forward(self, x, layer_outputs=False):
        c1 = self.conv1(x)
        c2 = self.conv2(c1)
        c3 = self.conv3(c2)
        c4 = self.conv4(c3)
        gap = self.gap(c4)
        gap = torch.flatten(gap, start_dim=1)
        out = self.linear(gap)
        if layer_outputs:
            return out, c1, c2, c3, c4
        else:
            return out


if __name__ == "__main__":
    model = BaseModel()
    im = torch.ones((1, 3, 32, 32))
    breakpoint()
    model(im)
    # # or
    # outval, l1,l2 = model(img,True)
    # AD1 = AD(1,)
    # out = AD1(l1)
    # loss = criterion(out,target)
| StarcoderdataPython |
3285190 | <gh_stars>10-100
""" Input functions """
from .read_nc_emodnet import read_nc_emodnet
from .read_nc import read_nc
from .read_nc_ooi import read_nc_ooi
from .read_nc_imos import read_nc_imos
from .read_pkl import read_pkl
from .read_json import read_json
from .read_nc_moist import read_nc_moist
from .from_emso import from_emso
from .from_erddap import from_erddap
from .read_df import read_df
from .read_dat_td_pati import read_dat_td_pati
| StarcoderdataPython |
1747211 | <filename>twitter_api_v2/TwitterAPI.py
import json
import logging
from logging import Logger
from typing import Dict, List, Optional

import requests
from requests.models import Response

from twitter_api_v2 import Media, Poll, Tweet, User

logger: Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


class TwitterAPI:
    def __init__(self, bearer_token: str) -> None:
        self.__BEARER_TOKEN: str = bearer_token
        self.__REQUEST_HEADERS: Dict = {
            "Authorization": f"Bearer {self.__BEARER_TOKEN}"
        }
        self.__API_URL: str = "https://api.twitter.com/2"

    def get_tweet(
        self,
        id: str,
        expansions: List[Tweet.Expantion] = [],
        tweet_fields: List[Tweet.Field] = [],
        media_fields: List[Media.Field] = [],
        poll_fields: List[Poll.Field] = [],
    ) -> Tweet.Tweet:
        params: Optional[Dict[str, str]] = self._make_params(
            expansions, tweet_fields, media_fields, poll_fields
        )
        logger.debug(params)
        response: Response = requests.get(
            url=f"{self.__API_URL}/tweets/{id}",
            params=params,
            headers=self.__REQUEST_HEADERS,
        )
        if response.status_code != 200:
            raise Exception(
                f"Request returned an error: {response.status_code} {response.text}"
            )
        res_json = json.loads(response.text)
        logger.debug(res_json)
        if "includes" in res_json.keys():
            return Tweet.Tweet(**res_json["data"], **res_json["includes"])
        else:
            return Tweet.Tweet(**res_json["data"])

    def get_user_by_id(self, id: str, user_fields: List[User.Field] = []) -> User.User:
        params: Optional[Dict[str, str]] = None
        if user_fields:
            params = {}
            params["user.fields"] = ",".join(list(map(str, user_fields)))
        response: Response = requests.get(
            f"{self.__API_URL}/users/{id}",
            params=params,
            headers=self.__REQUEST_HEADERS,
        )
        if response.status_code != 200:
            raise Exception(
                f"Request returned an error: {response.status_code} {response.text}"
            )
        res_json = json.loads(response.text)
        logger.debug(res_json)
        return User.User(**res_json["data"])

    def get_user_by_username(
        self, username: str, user_fields: List[User.Field] = []
    ) -> User.User:
        params: Optional[Dict[str, str]] = None
        if user_fields:
            params = {}
            params["user.fields"] = ",".join(list(map(str, user_fields)))
        response: Response = requests.get(
            f"{self.__API_URL}/users/by/username/{username}",
            params=params,
            headers=self.__REQUEST_HEADERS,
        )
        if response.status_code != 200:
            raise Exception(
                f"Request returned an error: {response.status_code} {response.text}"
            )
        res_json = json.loads(response.text)
        logger.debug(res_json)
        return User.User(**res_json["data"])

    def _make_params(
        self,
        expansions: List[Tweet.Expantion],
        tweet_fields: List[Tweet.Field],
        media_fields: List[Media.Field],
        poll_fields: List[Poll.Field],
    ) -> Optional[Dict[str, str]]:
        if (
            (not expansions)
            and (not tweet_fields)
            and (not media_fields)
            and (not poll_fields)
        ):
            return None
        params: Dict[str, str] = {}
        if expansions:
            params["expansions"] = ",".join(list(map(str, expansions)))
        if tweet_fields:
            params["tweet.fields"] = ",".join(list(map(str, tweet_fields)))
        if media_fields:
            params["media.fields"] = ",".join(list(map(str, media_fields)))
        if poll_fields:
            params["poll.fields"] = ",".join(list(map(str, poll_fields)))
        return params
| StarcoderdataPython |
1686583 | '''
#!/usr/bin/python
#Author: <NAME>
#Version: 2.0
#Date: 27th March 2014
does the Caesar decryption of the output of ceaser.py; the cipher and shift are taken
from the user and the plain text is printed on the console
'''
import string

cipher = raw_input('enter cipher')
key = int(raw_input('enter key'))
letters = string.ascii_letters
dig = string.digits
for i in range(0, len(cipher)):
    if dig.find(cipher[i]) != -1:
        print "invalid input"
        exit()
for i in range(0, len(cipher)):
    a = letters.find(cipher[i])
    if a != -1:
        print str(letters[(a - key) % 26]),
| StarcoderdataPython |
3210984 | <filename>tests/serializers.py
from drf_toolbox.compat import django_pgfields_installed
from drf_toolbox.serializers import ModelSerializer
from tests import models as test_models


class ExplicitAPIEndpointsSerializer(ModelSerializer):
    class Meta:
        model = test_models.ExplicitAPIEndpointsModel


class NormalSerializer(ModelSerializer):
    class Meta:
        model = test_models.NormalModel


class ChildSerializer(ModelSerializer):
    class Meta:
        model = test_models.ChildModel


class ChildSerializerII(ModelSerializer):
    class Meta:
        model = test_models.ChildModel
        fields = {
            'normal': ('id', 'bacon'),
        }
        exclude = {}


class ChildSerializerIII(ModelSerializer):
    class Meta:
        model = test_models.ChildModel
        exclude = ('normal',)


class ReverseSerializer(ModelSerializer):
    class Meta:
        fields = ('bar', 'baz', 'bacon', 'related_model')
        model = test_models.NormalModel


class CreatedSerializer(ModelSerializer):
    class Meta:
        fields = ('created',)
        model = test_models.CreatedModel


if django_pgfields_installed:
    class PGFieldsSerializer(ModelSerializer):
        class Meta:
            model = test_models.PGFieldsModel
| StarcoderdataPython |
3359412 | from __future__ import division
import os, scipy.io
from test_Sony import toimage
import tensorflow.compat.v1 as tf
from test_Sony import network
from d2s_numpy import depth_to_space
tf.disable_v2_behavior()
import numpy as np
output_filepath = "./qemu_output.data"
processed_output_filepath = "./qemu_output.png"
output_shape = (1, 2128, 2128, 12)
output_data = np.fromfile(output_filepath, dtype=np.float32).reshape(output_shape)
output_data = depth_to_space(output_data, 2)
# np.save("test_output_rawdata_nopost_{}".format(dtype), output_data[0, :, :, :])
output_data = np.minimum(np.maximum(output_data, 0), 1) # Fits values between 0 and 1
# if "float" not in dtype or "bfloat" in dtype:
# output_data = output_data / (1/np.max) # map to the maximum
output = output_data[0, :, :, :]
# np.save("test_output_rawdata_withpost_{}".format(dtype), output_data[0, :, : , :])
# print(max(output))
toimage(output * 255, high=255, low=0, cmin=0, cmax=255).save(processed_output_filepath) | StarcoderdataPython |
3334435 | # Generated by Django 3.1.2 on 2021-04-27 12:58
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('sis', '0004_auto_20210427_1546'),
    ]

    operations = [
        migrations.CreateModel(
            name='ClassLevel',
            fields=[
                ('name', models.CharField(blank=True, max_length=15, null=True, verbose_name='Class Name')),
                ('id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Grade Number')),
                ('shortname', models.CharField(blank=True, max_length=15, null=True)),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        migrations.AddField(
            model_name='student',
            name='class_level',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sis.classlevel', verbose_name='Class level'),
        ),
    ]
| StarcoderdataPython |
1615585 | import unittest
from pynecone import Shell, Cmd


class TestCmd(Cmd):

    def __init__(self):
        super().__init__('cmd')

    def add_arguments(self, parser):
        parser.add_argument('arg')

    def get_help(self):
        return 'test cmd'

    def run(self, args):
        return args.arg


class TestShell(Shell):

    def __init__(self):
        super().__init__('shell')

    def get_commands(self):
        return [TestCmd()]

    def add_arguments(self, parser):
        pass

    def get_help(self):
        return 'test shell'


class TestRootShell(Shell):

    def __init__(self):
        super().__init__('rootshell')

    def get_commands(self):
        return [TestShell()]

    def add_arguments(self, parser):
        pass

    def get_help(self):
        return 'test root shell'


class TestSuperRootShell(Shell):

    def __init__(self):
        super().__init__('superrootshell')

    def get_commands(self):
        return [TestRootShell()]

    def add_arguments(self, parser):
        pass

    def get_help(self):
        return 'test superroot shell'


class ShellTestCase(unittest.TestCase):

    def test_command_should_return_value(self):
        self.assertEqual(TestCmd()(['hello']), 'hello')

    def test_shell_should_return_value(self):
        self.assertEqual(TestShell()(['cmd', 'hello']), 'hello')

    def test_root_shell_should_return_value(self):
        self.assertEqual(TestRootShell()(['shell', 'cmd', 'hello']), 'hello')

    def test_superroot_shell_should_return_value(self):
        self.assertEqual(TestSuperRootShell()(['rootshell', 'shell', 'cmd', 'hello']), 'hello')


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4825144 | <reponame>007sya/Project2021<filename>src/PythonLib/lib/utils/download_data/datasets.py
# %%
import os
from typing import List

import pandas as pd

from utils.download_data import data_dtypes as dtypes
from utils.download_data import download_safegraph_data
from utils.file_utils import file_type
from utils.path_utils import path_utils, paths


def get_brand_info_dataset():
    for f in path_utils.list_files_recursively(paths.brand_info):
        df = pd.read_csv(f)
        return df


def get_core_poi_by_city(city, region=None, save_data=True):
    file_name = os.path.join(paths.processed_datasets,
                             city,
                             "core_poi.csv")
    if os.path.isfile(file_name):
        return pd.read_csv(file_name, encoding="utf-8")
    chunks = []
    for f in path_utils.list_files_recursively(paths.core_poi):
        for chunk in pd.read_csv(f, chunksize=10_000):
            chunk = chunk[chunk["city"] == city]
            if region != None:
                chunk = chunk[chunk["region"] == region]
            chunks.append(chunk)
    final_df = pd.concat(chunks)
    if save_data:
        path_utils.create_dir_if_necessary(file_name)
        final_df.to_csv(file_name, encoding="utf-8", index=False)
    return final_df


def filter_census_df(path: str, columns: List[str], cbgs: List[str]):
    """ Filters census .csv given the columns and the cbgs
    """
    dfs = []
    # read the file per parts because it is 1gb large
    for chunk in pd.read_csv(path,
                             encoding="utf-8",
                             chunksize=10000,
                             dtype=dtypes.census_dtypes):
        chunk["census_block_group"] = (chunk["census_block_group"]
                                       .astype(int).astype(str))
        chunk = chunk[chunk["census_block_group"].isin(cbgs)]
        chunk = chunk[columns]
        dfs.append(chunk.copy())
    # concat the filtered chunks
    return pd.concat(dfs)


def get_census_metadata():
    files = [f for f in path_utils.list_files_recursively(paths.open_census_dir)
             if file_type.is_census_metadata(f)]
    for file in files:
        if "description" in file:
            break
    return pd.read_csv(file)


def get_lastest_home_pannel_summary(cbgs=None, donwload_most_recent=True):
    if donwload_most_recent:
        path = download_safegraph_data.download_lastest_home_pannel_summary()
    if cbgs is not None:
        return filter_census_df(path, ["census_block_group",
                                       "number_devices_residing"],
                                cbgs)
    return pd.read_csv(path, encoding="utf-8")


# %%
| StarcoderdataPython |
20584 | <gh_stars>1-10
#! /usr/bin/env python
from pylib import *
CopyConfigForDistribution(InstallRoot)
| StarcoderdataPython |
1732911 | """
Constants file: generates all constant values from the JSON file.
"""
from NBprocessing.src._constance_dict import data


class Const(object):

    def __init__(self):
        # check input file
        self.CHECK_DATABASE_INPUT = data["CHECK_INPUT"]["CHECK_DATABASE_INPUT"]
        self.CHECK_COLUMN_NAME = data["CHECK_INPUT"]["CHECK_COLUMN_NAME"]
        self.CHECK_COLUMN_IN_DATABASE = data["CHECK_INPUT"]["CHECK_COLUMN_IN_DATABASE"]
        self.CHECK_LIST_TUPLE_NONE = data["CHECK_INPUT"]["CHECK_LIST_TUPLE_NONE"]
        self.CHECK_THRESHOLD = data["CHECK_INPUT"]["CHECK_THRESHOLD"]
        self.CHECK_TYPE_DATE_TIME = data["CHECK_INPUT"]["CHECK_TYPE_DATE_TIME"]
        self.CHECK_BOUNDARIES = data["CHECK_INPUT"]["CHECK_BOUNDARIES"]
        self.CHECK_DICT = data["CHECK_INPUT"]["CHECK_DICT"]
        self.CHECK_NUM_CATEGORIES = data["CHECK_INPUT"]["CHECK_NUM_CATEGORIES"]
        self.CHECK_TITLE = data["CHECK_INPUT"]["CHECK_TITLE"]

        # categorical
        self.RED = data["CATEGORICAL"]["GENERAL_FUNCTIONS_CATEGORICAL"]["RED"]
        self.BLACK = data["CATEGORICAL"]["GENERAL_FUNCTIONS_CATEGORICAL"]["BLACK"]
        self.OUTPUT = data["CATEGORICAL"]["GENERAL_FUNCTIONS_CATEGORICAL"]["OUTPUT"]
        self.USER_INPUT = data["CATEGORICAL"]["NBCATEGORICAL_CLASS"]["REMOVE_CATEGORIES"]["USER_INPUT"]
        self.DATABASE_SHAPE = data["CATEGORICAL"]["NBCATEGORICAL_CLASS"]["REMOVE_CATEGORIES"]["DATABASE_SHAPE"]
        self.FIRST = data["CATEGORICAL"]["NBCATEGORICAL_CLASS"]["CATEGORIES_NOT_IN_COMMON"]["FIRST"]
        self.SECOND = data["CATEGORICAL"]["NBCATEGORICAL_CLASS"]["CATEGORIES_NOT_IN_COMMON"]["SECOND"]

        # continuous
        self.DROP_ROW = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["REMOVE_OUTLIERS_BY_BOUNDARIES"]["DROP_ROW"]
        self.TYPE_ERROR = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["REMOVE_OUTLIERS_BY_BOUNDARIES"]["TYPE_ERROR"]
        self.OUTLIERS_ABOVE = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"]["OUTLIERS_ABOVE"]
        self.SUM_OUTLIERS_ABOVE = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"] \
            ["SUM_OUTLIERS_ABOVE"]
        self.OUTLIERS_UNDER = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"] \
            ["OUTLIERS_UNDER"]
        self.SUM_OUTLIERS_UNDER = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"] \
            ["SUM_OUTLIERS_UNDER"]
        self.SUM_OUTLIERS_TOT = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"] \
            ["SUM_OUTLIERS_TOT"]
        self.KEY_ERROR = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["GET_NUM_OUTLIERS_BY_VALUE"]["KEY_ERROR"]
        self.SHAPE_BEFORE = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["REMOVE_OUTLIERS_BY_VALUE"]["SHAPE_BEFORE"]
        self.SHAPE_AFTER = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["REMOVE_OUTLIERS_BY_VALUE"]["SHAPE_AFTER"]
        self.DATA_LOST = data["CONTINUOUS"]["NBCONTINUOUS_CLASS"]["REMOVE_OUTLIERS_BY_VALUE"]["DATA_LOST"]

        # plot
        self.NULL_HEAT_MAP_TITLE = data["PLOT"]["NBPLOT_CLASS"]["NULL_HEAT_MAP_TITLE"]
        self.FEATURE_CORR = data["PLOT"]["NBPLOT_CLASS"]["FEATURE_CORR"]

        # general
        self.SPLIT_AND_CHECK = data["GENERAL"]["SPLIT_AND_CHECK"]
| StarcoderdataPython |
196114 | <gh_stars>1-10
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Import the numpy package as np
import numpy as np
# Create a numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out type of np_baseball
print(type(np_baseball)) | StarcoderdataPython |
1615860 | <gh_stars>1-10
class Room():
    def __init__(self, array_pos):
        self.pos_room = array_pos | StarcoderdataPython |
1767325 | <reponame>GustavoTelles/Python_Coursera
'''
Sum all the digits of an integer typed by the user, using a while loop.
'''
n1 = int(input('Digite um número: '))
soma = 0
total = 0
while n1 != 0:
    soma = int(n1 % 10)
    total = soma + total
    n1 = n1 // 10
print('A soma dos números é {}'.format(total))
| StarcoderdataPython |
132679 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype(CategoricalDtype(list('cab')))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ['A', 0]) # GH 21052
def test_sort_index_multiindex(self, level):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([
[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 2],
[2, 1, 3]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[3, 4],
[1, 2]], index=expected_mi)
result = df.sort_index(level=level)
assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 3],
[2, 1, 2]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[1, 2],
[3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)),
bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
result = model.groupby(['X1', 'X2'], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0),
(0.0, 0.5), (0.5, 3.0)],
closed='right')
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
def test_sort_index_na_position_with_categories(self):
# GH 22556
# Positioning missing value properly when column is Categorical.
categories = ['A', 'B', 'C']
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = 'first'
na_position_last = 'last'
column_name = 'c'
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices, reverse=True)
df = pd.DataFrame({
column_name: pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=categories,
ordered=True)})
# sort ascending with na first
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + categories,
categories=categories,
ordered=True)
}, index=na_indices + category_indices)
assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(categories + list_of_nans,
categories=categories,
ordered=True)
}, index=category_indices + na_indices)
assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + reversed_categories,
categories=categories,
ordered=True)
}, index=reversed_na_indices + reversed_category_indices)
assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(reversed_categories + list_of_nans,
categories=categories,
ordered=True)
}, index=reversed_category_indices + reversed_na_indices)
assert_frame_equal(result, expected)
def test_sort_index_na_position_with_categories_raises(self):
df = pd.DataFrame({
'c': pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=['A', 'B', 'C'],
ordered=True)})
with pytest.raises(ValueError):
df.sort_values(by='c',
ascending=False,
na_position='bad_position')
| StarcoderdataPython |
1686733 | from flask import current_app
from fnmatch import fnmatch


class Manager(object):
    def __init__(self):
        self.handlers = []

    def register(self, cls, matches):
        self.handlers.append((cls, matches))

    def process(self, artifact):
        job = artifact.job
        artifact_name = artifact.name

        matches = []
        if not artifact.type:
            for cls, patterns in self.handlers:
                for pattern in patterns:
                    if fnmatch(artifact_name, pattern):
                        matches.append(cls)
        else:
            for cls, _ in self.handlers:
                if artifact.type in cls.supported_types:
                    matches.append(cls)

        for cls in matches:
            handler = cls(job)
            fp = artifact.file.get_file()
            try:
                current_app.logger.info(
                    "artifact.process",
                    extra={"handler_cls": cls.__name__, "artifact_id": artifact.id},
                )
                handler.process(fp)
            finally:
                fp.close()
| StarcoderdataPython |
57970 | <reponame>jazeved0/cinema-system<gh_stars>1-10
from flask import Flask, g, jsonify
from flask.json import JSONEncoder
from flask_restful import Api, inputs
from flask_cors import CORS
from sqlalchemy.exc import SQLAlchemyError
from datetime import datetime, date
from auth import authenticated, get_failed_auth_resp, hash_password, \
provision_jwt, requires_admin, requires_manager, requires_customer
from config import states
from models import TUserDerived, Company, Visit, User, TCompanyDerived, \
Theater, Manager, Movie, TUsed, Creditcard
from register import registration
from util import to_dict, remove_non_numeric, DBResource, parse_args, Param
"""
Contains the core of the API logic, including RESTful endpoints and custom
decorator functions
"""
class ISOEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, date):
return o.isoformat()
return super().default(o)
Flask.json_encoder = ISOEncoder
app = Flask(__name__)
cors = CORS(app)
@app.teardown_appcontext
def teardown_db(exec):
db = g.pop('db', None)
if db is not None:
db.close()
class Login(DBResource):
def post(self):
username, password = parse_args("username", "password")
user = self.db.query(TUserDerived).filter(
TUserDerived.c.username == username).first()
if user is None:
return get_failed_auth_resp(message="Incorrect username or password")
# Perform password hash equality check
password_hash = hash_password(password, user)
if user.password != password_hash:
return get_failed_auth_resp(message="Incorrect username or password")
else:
print("Successful authentication: {}".format(user.username))
token = provision_jwt(user).get_token().decode()
return jsonify({"token": token})
class Companies(DBResource):
def get(self):
only_names, = parse_args(Param("only_names", optional=True, type=inputs.boolean))
if only_names:
companies = self.db.query(Company.name).all()
return jsonify({"companies": [c.name for c in companies]})
else:
return self.get_all_auth()
@authenticated
@requires_admin
def get_all_auth(self):
companies = self.db.query(TCompanyDerived).all()
return jsonify({"companies": to_dict(companies, table=TCompanyDerived)})
class CompaniesTheaters(DBResource):
@authenticated
@requires_admin
def get(self, name):
theaters = self.db.query(Theater).filter(
Theater.companyname == name
)
return jsonify({"theaters": to_dict(theaters)})
class CompaniesManagers(DBResource):
@authenticated
@requires_admin
def get(self, name):
managers = self.db.query(Manager).filter(
Manager.companyname == name
)
# Remove hashed password from output
return jsonify({"managers": to_dict(managers, scrub=["password"])})
class CompaniesNumTheaters(DBResource):
def get(self, company):
num = self.db.query(Theater).filter(Theater.companyname == company.replace("%20", " ")).count()
return jsonify({"num_theaters": num})
class CompaniesNumEmployees(DBResource):
def get(self, company):
num = self.db.query(Manager).filter(Manager.companyname == company).count()
return jsonify({"num_employees": num})
class CompaniesNumCities(DBResource):
def get(self, company):
num = self.db.query(Theater).distinct(Theater.city).filter(
Theater.companyname == company.replace("%20", " ")).count()
return jsonify({"num_cities": num})
class Users(DBResource):
@authenticated
@requires_admin
def get(self):
users = self.db.query(TUserDerived).all()
# Remove hashed passwords from API output
return jsonify({"users": to_dict(users, table=TUserDerived, scrub=["password"])})
class UserApproveResource(DBResource):
@authenticated
@requires_admin
def put(self, username):
user = self.db.query(User).filter(User.username == username).first()
if not user:
return f"Cannot find user {username}", 404
if user.status == "Approved":
# Not changed
return "Cannot approve already approved user", 304
try:
self.db.query(User).filter(User.username == username).update(
{"status": "Approved"})
except SQLAlchemyError:
# Forbidden
return "Could not approve user", 403
else:
self.db.commit()
return "Success", 200
class UserDeclineResource(DBResource):
@authenticated
@requires_admin
def put(self, username):
user = self.db.query(User).filter(User.username == username).first()
if not user:
return f"Cannot find user {username}", 404
if user.status == "Approved":
# Bad request
return "Cannot decline already approved user", 400
elif user.status == "Declined":
# Not changed
return "Cannot decline already declined user", 304
try:
self.db.query(User).filter(User.username == username).update(
{"status": "Approved"})
except SQLAlchemyError:
# Forbidden
return "Could not decline user", 403
else:
self.db.commit()
return "Success", 200
class EligibleManagers(DBResource):
@authenticated
@requires_admin
def get(self):
try:
managers = self.db.query(Manager).outerjoin(Theater).filter(
Theater.theatername == None).all() # noqa E711
except SQLAlchemyError:
return "Could not find eligible managers", 403
else:
return jsonify({"managers": to_dict(managers, fields=[
"username", "companyname", "firstname", "lastname"])})
class Theaters(DBResource):
@authenticated
@requires_admin
def post(self):
theatername, companyname, state, city, zipcode, capacity, manager, street = parse_args(
"theatername",
"companyname",
"state",
"city",
"zipcode",
"capacity",
"manager",
"street"
)
# Validate that company is valid
company = self.db.query(Company).filter(Company.name == companyname).first()
if not company:
return f"Company name {companyname} does not exist", 400
# Validate that name is unique within company
theater = self.db.query(Theater).filter(
Theater.theatername == theatername, Theater.companyname == companyname).first()
if theater:
return f"Theater name {theatername} must be unique", 400
# Validate that zipcode is in the correct format
formatted_zipcode = remove_non_numeric(zipcode)
if len(formatted_zipcode) != 5 or formatted_zipcode != zipcode:
return f"Zipcode {formatted_zipcode} must be 5 digits long", 400
# Validate state
if state.upper() not in states:
return "State must be a valid two-letter state", 400
# Validate capacity
if not capacity.isdigit() or int(capacity) <= 0:
return "Capacity must be an integer greater than 1", 400
# Validate that manager exists
manager_object = self.db.query(Manager).filter(Manager.username == manager).first()
if not manager_object:
return f"Manager @{manager} does not exist", 400
# Validate valid manager-company pairing
if manager_object.companyname != companyname:
return f"Manager @{manager} does not work for company {companyname}", 400
# Validate that manager is not managing any other theaters
manager_object = self.db.query(Manager).outerjoin(Theater).filter(
Theater.theatername != None, Manager.username == manager).first() # noqa E711
if manager_object:
return f"Manager @{manager} is already managing a theater", 400
try:
theater = Theater(
theatername=theatername,
companyname=companyname,
state=state,
city=city,
zipcode=zipcode,
capacity=capacity,
manager=manager,
street=street
)
self.db.add(theater)
self.db.commit()
except SQLAlchemyError:
return "Could not create theater", 403
else:
return 201
@authenticated
def get(self, jwt):
theaters = self.db.query(Theater).all()
return jsonify({"theaters": to_dict(theaters)})
class Movies(DBResource):
@authenticated
@requires_admin
def post(self):
name, duration, releasedate = parse_args("name", "duration", "releasedate")
# Validate that date is valid
try:
date = datetime.strptime(releasedate, "%Y-%m-%d")
except ValueError:
return "Duration must be a date in the format YYYY-MM-DD", 400
# Validate that name/release date is unique
movie = self.db.query(Movie).filter(
Movie.name == name, Movie.releasedate == date).first()
if movie:
return f"Movie name and release date must be unique", 400
# Validate that duration is valid
if not duration.isdigit():
return "Duration must be an integer", 400
try:
# Add to database
movie = Movie(name=name, duration=duration, releasedate=date)
self.db.add(movie)
self.db.commit()
except SQLAlchemyError:
return "Could not create movie", 403
else:
return 201
@authenticated
@requires_manager
def get(self, jwt):
movies = self.db.query(Movie).all()
return jsonify({'movies': to_dict(movies)})
class MoviesSchedule(DBResource):
@authenticated
@requires_manager
def post(self, jwt):
moviename, releasedate, playdate = parse_args("moviename", "releasedate", "playdate")
# Validate that the movie exists
movie = self.db.query(Movie).filter(
Movie.name == moviename, Movie.releasedate == releasedate).first()
if not movie:
return f"Movie name and release date must correspond to an already-created movie", 400
try:
self.db.execute(
"INSERT INTO movieplay (Date, MovieName, ReleaseDate, TheaterName, CompanyName) "
"VALUES (:playdate, :moviename, :releasedate, ("
" SELECT TheaterName FROM Theater WHERE Manager = :username), ("
" SELECT CompanyName FROM Theater WHERE Manager = :username));",
{"playdate": playdate, "moviename": moviename, "releasedate": releasedate, "username": jwt.username}
)
self.db.commit()
except SQLAlchemyError:
return "Could not schedule movie", 403
else:
return 204
class ExploreMovie(DBResource):
@authenticated
@requires_customer
def get(self, jwt):
try:
result = self.db.execute("select * from movieplay natural join theater").fetchall()
except SQLAlchemyError:
return "Could not get movies", 403
else:
return jsonify({'movies': to_dict(result)})
class TheaterOverview(DBResource):
@authenticated
@requires_manager
def get(self, jwt):
result = self.db.execute(
"SELECT movie.Name AS movName, movie.Duration as movDuration,"
" movie.ReleaseDate AS movReleaseDate, t1.Date as movPlayDate "
"FROM ("
" SELECT movieplay.*"
" FROM movieplay"
" LEFT JOIN theater ON movieplay.TheaterName = theater.TheaterName"
" AND movieplay.CompanyName = theater.CompanyName"
" WHERE :username = theater.Manager"
") AS t1 "
"RIGHT JOIN movie ON t1.MovieName = movie.Name",
{"username": jwt.username}
).fetchall()
return jsonify({'movies': to_dict(result)})
class UserCreditCards(DBResource):
@authenticated
@requires_customer
def get(self, jwt):
try:
result = self.db.query(Creditcard).filter(
Creditcard.owner == jwt.username).all()
except SQLAlchemyError:
return "Could not get credit cards", 403
else:
return jsonify({'creditCards': [c.creditcardnum for c in result]})
class Visits(DBResource):
@authenticated
def get(self, jwt):
visits = self.db.query(Visit, Theater).join(
Visit, Theater.theatername == Visit.theatername).all()
return jsonify({'visits': to_dict(visits)})
@authenticated
def post(self, jwt):
date, theatername, companyname = parse_args("date", "theatername", "companyname")
visit = Visit(date=date, username=jwt.username, theatername=theatername,
companyname=companyname)
try:
self.db.add(visit)
self.db.commit()
except SQLAlchemyError:
return "Could not visit theater", 403
else:
return 201
class MovieViews(DBResource):
@authenticated
@requires_customer
def get(self, jwt):
views = self.db.execute(
"select * from used natural join creditcard where owner=:user", {"user": jwt.username}).fetchall()
return jsonify({'views': to_dict(views)})
@authenticated
@requires_customer
def post(self, jwt):
moviename, releasedate, playdate, theatername, companyname, creditcardnum = parse_args(
"moviename", "releasedate", "playdate", "theatername", "companyname", "creditcardnum")
# Validate that the user hasn't watched 3 movies on the same date
result = self.db.execute("SELECT COUNT(*) as playcount, PlayDate FROM used "
"NATURAL JOIN creditcard "
"WHERE creditcard.Owner = :owner "
"AND PlayDate = :playdate "
"GROUP BY Owner, PlayDate "
"HAVING COUNT(*) >= 3", {
"playdate": playdate,
"owner": jwt.username
}).fetchall()
if result:
return "Can not watch more than 3 movies on the same day", 403
# Validate that the user hasn't already seen the movie
views = self.db.query(TUsed).filter(TUsed.c.moviename == moviename, TUsed.c.playdate == playdate,
TUsed.c.releasedate == releasedate, TUsed.c.theatername == theatername,
TUsed.c.companyname == companyname).all()
creditcards = {v.creditcardnum for v in views}
user_creditcards = {c.creditcardnum for c in self.db.query(Creditcard).filter(
Creditcard.owner == jwt.username).all()}
if creditcards.intersection(user_creditcards):
return "Can not watch movie that you have already seen", 403
try:
self.db.execute(
"INSERT INTO used (creditcardnum, playdate, moviename, releasedate, theatername, companyname) "
"VALUES (:ccn, :pd, :mn, :rd, :tn, :cn)", {
"ccn": creditcardnum,
"pd": playdate,
"tn": theatername,
"mn": moviename,
"rd": releasedate,
"cn": companyname,
})
self.db.execute(
"INSERT INTO visit (date, username, theatername, companyname) "
"VALUES (:date, :user, :tn, :cn)", {
"date": playdate,
"user": jwt.username,
"tn": theatername,
"cn": companyname,
})
self.db.commit()
except SQLAlchemyError:
return "Could not view movie", 403
else:
return 201
# Uptime checker route
@app.route('/status', methods=['GET'])
def status():
return "All systems operational", 204
def app_factory():
api = Api(app)
api.add_resource(Login, "/login")
app.register_blueprint(registration, url_prefix="/register")
api.add_resource(Visits, "/visits")
api.add_resource(Theaters, "/theaters")
api.add_resource(EligibleManagers, "/managers/eligible")
api.add_resource(TheaterOverview, "/manager/overview")
api.add_resource(Companies, "/companies")
api.add_resource(CompaniesManagers, "/companies/<string:name>/managers")
api.add_resource(CompaniesTheaters, "/companies/<string:name>/theaters")
api.add_resource(Movies, "/movies")
api.add_resource(MovieViews, "/movies/views")
api.add_resource(MoviesSchedule, "/movies/schedule")
api.add_resource(ExploreMovie, "/movies/explore")
api.add_resource(Users, "/users")
api.add_resource(UserCreditCards, "/users/credit-cards")
api.add_resource(UserApproveResource, "/users/<username>/approve")
api.add_resource(UserDeclineResource, "/users/<username>/decline")
return app
| StarcoderdataPython |
3230870 | <reponame>yngtodd/seeds
"""
Tests for `seeds` module.
"""
import pytest
from seeds import seeds
class TestSeeds(object):
@classmethod
def setup_class(cls):
pass
def test_something(self):
pass
@classmethod
def teardown_class(cls):
pass
| StarcoderdataPython |
3226242 | <reponame>kagemeka/atcoder-submissions<gh_stars>1-10
n = int(input())
ls = []
for i in range(n):
    ls.append(int(input()))
ls.sort()
layer = 1
current = ls[0]
for i in range(n):
    if current != ls[i]:
        layer += 1
        current = ls[i]
print(layer)
| StarcoderdataPython |
1797765 | <filename>puls/views/admin/suppliers.py
# coding=utf-8
from __future__ import absolute_import, unicode_literals, division
from puls.models import Supplier, SupplierForm
from puls.compat import unquote_plus
from puls import app, paginate

import flask


@app.route("/admin/suppliers/", methods=["GET", "POST"],
           endpoint="manage_suppliers")
@app.route("/admin/suppliers/<int:page>/")
@app.template("admin/suppliers/list.html")
@app.logged_in
def list_suppliers(page=1):
    term = flask.request.form.get("term", "")
    if term:
        page = Supplier.search(term)
    else:
        page = paginate(Supplier.objects, page, 20)
    return {"term": term,
            "page": page}


@app.route("/admin/manufacturers/search/")
@app.logged_in
def search_suppliers():
    term = flask.request.args.get("term", "")
    if term:
        results = Supplier.search(term)
    else:
        results = Supplier.objects.limit(100)
    return flask.jsonify({"results": [{"id": str(item.id),
                                       "text": str(item.name)}
                                      for item in results]})


@app.route("/admin/suppliers/new/", methods=["GET", "POST"],
           endpoint="add_supplier")
@app.route("/admin/suppliers/<id>/edit/", methods=["GET", "POST"])
@app.template("admin/suppliers/form.html")
@app.logged_in
def edit_supplier(id=None):
    if id is None:
        item = None
    else:
        item = Supplier.objects.get_or_404(id=unquote_plus(id))
    form = SupplierForm(obj=item)
    if form.validate_on_submit():
        if not item:
            item = Supplier()
        form.populate_obj(item)
        item.save()
        flask.flash("The supplier was saved", "success")
        return flask.redirect(flask.url_for("manage_suppliers"))
    return {"form": form,
            "item": item}


@app.route("/admin/suppliers/<id>/delete/")
@app.logged_in
def delete_supplier(id):
    item = Supplier.objects.get_or_404(id=unquote_plus(id))
    item.delete()
    flask.flash("Your supplier has been deleted!", "warning")
    return flask.redirect(flask.url_for("manage_suppliers"))
| StarcoderdataPython |
3368648 | <filename>contrapartes/views.py
from django.shortcuts import render
from .models import *
from .forms import *
from notas.models import *
from notas.forms import *
from agendas.models import *
from agendas.forms import *
from foros.forms import *
from publicaciones.models import *
from publicaciones.forms import *
from galerias.models import *
from galerias.forms import *
from catalogo.models import *
from catalogo.forms import *
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.forms import inlineformset_factory
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
# Create your views here.
@login_required
def perfil_editar(request,template='admin/editar_user.html'):
object = get_object_or_404(UserProfile, user=request.user)
if request.method == 'POST':
form = UserForm(request.POST, instance=request.user)
form_avatar = UserProfileForm(request.POST,files=request.FILES,instance=object)
if form.is_valid() and form_avatar.is_valid():
form.save()
form_avatar.save()
return HttpResponseRedirect('/accounts/profile/')
else:
form = UserForm(instance=request.user)
form_avatar = UserProfileForm(instance=object)
return render(request, template, locals())
@login_required
def editar_contraparte(request, slug, template='admin/editar_contraparte.html'):
contra = get_object_or_404(Contraparte, slug=slug)
FormSetInit = inlineformset_factory(Contraparte, Redes, form=RedesFrom,extra=11,max_num=11)
if request.method == 'POST':
form = ContraparteForms(data=request.POST,instance=contra,files=request.FILES)
formset = FormSetInit(request.POST,request.FILES,instance=contra)
if form.is_valid() and formset.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.user = request.user
form_uncommited.save()
formset.save()
return HttpResponseRedirect('/accounts/profile/')
else:
form = ContraparteForms(instance=contra)
formset = FormSetInit(instance=contra)
return render(request, template, locals())
@login_required
def notas_contraparte(request, template='admin/notaadmin.html'):
object_list = Notas.objects.filter(user_id = request.user.id)
dic_temas = {}
for tema in Temas.objects.all():
count = Notas.objects.filter(temas = tema,user = request.user).count()
if count != 0:
dic_temas[tema] = count
return render(request, template, locals())
@login_required
def redactar_notas_contraparte(request, template='admin/redactar_notaadmin.html'):
if request.method == 'POST':
form = NotasForms(request.POST, request.FILES)
if form.is_valid():
nota = form.save(commit=False)
nota.user = request.user
nota.correo_enviado = False
nota.save()
form.save_m2m()
if nota.publicada == True:
try:
subject, from_email = 'Nueva nota', '<EMAIL>'
text_content = render_to_string('email/nota.txt', {'nota': nota,})
html_content = render_to_string('email/nota.txt', {'nota': nota,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
enviado = 1
nota.correo_enviado = True
nota.save()
return HttpResponseRedirect('/contrapartes/notas/')
except:
pass
else:
form = NotasForms()
return render(request, template, locals())
@login_required
def filtro_temas_contra(request, temas, template='admin/notaadmin.html'):
object_list = Notas.objects.filter(user_id = request.user.id,temas__nombre = temas).order_by('-id')
dic_temas = {}
for tema in Temas.objects.all():
count = Notas.objects.filter(temas = tema).count()
dic_temas[tema] = count
return render(request, template, locals())
@login_required
def editar_nota(request, slug, template='admin/editar_nota.html'):
object = get_object_or_404(Notas, slug=slug)
if request.method == 'POST':
form = NotasForms(request.POST, request.FILES, instance=object)
if form.is_valid():
form_uncommited = form.save()
form_uncommited.user = request.user
form_uncommited.save()
if form_uncommited.publicada == True and form_uncommited.correo_enviado == False:
try:
subject, from_email = 'Nueva nota', '<EMAIL>'
text_content = render_to_string('email/nota.txt', {'nota': form_uncommited,})
html_content = render_to_string('email/nota.txt', {'nota': form_uncommited,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
form_uncommited.correo_enviado = True
form_uncommited.save()
return HttpResponseRedirect('/contrapartes/notas/')
except:
pass
return HttpResponseRedirect('/contrapartes/notas/')
else:
form = NotasForms(instance=object)
return render(request, template, locals())
@login_required
def eliminar_notas_contraparte(request, slug):
nota = Notas.objects.filter(slug = slug).delete()
return HttpResponseRedirect('/contrapartes/notas/')
@login_required
def eventos_contraparte(request, template='admin/list_eventos.html'):
object_list = Agendas.objects.filter(user_id = request.user.id)
return render(request, template, locals())
@login_required
def nuevo_evento_contraparte(request, template='admin/nuevo_evento.html'):
FormSetInit = inlineformset_factory(Agendas,AgendaEvento,form=AgendaEventoForm,extra=12,max_num=12)
FormSetInit2 = inlineformset_factory(Agendas,DocumentosEvento,form=DocuForm,extra=6,max_num=6)
if request.method == 'POST':
form = AgendaForm(request.POST, request.FILES)
formset = FormSetInit(request.POST,request.FILES)
formset2 = FormSetInit2(request.POST,request.FILES)
if form.is_valid() and formset.is_valid() and formset2.is_valid():
evento = form.save(commit=False)
evento.user = request.user
evento.correo_enviado = False
evento.save()
instances = formset.save(commit=False)
for instance in instances:
instance.evento = evento
instance.save()
formset.save_m2m()
instances2 = formset2.save(commit=False)
for instance2 in instances2:
instance2.evento = evento
instance2.save()
formset2.save_m2m()
if evento.publico == True:
try:
subject, from_email = 'Nuevo evento', '<EMAIL>'
text_content = render_to_string('email/evento.txt', {'evento': evento,})
html_content = render_to_string('email/evento.txt', {'evento': evento,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
evento.correo_enviado = True
evento.save()
return HttpResponseRedirect('/contrapartes/eventos/')
except:
pass
else:
form = AgendaForm()
formset = FormSetInit()
formset2 = FormSetInit2()
return render(request, template, locals())
@login_required
def eliminar_evento_contraparte(request, slug):
evento = Agendas.objects.get(slug = slug).delete()
return HttpResponseRedirect('/contrapartes/eventos/')
@login_required
def editar_evento(request, slug, template='admin/editar_evento.html'):
object = get_object_or_404(Agendas, slug=slug)
FormSetInit = inlineformset_factory(Agendas,AgendaEvento,form=AgendaEventoForm,extra=12,max_num=12)
FormSetInit2 = inlineformset_factory(Agendas,DocumentosEvento,form=DocuForm,extra=6,max_num=6)
if request.method == 'POST':
form = AgendaForm(request.POST, request.FILES,instance=object)
formset = FormSetInit(request.POST,request.FILES,instance=object)
formset2 = FormSetInit2(request.POST,request.FILES,instance=object)
if form.is_valid() and formset.is_valid() and formset2.is_valid():
evento = form.save(commit=False)
evento.user = request.user
evento.correo_enviado = False
evento.save()
instances = formset.save(commit=False)
for instance in instances:
instance.evento = evento
instance.save()
formset.save_m2m()
instances2 = formset2.save(commit=False)
for instance2 in instances2:
instance2.evento = evento
instance2.save()
formset2.save_m2m()
if evento.publico == True and evento.correo_enviado == False:
try:
subject, from_email = 'Nuevo evento', '<EMAIL>'
text_content = render_to_string('email/evento.txt', {'evento': evento,})
html_content = render_to_string('email/evento.txt', {'evento': evento,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
evento.correo_enviado = True
evento.save()
return HttpResponseRedirect('/contrapartes/eventos/')
except:
pass
return HttpResponseRedirect('/contrapartes/eventos/')
else:
form = AgendaForm(instance=object)
formset = FormSetInit(instance=object)
formset2 = FormSetInit2(instance=object)
return render(request, template, locals())
# forums (foros)
@login_required
def list_foros(request, template='admin/list_foros.html'):
current_date = datetime.date.today()
object_list = Foros.objects.order_by('-id')
mis_foros = Foros.objects.filter(contraparte = request.user.id).order_by('-id')
return render(request, template, locals())
@login_required
def eliminar_foro(request, id):
foro = Foros.objects.filter(id = id).delete()
return HttpResponseRedirect('/contrapartes/foros/')
@login_required
def editar_foro(request, id, template='admin/editar_foro.html'):
object = get_object_or_404(Foros, id=id)
if request.method == 'POST':
form = ForosForm(request.POST, request.FILES, instance=object)
if form.is_valid():
form_uncommited = form.save()
form_uncommited.contraparte = request.user
form_uncommited.save()
return HttpResponseRedirect('/contrapartes/foros/')
else:
form = ForosForm(instance=object)
return render(request, template, locals())
@login_required
def ver_foro(request, id, template='admin/ver_foro.html'):
current_date = datetime.date.today()
discusion = get_object_or_404(Foros, id=id)
aportes = Aportes.objects.filter(foro = id).order_by('-id')
if request.method == 'POST':
form = AporteForm(request.POST)
if form.is_valid():
aporte = form.save(commit=False)
aporte.foro = discusion
aporte.user = request.user
aporte.save()
try:
subject, from_email = 'Nuevo aporte al foro ' + discusion.nombre, '<EMAIL>'
text_content = render_to_string('email/aporte.txt', {'aporte': aporte,})
html_content = render_to_string('email/aporte.txt', {'aporte': aporte,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
return redirect('ver-foro', id=discusion.id)
except:
pass
else:
form = AporteForm()
return render(request, template, locals())
from interteam.tasks import *
import datetime
@login_required
def agregar_foro(request, template='admin/nuevo_foro.html'):
if request.method == 'POST':
form = ForosForm(request.POST, request.FILES)
if form.is_valid():
foro = form.save(commit=False)
foro.contraparte = request.user
foro.correo_enviado = False
foro.save()
hoy = datetime.date.today()
if foro.apertura == hoy and foro.correo_enviado == False:
try:
subject, from_email = 'Nuevo foro', '<EMAIL>'
text_content = render_to_string('email/foro.txt', {'foro': foro,})
html_content = render_to_string('email/foro.txt', {'foro': foro,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
foro.correo_enviado = True
foro.save()
return HttpResponseRedirect('/contrapartes/foros/')
except:
pass
else:
id = foro.id
user = request.user.id
send_mail_foro.apply_async((id,user),eta=foro.apertura)
return HttpResponseRedirect('/contrapartes/foros/')
else:
form = ForosForm()
return render(request, template, locals())
# publications (publicaciones)
@login_required
def publicaciones_contraparte(request, template='admin/list_publicaciones.html'):
object_list = Publicacion.objects.filter(usuario = request.user.id).order_by('-id')
return render(request, template, locals())
@login_required
def eliminar_publicacion(request, id):
evento = Publicacion.objects.filter(id = id).delete()
return HttpResponseRedirect('/contrapartes/publicaciones/')
@login_required
def editar_publicacion(request, id, template='admin/editar_publicacion.html'):
object = get_object_or_404(Publicacion, id=id)
FormSetInit = inlineformset_factory(Publicacion,ArchivosPublicacion,form=ArchivosPubliForm,extra=9,max_num=9)
FormSetInit2 = inlineformset_factory(Publicacion,AudiosPublicacion,form=AudiosPubliForm,extra=6,max_num=6)
FormSetInit3 = inlineformset_factory(Publicacion,VideosPublicacion,form=VideosPubliForm,extra=6,max_num=6)
if request.method == 'POST':
form = PublicacionForm(request.POST, request.FILES, instance=object)
formset = FormSetInit(request.POST,request.FILES, instance=object)
formset2 = FormSetInit2(request.POST,request.FILES, instance=object)
formset3 = FormSetInit3(request.POST,request.FILES, instance=object)
if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid():
form_uncommited = form.save()
form_uncommited.usuario = request.user
form_uncommited.save()
instances = formset.save(commit=False)
for instance in instances:
instance.publicacion = form_uncommited
instance.save()
formset.save_m2m()
instances2 = formset2.save(commit=False)
for instance2 in instances2:
instance2.publicacion = form_uncommited
instance2.save()
formset2.save_m2m()
instances3 = formset3.save(commit=False)
for instance3 in instances3:
instance3.publicacion = form_uncommited
instance3.save()
formset3.save_m2m()
if form_uncommited.publicada == True and form_uncommited.correo_enviado == False:
try:
subject, from_email = 'Nueva publicación', '<EMAIL>'
text_content = render_to_string('email/publicacion.txt', {'publi': form_uncommited,})
html_content = render_to_string('email/publicacion.txt', {'publi': form_uncommited,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
form_uncommited.correo_enviado = True
form_uncommited.save()
return HttpResponseRedirect('/contrapartes/publicaciones/')
except:
pass
return HttpResponseRedirect('/contrapartes/publicaciones/')
else:
form = PublicacionForm(instance=object)
formset = FormSetInit(instance=object)
formset2 = FormSetInit2(instance=object)
formset3 = FormSetInit3(instance=object)
return render(request, template, locals())
@login_required
def agregar_publicacion(request, template='admin/nueva_publicacion.html'):
FormSetInit = inlineformset_factory(Publicacion,ArchivosPublicacion,form=ArchivosPubliForm,extra=9,max_num=9)
FormSetInit2 = inlineformset_factory(Publicacion,AudiosPublicacion,form=AudiosPubliForm,extra=6,max_num=6)
FormSetInit3 = inlineformset_factory(Publicacion,VideosPublicacion,form=VideosPubliForm,extra=6,max_num=6)
if request.method == 'POST':
form = PublicacionForm(request.POST, request.FILES)
formset = FormSetInit(request.POST,request.FILES)
formset2 = FormSetInit2(request.POST,request.FILES)
formset3 = FormSetInit3(request.POST,request.FILES)
if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid():
publi = form.save(commit=False)
publi.usuario = request.user
publi.correo_enviado = False
publi.save()
instances = formset.save(commit=False)
for instance in instances:
instance.publicacion = publi
instance.save()
formset.save_m2m()
instances2 = formset2.save(commit=False)
for instance2 in instances2:
instance2.publicacion = publi
instance2.save()
formset2.save_m2m()
instances3 = formset3.save(commit=False)
for instance3 in instances3:
instance3.publicacion = publi
instance3.save()
formset3.save_m2m()
if publi.publicada == True and publi.correo_enviado == False:
try:
subject, from_email = 'Nueva publicación', '<EMAIL>'
text_content = render_to_string('email/publicacion.txt', {'publi': publi,})
html_content = render_to_string('email/publicacion.txt', {'publi': publi,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
publi.correo_enviado = True
publi.save()
return HttpResponseRedirect('/contrapartes/publicaciones/')
except:
pass
else:
form = PublicacionForm()
formset = FormSetInit()
formset2 = FormSetInit2()
formset3 = FormSetInit3()
return render(request, template, locals())
@login_required
def editar_aporte(request, id, template='admin/editar_aporte.html'):
object = get_object_or_404(Aportes, id=id)
if request.method == 'POST':
form = AporteForm(request.POST, request.FILES, instance=object)
if form.is_valid():
form_uncommited = form.save()
form_uncommited.user = request.user
form_uncommited.save()
return redirect('ver-foro', id=object.foro.id)
else:
form = AporteForm(instance=object)
return render(request, template, locals())
@login_required
def eliminar_aporte(request, id):
aporte = Aportes.objects.get(id = id)
foro = aporte.foro.id
aporte.delete()
return redirect('ver-foro', id=foro)
@login_required
def agregar_comentario(request, id, template='admin/comentario.html'):
object = get_object_or_404(Aportes, id=id)
if request.method == 'POST':
form = ComentarioForm(request.POST, request.FILES)
if form.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.aporte = object
form_uncommited.usuario = request.user
form_uncommited.save()
try:
subject, from_email = 'Nuevo comentario al foro ' + object.foro.nombre, '<EMAIL>'
text_content = render_to_string('email/comentario.txt', {'object': form_uncommited,})
html_content = render_to_string('email/comentario.txt', {'object': form_uncommited,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
enviado = 1
return redirect('ver-foro', id=object.foro.id)
except:
pass
else:
form = ComentarioForm()
return render(request, template, locals())
@login_required
def editar_comentario(request, id, template='admin/comentario.html'):
object = get_object_or_404(Comentarios, id=id)
if request.method == 'POST':
form = ComentarioForm(request.POST, request.FILES,instance=object)
if form.is_valid():
form_uncommited = form.save()
form_uncommited.aporte = object.aporte
form_uncommited.usuario = request.user
form_uncommited.save()
return redirect('ver-foro', id=object.aporte.foro.id)
else:
form = ComentarioForm(instance=object)
return render(request, template, locals())
@login_required
def eliminar_comentario(request, id):
comentario = Comentarios.objects.get(id = id)
foro = comentario.aporte.foro.id
comentario.delete()
return redirect('ver-foro', id=foro)
# galleries (galerias)
@login_required
def galerias_contraparte(request, template='admin/list_galerias.html'):
object_list_img = GaleriaImagenes.objects.filter(user = request.user.id).order_by('-id')
object_list_vid = GaleriaVideos.objects.filter(user = request.user.id).order_by('-id')
return render(request, template, locals())
@login_required
def eliminar_galeria_img(request, id):
img = GaleriaImagenes.objects.filter(id = id).delete()
return HttpResponseRedirect('/contrapartes/galerias/')
@login_required
def agregar_galeria_img(request, template='admin/galeria_img.html'):
FormSetInit = inlineformset_factory(GaleriaImagenes, Imagenes, form=ImagenesForm,extra=12,max_num=12)
if request.method == 'POST':
form = GaleriaImagenesForm(request.POST, request.FILES)
formset = FormSetInit(request.POST,request.FILES)
if form.is_valid() and formset.is_valid():
galeria = form.save(commit=False)
galeria.user = request.user
galeria.save()
instances = formset.save(commit=False)
for instance in instances:
instance.imagenes = galeria
instance.save()
formset.save_m2m()
return HttpResponseRedirect('/contrapartes/galerias/')
else:
form = GaleriaImagenesForm()
formset = FormSetInit()
return render(request, template, locals())
@login_required
def editar_galeria_img(request, id, template='admin/galeria_img.html'):
object = get_object_or_404(GaleriaImagenes, id=id)
FormSetInit = inlineformset_factory(GaleriaImagenes, Imagenes, form=ImagenesForm,extra=12,max_num=12)
if request.method == 'POST':
form = GaleriaImagenesForm(data=request.POST,instance=object,files=request.FILES)
formset = FormSetInit(request.POST,request.FILES,instance=object)
if form.is_valid() and formset.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.save()
formset.save()
return HttpResponseRedirect('/contrapartes/galerias/')
else:
form = GaleriaImagenesForm(instance=object)
formset = FormSetInit(instance=object)
return render(request, template, locals())
@login_required
def agregar_galeria_vid(request, template='admin/nueva_galeria_vid.html'):
if request.method == 'POST':
form = GaleriaVideosForm(request.POST, request.FILES)
if form.is_valid():
publi = form.save(commit=False)
publi.user = request.user
publi.save()
return HttpResponseRedirect('/contrapartes/galerias/')
else:
form = GaleriaVideosForm()
return render(request, template, locals())
@login_required
def eliminar_video(request, id):
img = GaleriaVideos.objects.filter(id = id).delete()
return HttpResponseRedirect('/contrapartes/galerias/')
@login_required
def editar_video(request, id, template='admin/nueva_galeria_vid.html'):
object = get_object_or_404(GaleriaVideos, id=id)
if request.method == 'POST':
form = GaleriaVideosForm(request.POST, request.FILES, instance=object)
if form.is_valid():
form_uncommited = form.save()
form_uncommited.user = request.user
form_uncommited.save()
return HttpResponseRedirect('/contrapartes/galerias/')
else:
form = GaleriaVideosForm(instance=object)
return render(request, template, locals())
@login_required
def mensajes(request, template='admin/mensajes.html'):
if request.method == 'POST':
form = MensajeForm(request.POST, request.FILES)
form.fields['user'].queryset = User.objects.exclude(id=request.user.id)
if form.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.usuario = request.user
form_uncommited.save()
form.save_m2m()
try:
subject, from_email = 'Nuevo mensaje ','<EMAIL>'
text_content = render_to_string('email/mensaje.txt', {'object': form_uncommited,})
html_content = render_to_string('email/mensaje.txt', {'object': form_uncommited,})
list_mail = []
for user in form_uncommited.user.all():
list_mail.append(user.email)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
enviado = 1
except:
pass
else:
form = MensajeForm()
form.fields['user'].queryset = User.objects.exclude(id=request.user.id)
enviado = 0
return render(request, template, locals())
@login_required
def estadisticas(request, template='admin/estadisticas.html'):
dic_notas = {}
dic_foros = {}
dic_aportes = {}
dic_coment = {}
list_resumen = []
for org in Contraparte.objects.all():
conteo = Notas.objects.filter(user__userprofile__contraparte = org).count()
dic_notas[org.siglas] = conteo
conteo_foros = Foros.objects.filter(contraparte__userprofile__contraparte = org).count()
dic_foros[org.siglas] = conteo_foros
conteo_aportes = Aportes.objects.filter(user__userprofile__contraparte = org).count()
dic_aportes[org.siglas] = conteo_aportes
conteo_coment = Comentarios.objects.filter(usuario__userprofile__contraparte = org).count()
dic_coment[org.siglas] = conteo_coment
conteo_eventos = Agendas.objects.filter(user__userprofile__contraparte = org).count()
conteo_img = GaleriaImagenes.objects.filter(user__userprofile__contraparte = org).count()
conteo_vid = GaleriaVideos.objects.filter(user__userprofile__contraparte = org).count()
conteo_publi = Publicacion.objects.filter(usuario__userprofile__contraparte = org).count()
list_resumen.append((org.siglas,conteo,conteo_eventos,conteo_foros,conteo_aportes,conteo_coment,conteo_img,conteo_vid,conteo_publi))
return render(request, template, locals())
@login_required
def catalogo(request, template='admin/lista_catalogo.html'):
object_list = Producto.objects.filter(user = request.user.id).order_by('-id')
return render(request, template, locals())
@login_required
def eliminar_producto(request, id):
prod = Producto.objects.filter(id = id).delete()
return HttpResponseRedirect('/contrapartes/catalogo/')
@login_required
def agregar_producto(request, template='admin/agregar_producto.html'):
FormSetInit = inlineformset_factory(Producto, Propuesta_valor, form=Propuesta_valorForm,extra=1)
FormSetInit2 = inlineformset_factory(Producto, FotosProducto, form=FotosProductoForm,extra=1,max_num=4)
FormSetInit3 = inlineformset_factory(Producto, ArchivosProducto, form=ArchivosProductoForm,extra=1)
FormSetInit4 = inlineformset_factory(Producto, RedesProducto, form=RedesFormProducto,extra=1)
if request.method == 'POST':
form = ProductoForm(request.POST, request.FILES)
formset = FormSetInit(request.POST)
formset2 = FormSetInit2(request.POST,request.FILES)
formset3 = FormSetInit3(request.POST,request.FILES)
formset4 = FormSetInit4(request.POST,request.FILES)
if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid() and formset4.is_valid():
producto = form.save(commit=False)
producto.user = request.user
producto.correo_enviado = False
producto.save()
form.save_m2m()
instances = formset.save(commit=False)
for instance in instances:
instance.producto = producto
instance.save()
instances2 = formset2.save(commit=False)
for instance2 in instances2:
instance2.producto = producto
instance2.save()
instances3 = formset3.save(commit=False)
for instance3 in instances3:
instance3.producto = producto
instance3.save()
instances4 = formset4.save(commit=False)
for instance4 in instances4:
instance4.producto = producto
instance4.save()
if producto.publicada == True and producto.enviar_correo == True:
try:
subject, from_email = 'Nuevo producto', '<EMAIL>'
text_content = render_to_string('email/mail2.html', {'producto': producto,})
html_content = render_to_string('email/mail2.html', {'producto': producto,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
enviado = 1
producto.correo_enviado = True
producto.save()
return HttpResponseRedirect('/contrapartes/catalogo/')
except:
pass
return HttpResponseRedirect('/contrapartes/catalogo/')
else:
form = ProductoForm()
formset = FormSetInit()
formset2 = FormSetInit2()
formset3 = FormSetInit3()
formset4 = FormSetInit4()
return render(request, template, locals())
@login_required
def editar_producto(request, id=None, template='admin/agregar_producto.html'):
object = get_object_or_404(Producto, id=id)
FormSetInit = inlineformset_factory(Producto, Propuesta_valor, form=Propuesta_valorForm,extra=1)
FormSetInit2 = inlineformset_factory(Producto, FotosProducto, form=FotosProductoForm,extra=1,max_num=4)
FormSetInit3 = inlineformset_factory(Producto, ArchivosProducto, form=ArchivosProductoForm,extra=1)
FormSetInit4 = inlineformset_factory(Producto, RedesProducto, form=RedesFormProducto,extra=1)
if request.method == 'POST':
form = ProductoForm(request.POST, request.FILES,instance=object)
formset = FormSetInit(request.POST,instance=object)
formset2 = FormSetInit2(request.POST,request.FILES,instance=object)
formset3 = FormSetInit3(request.POST,request.FILES,instance=object)
formset4 = FormSetInit4(request.POST,instance=object)
if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid() and formset4.is_valid():
form_uncommited = form.save()
form_uncommited.save()
# form_uncommited.save_m2m()
formset.save()
formset2.save()
formset3.save()
formset4.save()
if form_uncommited.publicada == True and form_uncommited.correo_enviado == False and form_uncommited.enviar_correo == True:
try:
subject, from_email = 'Nuevo producto', '<EMAIL>'
text_content = render_to_string('email/mail2.html', {'producto': form_uncommited,})
html_content = render_to_string('email/mail2.html', {'producto': form_uncommited,})
list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
enviado = 1
form_uncommited.correo_enviado = True
form_uncommited.save()
return redirect('catalogo')
except:
pass
return redirect('catalogo')
else:
form = ProductoForm(instance=object)
formset = FormSetInit(instance=object)
formset2 = FormSetInit2(instance=object)
formset3 = FormSetInit3(instance=object)
formset4 = FormSetInit4(instance=object)
return render(request, template, locals())
| StarcoderdataPython |
1631893 | import logging
from django.utils import timezone
from elasticsearch import Elasticsearch, NotFoundError, RequestError
from zentral.core.exceptions import ImproperlyConfigured
from .base import BaseExporter
logger = logging.getLogger("zentral.contrib.inventory.exporters.es_machine_snapshots")
MAX_EXPORTS_COUNT = 3
ES_ALIAS = "zentral-inventory-export-machine-snapshots"
ES_TEMPLATE_NAME = ES_ALIAS
ES_INDEX_PATTERN = '{}-*'.format(ES_ALIAS)
ES_TEMPLATE = {
'index_patterns': [ES_INDEX_PATTERN],
'settings': {'number_of_shards': 1,
'number_of_replicas': 0},
'mappings': {'date_detection': False,
'dynamic_templates': [{'strings_as_keyword': {'mapping': {'ignore_above': 1024,
'type': 'keyword'},
'match_mapping_type': 'string'}}],
'properties': {'@timestamp': {'type': 'date'},
'tags': {'ignore_above': 1024,
'type': 'keyword'}}}
}
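# Naming scheme implied by the constants above: successive exports are written to indices
# such as zentral-inventory-export-machine-snapshots-00000000, -00000001, ..., the alias
# ES_ALIAS is moved to the newest index, and indices beyond the MAX_EXPORTS_COUNT most
# recent ones are deleted by prune_exports().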
class InventoryExporter(BaseExporter):
name = "elasticsearch machine snapshots exporter"
def __init__(self, config_g):
super().__init__(config_g)
error_msgs = []
self.es_hosts = config_g["es_hosts"]
if not self.es_hosts:
error_msgs.append("Missing es_hosts")
if not isinstance(self.es_hosts, list):
error_msgs.append("es_hosts must be a list")
if error_msgs:
raise ImproperlyConfigured("{} in {}".format(", ".join(error_msgs), self.name))
def iter_machine_snapshots(self):
for serial_number, machine_snapshots in self.get_ms_query().fetch(paginate=False, for_filtering=True):
for machine_snapshot in machine_snapshots:
yield machine_snapshot
def get_es_client(self):
self._es = Elasticsearch(hosts=self.es_hosts)
self._es_version = [int(i) for i in self._es.info()["version"]["number"].split(".")]
# template
template_body = ES_TEMPLATE
if self._es_version < [7]:
template_body["mappings"] = {"_doc": template_body.pop("mappings")}
self._es.indices.put_template(ES_TEMPLATE_NAME, template_body)
# create index
for i in range(10):
existing_indices = self._es.indices.get(ES_INDEX_PATTERN).keys()
if not len(existing_indices):
next_id = 0
else:
next_id = max(int(index.rsplit("-", 1)[-1]) for index in existing_indices) + 1
index_name = ES_INDEX_PATTERN.replace("*", "{:08d}".format(next_id))
try:
self._es.indices.create(index_name)
except RequestError:
# probably race
pass
else:
# move alias
update_aliases_body = {
"actions": [
{"add": {"index": index_name, "alias": ES_ALIAS}}
]
}
try:
old_indices = self._es.indices.get_alias(ES_ALIAS)
except NotFoundError:
old_indices = []
for old_index in old_indices:
if old_index != index_name:
update_aliases_body["actions"].append(
{"remove": {"index": old_index, "alias": ES_ALIAS}}
)
self._es.indices.update_aliases(update_aliases_body)
return index_name
def index_snapshot(self, index_name, machine_snapshot):
doc_id = "{}.{}".format(machine_snapshot["serial_number"], machine_snapshot["source"]["id"])
self._es.create(index_name, doc_id, machine_snapshot)
def prune_exports(self):
existing_indices = sorted(self._es.indices.get(ES_INDEX_PATTERN).keys(), reverse=True)
for index_name in existing_indices[MAX_EXPORTS_COUNT:]:
self._es.indices.delete(index_name)
logger.info("Removed '%s' index", index_name)
def run(self):
timestamp = timezone.now().isoformat()
index_name = self.get_es_client()
logger.info("Created '%s' index", index_name)
i = 0
for machine_snapshot in self.iter_machine_snapshots():
machine_snapshot["@timestamp"] = timestamp
self.index_snapshot(index_name, machine_snapshot)
i += 1
logger.info("Added %s machine snapshot(s)", i)
self.prune_exports()
| StarcoderdataPython |
1620512 | <reponame>li195111/real-estate-price
from celery import Celery
from celery.schedules import crontab
import utils
# from app import rdb
task = Celery('tasks',
broker='redis://localhost:6379/0',
backend='redis://localhost:6379/0')
task.conf.timezone = 'UTC'
'''
@task.task
def function():
...
'''
@task.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
# clean jwt every 10 seconds.
# sender.add_periodic_task(10.0, clean_key_pairs.s(), name='clean every 10s')
pass
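# Hypothetical sketch (not in the original file): a task wired to the commented schedule
# above could be declared on the same Celery app instance like this.
@task.task
def clean_key_pairs():
    """Sketch only: would remove expired JWT key pairs from the Redis backend."""
    pass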
| StarcoderdataPython |
1765095 | import django_heroku
from .production import *
ALLOWED_HOSTS = ['*.herokuapp.com']
CACHES = {
'default': {
'BACKEND': 'django_bmemcached.memcached.BMemcached',
'LOCATION': os.getenv('MEMCACHIER_SERVERS').split(','),
'OPTIONS': {
'username': os.getenv('MEMCACHIER_USERNAME'),
'password': os.getenv('<PASSWORD>')
}
}
}
django_heroku.settings(locals())
| StarcoderdataPython |
3227106 | import logging
import itertools
from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage
from data.logs_model.interface import ActionLogsDataInterface
from data.logs_model.shared import SharedModel
logger = logging.getLogger(__name__)
def _merge_aggregated_log_counts(*args):
""" Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime.
"""
matching_keys = {}
aggregated_log_counts_list = itertools.chain.from_iterable(args)
def canonical_key_from_kind_date_tuple(kind_id, dt):
""" Return a comma separated key from an AggregatedLogCount's kind_id and datetime. """
return str(kind_id) + ',' + str(dt)
for kind_id, count, dt in aggregated_log_counts_list:
kind_date_key = canonical_key_from_kind_date_tuple(kind_id, dt)
if kind_date_key in matching_keys:
existing_count = matching_keys[kind_date_key][2]
matching_keys[kind_date_key] = (kind_id, dt, existing_count + count)
else:
matching_keys[kind_date_key] = (kind_id, dt, count)
return [AggregatedLogCount(kind_id, count, dt) for (kind_id, dt, count) in matching_keys.values()]
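# Illustrative example (hypothetical values): merging [AggregatedLogCount(1, 2, dt)] with
# [AggregatedLogCount(1, 3, dt)] for the same kind_id/datetime yields
# [AggregatedLogCount(1, 5, dt)]; entries with distinct keys pass through unchanged.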
class CombinedLogsModel(SharedModel, ActionLogsDataInterface):
"""
CombinedLogsModel implements the data model that logs to the first logs model and reads from
both.
"""
def __init__(self, read_write_logs_model, read_only_logs_model):
self.read_write_logs_model = read_write_logs_model
self.read_only_logs_model = read_only_logs_model
def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
return self.read_write_logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
repository, repository_name, timestamp,
is_free_namespace)
def count_repository_actions(self, repository, day):
rw_count = self.read_write_logs_model.count_repository_actions(repository, day)
ro_count = self.read_only_logs_model.count_repository_actions(repository, day)
return rw_count + ro_count
def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
repository_name=None, namespace_name=None, filter_kinds=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_count = rw_model.get_aggregated_log_counts(start_datetime, end_datetime,
performer_name=performer_name,
repository_name=repository_name,
namespace_name=namespace_name,
filter_kinds=filter_kinds)
ro_count = ro_model.get_aggregated_log_counts(start_datetime, end_datetime,
performer_name=performer_name,
repository_name=repository_name,
namespace_name=namespace_name,
filter_kinds=filter_kinds)
return _merge_aggregated_log_counts(rw_count, ro_count)
def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
namespace_id=None, max_query_time=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_logs = rw_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
namespace_id, max_query_time)
ro_logs = ro_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
namespace_id, max_query_time)
for batch in itertools.chain(rw_logs, ro_logs):
yield batch
def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
page_token = page_token or {}
new_page_token = {}
if page_token is None or not page_token.get('under_readonly_model', False):
rw_page_token = page_token.get('readwrite_page_token')
rw_logs = rw_model.lookup_logs(start_datetime, end_datetime, performer_name,
repository_name, namespace_name, filter_kinds,
rw_page_token, max_page_count)
logs, next_page_token = rw_logs
new_page_token['under_readonly_model'] = next_page_token is None
new_page_token['readwrite_page_token'] = next_page_token
return LogEntriesPage(logs, new_page_token)
else:
readonly_page_token = page_token.get('readonly_page_token')
ro_logs = ro_model.lookup_logs(start_datetime, end_datetime, performer_name,
repository_name, namespace_name, filter_kinds,
readonly_page_token, max_page_count)
logs, next_page_token = ro_logs
if next_page_token is None:
return LogEntriesPage(logs, None)
new_page_token['under_readonly_model'] = True
new_page_token['readonly_page_token'] = next_page_token
return LogEntriesPage(logs, new_page_token)
def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
filter_kinds=None, size=20):
latest_logs = []
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_logs = rw_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
filter_kinds, size)
latest_logs.extend(rw_logs)
if len(latest_logs) < size:
ro_logs = ro_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
filter_kinds, size - len(latest_logs))
latest_logs.extend(ro_logs)
return latest_logs
def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
ro_model = self.read_only_logs_model
rw_model = self.read_write_logs_model
ro_ctx = ro_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
rw_ctx = rw_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
for ctx in itertools.chain(ro_ctx, rw_ctx):
yield ctx
| StarcoderdataPython |
1675576 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants related to the GCP auth method and/or secrets engine."""
DEFAULT_MOUNT_POINT = 'database'
ALLOWED_CREDS_ENDPOINT = 'creds' | StarcoderdataPython |
3333633 | <gh_stars>0
__author__ = 'Nikhil'
import scrapy
from MedIndia.items import MedindiaItem
html_headers = {
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"accept-encoding" : "gzip, deflate, sdch, br",
"accept-language" : "en-US,en;q=0.8,ms;q=0.6",
"user-agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
}
#to csv - scrapy runspider MedIndia.py -o file.csv -t csv
class MedSpider(scrapy.Spider):
name = 'med'
#MedindiaSpider.download_delay = 1
allowed_domains = ["medindia.net"]
start_urls = [#"https://www.medindia.net/drug-price/brand-index.asp?alpha=a",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=b",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=c",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=d",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=e",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=f",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=g",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=h",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=i",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=j",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=k",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=l",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=m",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=n",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=o",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=p",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=q",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=r",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=s",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=t",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=u",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=v",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=w",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=x",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=y",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=z",
]
def parse(self, response):
#drug_urls = response.css("table.table-bordered.table > tr > td > a::attr(href)").extract()
#for letter_href in reponse.css("div.btn-group.btn-group-sm > a::attr(href)"):
# next_letter_page =
for href in response.css("table.table-bordered.table > tr > td > a::attr(href)"):
#print(href)
yield response.follow(href, callback=self.parse_details)
#for drug_url in drug_urls:
# drug_url = response.urljoin(drug_url)
# print(drug_url)
# yield scrapy.Request(url=drug_url,
# #headers=html_headers,
# callback=self.parse_details)
next_page_url = response.css("a[title='Next Page']::attr(href)").extract_first()
if next_page_url:
next_page_url = response.urljoin(next_page_url)
#print(next_page_url)
yield scrapy.Request(url=next_page_url, callback=self.parse)
def parse_details(self,response):
#print("we are here")
item = MedindiaItem()
item['drugName'] = response.css("td > h1::text").extract()
item['drugForm'] = response.css("td > span::text")[0].extract()
item['drugGenericName'] = response.css("td > span::text")[1].extract()
item['price'] = response.css("div.ybox > b::text").extract()
item['dosage'] = response.css("div.ybox > span > b::text")[0].extract()
item['basicInfo'] = response.css("div.report-content::text").extract()
item['conditions'] = response.css("div.caption > b > a::text").extract()
item['sideEffects'] = response.xpath('.//p[@class="drug-content"][1]/text()').extract()
item['dosageInfo'] = response.xpath('.//p[@class="drug-content"][2]/text()').extract()
item['howToTake'] = response.xpath('.//p[@class="drug-content"][3]/text()').extract()
item['contraindications'] = response.xpath('.//p[@class="drug-content"][4]/text()').extract()
item['warningsAndPrecautions'] = response.xpath('.//p[@class="drug-content"][5]/text()').extract()
item['otherPrecautions'] = response.xpath('.//p[@class="drug-content"][6]/text()').extract()
item['StorageConditions'] = response.xpath('.//p[@class="drug-content"][7]/text()').extract()
#get data of each drug
yield item
# {
# 'Host': 'www.medindia.net',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en-US,en;q=0.5',
# 'Referer': 'https://www.medindia.net/drug-price/brand-index.asp?alpha=a',
# 'Accept-Encoding': 'gzip, deflate, br',
# #'Cookie': 'ASPSESSIONIDCASCCQQB=LAIEMMHCGFGJCIOHMDFGBBLP; ASPSESSIONIDACTARTCD=AEAGKDKCJADJAGILECIGFDDN; ASPSESSIONIDCATDSQBC=FGNMNKICLBCAIFIDJGDJGALL; ASPSESSIONIDCASBCSQD=LMEDKCOCNNJHPFKFDFLMAGME; ASPSESSIONIDCATBTTBC=AMLHCGIDDLAKKOFDLNDJHFCJ',
# 'Connection': 'keep-alive'
# } | StarcoderdataPython |
70452 | import sys
import argparse
import numpy as np
from dataclasses import dataclass
from mchap.application import baseclass
from mchap.application.baseclass import SampleAssemblyError, SAMPLE_ASSEMBLY_ERROR
from mchap.application.arguments import (
CALL_MCMC_PARSER_ARGUMENTS,
collect_call_mcmc_program_arguments,
)
from mchap.calling.classes import CallingMCMC
from mchap.calling.exact import genotype_likelihoods
from mchap.jitutils import natural_log_to_log10
from mchap.io import qual_of_prob
@dataclass
class program(baseclass.program):
mcmc_chains: int = 1
mcmc_steps: int = 1000
mcmc_burn: int = 500
mcmc_incongruence_threshold: float = 0.60
@classmethod
def cli(cls, command):
"""Program initialization from cli command
e.g. `program.cli(sys.argv)`
"""
parser = argparse.ArgumentParser("MCMC haplotype calling")
for arg in CALL_MCMC_PARSER_ARGUMENTS:
arg.add_to(parser)
if len(command) < 3:
parser.print_help()
sys.exit(1)
args = parser.parse_args(command[2:])
# sort argument details
arguments = collect_call_mcmc_program_arguments(args)
return cls(cli_command=command, **arguments)
def call_sample_genotypes(self, data):
"""De novo haplotype assembly of each sample.
Parameters
----------
data : LocusAssemblyData
With sampledata fields: "read_dists_unique", "read_dist_counts".
Returns
-------
data : LocusAssemblyData
With sampledata fields: "alleles", "haplotypes", "GQ", "GPM", "PHPM", "PHQ", "MCI"
and "GL", "GP" if specified.
"""
for field in [
"alleles",
"haplotypes",
"GQ",
"GPM",
"PHPM",
"PHQ",
"MCI",
"GL",
"GP",
"AFP",
]:
data.sampledata[field] = dict()
haplotypes = data.locus.encode_haplotypes()
for sample in data.samples:
# wrap in try clause to pass sample info back with any exception
try:
reads = data.sampledata["read_dists_unique"][sample]
                read_counts = data.sampledata["read_dist_counts"][sample]
# call haplotypes
trace = (
CallingMCMC(
ploidy=data.sample_ploidy[sample],
haplotypes=haplotypes,
inbreeding=data.sample_inbreeding[sample],
steps=self.mcmc_steps,
chains=self.mcmc_chains,
random_seed=self.random_seed,
)
.fit(
reads=reads,
read_counts=read_counts,
)
.burn(self.mcmc_burn)
)
incongruence = trace.replicate_incongruence(
threshold=self.mcmc_incongruence_threshold
)
posterior = trace.posterior()
alleles, genotype_prob, phenotype_prob = posterior.mode(phenotype=True)
# store variables
data.sampledata["alleles"][sample] = alleles
data.sampledata["haplotypes"][sample] = haplotypes[alleles]
data.sampledata["GQ"][sample] = qual_of_prob(genotype_prob)
data.sampledata["GPM"][sample] = np.round(genotype_prob, self.precision)
data.sampledata["PHPM"][sample] = np.round(
phenotype_prob, self.precision
)
data.sampledata["PHQ"][sample] = qual_of_prob(phenotype_prob)
data.sampledata["MCI"][sample] = incongruence
# posterior allele frequencies if requested
if "AFP" in data.formatfields:
frequencies = np.zeros(len(haplotypes))
alleles, counts = np.unique(trace.genotypes, return_counts=True)
frequencies[alleles] = counts / counts.sum()
data.sampledata["AFP"][sample] = np.round(
frequencies, self.precision
)
# genotype posteriors if requested
if "GP" in data.formatfields:
probabilities = posterior.as_array(len(haplotypes))
data.sampledata["GP"][sample] = np.round(
probabilities, self.precision
)
# genotype likelihoods if requested
if "GL" in data.formatfields:
llks = genotype_likelihoods(
reads=reads,
read_counts=read_counts,
ploidy=data.sample_ploidy[sample],
haplotypes=haplotypes,
)
data.sampledata["GL"][sample] = np.round(
natural_log_to_log10(llks), self.precision
)
# end of try clause for specific sample
except Exception as e:
path = data.sample_bams.get(sample)
message = SAMPLE_ASSEMBLY_ERROR.format(sample=sample, bam=path)
raise SampleAssemblyError(message) from e
return data
| StarcoderdataPython |
136644 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
def ClassifyColor( BGR, width, height ): ## classify colors (BGR, width, height)
    r_threshold = 20 ## red threshold, before 10
    b_threshold = 20 ## blue threshold, before 10
    FortyFive_degree = math.pi / 4 ## 45 degrees
    grey_threshold = 10.0 * math.pi / 180.0 ## hue-angle upper/lower bound, before 5
for y in range(0, height, 1):
cur_row = BGR[y]
for x in range(0, width, 1):
[b1, g1, r1] = cur_row[x] #b1, g1 and r1 are type unsigned integer 8 bits
            #Convert to 32-bit integers
b = int(b1)
g = int(g1)
r = int(r1)
#Red color
if r - b > r_threshold:
if r - g > r_threshold:
cur_row[x] = [255, 255, 255]
                    continue ## skip the remaining checks for this pixel
#Blue color
if b - r > b_threshold:
if b - g > b_threshold:
cur_row[x] = [0, 0, 0]
continue
#Other colors
cur_row[x] = [0, 0, 0]
return BGR
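# Illustration (hypothetical pixel values): with the thresholds above, a pixel
# [b, g, r] = [40, 60, 200] is classified as red and painted white, [200, 60, 40] is
# classified as blue and painted black, and every other colour is also painted black.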
cap = cv2.VideoCapture(0)
while(1):
if __name__ == '__main__':
ret, frame = cap.read()
if frame.size == 0:
##if ret == False:
print(f"Fail to read image {filename}")
else:
cv2.imshow('Original frame',frame)
(height, width, channels) = frame.shape
print(f"frame dimension ( {height} , {width}, {channels})\n" )
if channels != 3:
print("Image is not a color image ##################")
if frame.dtype != "uint8":
print("Image is not of type uint8 #################")
ms = frame.copy()
kernel = np.ones((5,5), np.uint8)
##dilation = cv2.dilate(test, kernel, iterations = 3)
dilation = cv2.dilate(ms, kernel, iterations = 7)
cv2.imshow('dilation', dilation)
kernel = np.ones((7,7), np.uint8)
##erosion = cv2.erode(dilation, kernel, iterations = 3)
erosion = cv2.erode(dilation, kernel, iterations = 9)
cv2.imshow('erosion', erosion)
            #Convert image to a NumPy array (creates a new array copy)
            # Note: The order is in BGR format!
BGR_array = np.array( erosion )
#Classify red, blue and grey color
Result_array = ClassifyColor( BGR_array, width, height )
cv2.imshow('BGR',Result_array)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
| StarcoderdataPython |
3381440 | for i in range(11):
for j in range(i+1):
print("# ",end=" ")
for k in range(i+1,11):
print("^ ",end=" ")
print(" ")
| StarcoderdataPython |
3338051 | import sys
import pymongo
from pymongo import MongoClient
client=MongoClient()
db=client.person_db
user=db.user
img_id=sys.argv[1]
lat=sys.argv[2]
lng=sys.argv[3]
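# Usage sketch (script name assumed): python person_tracker.py <image_id> <latitude> <longitude>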
flag=0
#ele = user.find()
#cnt=user.find().count()
for ele in user.find():
#res=cmp_img_id.py #(call a script checking for similarity of image)
res=0
if res==0:
icount = int(ele['count']) + 1
result_id = str(ele['person_id'])
flag=1
user.update_one({"person_id":result_id},{'$set':{"lat":lat,"long":lng,
"count": str(icount)}},upsert=True)
if icount >3:
user.update_one({"person_id":result_id},{'$set':{"status":"Pd"}},upsert=True)
else:
continue
if flag==0:
user.insert_one({"person_id":img_id,"lat":lat,"long":lng,"count":1,
"status":"P"})
| StarcoderdataPython |
4836595 | <reponame>bopopescu/google-cloud-sdk<gh_stars>0
"""Small helper class to provide a small slice of a stream."""
from gslib.third_party.storage_apitools import exceptions
class StreamSlice(object):
def __init__(self, stream, max_bytes):
self.__stream = stream
self.__remaining_bytes = max_bytes
self.__max_bytes = max_bytes
def __str__(self):
return 'Slice of stream %s with %s/%s bytes not yet read' % (
self.__stream, self.__remaining_bytes, self.__max_bytes)
def __len__(self):
return self.__max_bytes
def read(self, size=None):
if size is not None:
size = min(size, self.__remaining_bytes)
else:
size = self.__remaining_bytes
data = self.__stream.read(size)
if not data and self.__remaining_bytes:
raise exceptions.TransferInvalidError(
          'Not enough bytes in stream; expected %d, stream exhausted after %d' % (
self.__max_bytes, self.__remaining_bytes))
self.__remaining_bytes -= len(data)
return data
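# Example usage (file name assumed): limit a consumer to the first 1024 bytes of a stream.
#   with open('data.bin', 'rb') as f:
#     chunk = StreamSlice(f, 1024)
#     data = chunk.read()  # reads up to 1024 bytes from f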
| StarcoderdataPython |
3288435 | <reponame>duggalsu/PySyft<filename>syft/frameworks/crypten/message_handler.py
import syft
from syft.messaging.message import CryptenInitPlan
from syft.messaging.message import CryptenInitJail
from syft.messaging.message import ObjectMessage
from syft.frameworks import crypten as syft_crypten
from syft.frameworks.crypten.context import run_party
from syft.frameworks.crypten.jail import JailRunner
from syft.frameworks.crypten import utils
from syft.generic.abstract.message_handler import AbstractMessageHandler
class CryptenMessageHandler(AbstractMessageHandler):
def __init__(self, object_store, worker):
super().__init__(object_store)
self.worker = worker
def init_routing_table(self):
return {
CryptenInitPlan: self.run_crypten_party_plan,
CryptenInitJail: self.run_crypten_party_jail,
}
def run_crypten_party_plan(self, msg: CryptenInitPlan) -> ObjectMessage: # pragma: no cover
"""Run crypten party according to the information received.
Args:
msg (CryptenInitPlan): should contain the rank_to_worker_id, world_size,
master_addr and master_port.
Returns:
An ObjectMessage containing the return value of the crypten function computed.
"""
rank_to_worker_id, world_size, master_addr, master_port = msg.crypten_context
cid = syft.ID_PROVIDER.pop()
syft_crypten.RANK_TO_WORKER_ID[cid] = rank_to_worker_id
onnx_model = msg.model
crypten_model = None if onnx_model is None else utils.onnx_to_crypten(onnx_model)
# TODO Change this, we need a way to handle multiple plan definitions
plans = self.worker.search("crypten_plan")
if len(plans) != 1:
raise ValueError(f"Error: {len(plans)} plans found. There should be only 1.")
plan = plans[0].get()
rank = self._current_rank(rank_to_worker_id)
if rank is None:
raise ValueError("Current rank can't be None")
if crypten_model:
args = (crypten_model,)
else:
args = ()
return_value = run_party(cid, plan, rank, world_size, master_addr, master_port, args, {})
        # remove rank to id translation dict
del syft_crypten.RANK_TO_WORKER_ID[cid]
# Delete the plan at the end of the computation
self.worker.de_register_obj(plan)
return ObjectMessage(return_value)
def run_crypten_party_jail(self, msg: CryptenInitJail): # pragma: no cover
"""Run crypten party according to the information received.
Args:
            msg (CryptenInitJail): should contain the rank, world_size,
master_addr and master_port.
Returns:
An ObjectMessage containing the return value of the crypten function computed.
"""
rank_to_worker_id, world_size, master_addr, master_port = msg.crypten_context
cid = syft.ID_PROVIDER.pop()
syft_crypten.RANK_TO_WORKER_ID[cid] = rank_to_worker_id
ser_func = msg.jail_runner
onnx_model = msg.model
crypten_model = None if onnx_model is None else utils.onnx_to_crypten(onnx_model)
jail_runner = JailRunner.detail(ser_func, model=crypten_model)
rank = self._current_rank(rank_to_worker_id)
if rank is None:
raise ValueError("Current rank can't be None")
return_value = run_party(
cid, jail_runner, rank, world_size, master_addr, master_port, (), {}
)
        # remove rank to id translation dict
del syft_crypten.RANK_TO_WORKER_ID[cid]
return ObjectMessage(return_value)
def _current_rank(self, rank_to_worker_id):
"""Returns current rank based on worker_id."""
rank = None
for r, worker_id in rank_to_worker_id.items():
if worker_id == self.worker.id:
rank = r
break
return rank
| StarcoderdataPython |
1608894 | from ripe.atlas.sagan import Result
from ripe.atlas.cousteau import Probe
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from collections import Counter
from sklearn.mixture import GaussianMixture
import urllib.request
import json
import pickle
import decimal
from OllivierRicci import ricciCurvature,compute_ricciFlow
sns.set_context("paper",rc={"xtick.labelsize":10,'figure.figsize':(250,250),"ytick.labelsize":10,"axes.labelsize":10
,"legend.labelsize":15})
from ripe.atlas.cousteau import (
Measurement
)
from matplotlib.patches import Ellipse
from numpy.linalg import norm
# c is the speed of light in m/s
c = 299792458
#a collection of colors used in GMM
colors = dict(enumerate([ "red", "blue", "green", "yellow", "purple", "orange" ,"white", "black"]))
def float_range(start, stop, step):
l = [start]
while start < stop:
start = decimal.Decimal(start)
start += decimal.Decimal(step)
l.append(start)
return l
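# Example (values assumed): float_range(0, 1, '0.25') returns
# [0, Decimal('0.25'), Decimal('0.50'), Decimal('0.75'), Decimal('1.00')];
# passing the step as a string keeps the Decimal arithmetic exact.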
def draw_ellipse(position, covariance, ax=None, **kwargs):
"""
draw an ellipse which tells us what is the area of influence of each centroids in the GMM
:param position:
:param covariance:
:param ax:
:param kwargs:
:return:
"""
"""Draw an ellipse with a given position and covariance"""
ax = ax or plt.gca()
# Convert covariance to principal axes
if covariance.shape == (2, 2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
width, height = 2 * np.sqrt(s)
else:
angle = 0
width, height = 2 * np.sqrt(covariance)
# Draw the Ellipse
for nsig in range(1, 4):
ax.add_patch(Ellipse(position, nsig * width, nsig * height,
angle, **kwargs))
def plot_gmm(gmm, X, label=True, ax=None):
ax = ax or plt.gca()
labels = gmm.fit(X).predict(X)
if label:
ax.scatter(X[:, 0], X[:, 1], c=labels, s=7, cmap='viridis', zorder=2)
else:
ax.scatter(X[:, 0], X[:, 1], s=7, zorder=2)
# ax.axis('equal')
w_factor = 0.2 / gmm.weights_.max()
for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):
draw_ellipse(pos, covar, alpha=w * w_factor)
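# Illustrative usage sketch (names are assumptions, not from the original script):
# plot_gmm can be applied to the (geographic, latency) samples directly, e.g.
#   X = df[['geographic', 'latency']].values
#   gmm = GaussianMixture(n_components=3, covariance_type='full', random_state=1)
#   plot_gmm(gmm, X)
#   plt.show()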
#id = 6278 #(sorbonne universite)
#id = 6231 #(boston university)
# id =6285 #Atlanta
# id = 6271 #Paris Afnic
# def map_Louis(id_meas):
# dico = {}
# with urllib.request.urlopen(
# "https://atlas.ripe.net/api/v2/measurements/%d/results/?format=txt" % id_meas) as my_results:
# for result in my_results.readlines():
# result = Result.get(result.decode("utf-8"))
# dico.update({result.probe_id: result.rtt_min})
# # print(json.load(open('/Users/loqman/Downloads/20190616.json')))
# # print(json.load(open('/Users/loqman/Downloads/20190521.json'))['objects'])
# probes = {d['id']: d for d in json.load(open('/Users/loqman/Downloads/20190521.json'))['objects']}
# df = pd.DataFrame(probes).transpose()
# all = []
# for t in dico.keys():
# value = df[df.index==t][['id','latitude','longitude']].values
# value = np.append(value,dico[t])
# # dg['latency'] = dico[t]
# print(value)
# all.append(value)
# dg = pd.DataFrame(all,index=dico.keys(),columns = ['id','latitude','longitude','latency'])
# dg.to_csv('losquinquihios.csv')
# return dg
def read_all(id,path_ripe,path_geo,type="min",internet= False,id_meas = None):
"""
    This function translates the raw data into a readable dataframe.
    :param id: int, the id of the probe the measurement was run against
    :param path_ripe: string, path to the RIPE Atlas json results
    :param path_geo: string, path to the geographic-distance matrix
    :param type: categorical, indicates which type of rtt should be taken
    :param internet: boolean, should the data be fetched directly from the RIPE website?
    :param id_meas: the id associated with the measurement
    :return: pandas dataframe with two columns, latency and geographic distance
"""
dico = {}
hist = []
hist1 = []
hist2 = []
hist3 = []
if internet:
with urllib.request.urlopen(
"https://atlas.ripe.net/api/v2/measurements/%d/results/?format=txt" % id_meas) as my_results:
for result in my_results.readlines():
result = Result.get(result.decode("utf-8"))
if type == 'min':
dico.update({result.probe_id: result.rtt_min})
else:
with open(path_ripe) as my_results:
for result in my_results.readlines():
result = Result.get(result)
if type == 'min':
dico.update({result.probe_id:result.rtt_min})
hist.append(result.rtt_min)
hist1.append(result.rtt_max)
hist2.append(result.rtt_median)
hist3.append(result.rtt_max - result.rtt_min)
print(result.rtt_median,result.probe_id)
# print(dico)
geo_matrix = pd.read_pickle(path_geo).transpose()
limit = geo_matrix.loc[dico.keys()]
# print(Probe(id=6278).address_v4)
dlat = pd.Series(dico)
# print(dlat)
# print([dlat,limit[[Probe(id=6278).address_v4]]])
# df = pd.DataFrame(dlat.values,limit[['66.31.16.75']]).transpose(),index=limit.index, columns = ['latency','distance'])
df = pd.DataFrame()
id = id.split('-')[-1]
print(id)
#print(Probe(id=id).address_v4)
# print(limit[Probe(id=id).address_v4])
df['latency'] = dlat
try:
lim = limit[Probe(id=id).address_v4]
except:
return []
try:
lim.columns = [Probe(id=id).address_v4,'off']
except:
print('no worries')
# print(lim)
# print(Probe(id=id).address_v4)
try:
df['geographic'] = lim
except:
df['geographic'] = lim[Probe(id=id).address_v4]
# [Probe(id=id).address_v4]
# print(df.head())
print(df.shape)
df.dropna(inplace=True)
print(df.shape)
return df
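# Illustrative call (hypothetical paths and probe id, not from the original repository):
#   df = read_all(id='probe-6271', path_ripe='measurement.json',
#                 path_geo='geo_matrix.pickle', type='min')
# would return a dataframe indexed by probe id with 'latency' and 'geographic' columns.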
def AS_analysis(labels,index,col =False):
"""
    Reads the probe ids and their positions in the dataframe and returns the AS each probe belongs to.
    :param labels: cluster label assigned to each probe
    :param index: probe ids, in the same order as labels
    :param col: if True, key the result by cluster colour instead of cluster id
    :return: dict mapping each cluster to the list of ASNs of its probes
"""
dic ={}
for (i, t) in enumerate(index):
dic[t] = labels[i]
dic_as = {}
for i in list(set(labels)):
l = []
for t in index:
if dic[t] == i:
l.append(Probe(id=t).asn_v4)
if col:
dic_as[colors[i]] = l
else:
dic_as[i] = l
return dic_as
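# The returned mapping looks like {0: [3215, 2200, ...], 1: [701, ...]} (hypothetical
# ASNs shown for illustration), or is keyed by colour names when col=True.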
def func(x,type):
"""
    Applies the transformation associated with one of the four regression types.
    :param x: np.array of input values
    :param type: categorical: either 'linear', 'squared', 'root' or 'cubical'
    :return: the transformed values
"""
if type == "linear":
return x
elif type == 'squared':
return np.square(x)
elif type=='root':
return np.sqrt(x)
elif type =='cubical':
return x**(3)
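# Worked example (assumed values):
#   func(np.array([1.0, 4.0, 9.0]), 'root') -> array([1., 2., 3.])
# i.e. the element-wise square root used by the 'root' regression model.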
def influence_plot(df):
"""
    Quantifies the influence of each value on the linear regression (this allows us to spot outliers to a certain extent).
:param df: dataframe
:return: a plot
"""
import statsmodels.api as sm
x = np.array(df['geographic'].values)
y = np.array(df['latency'].values)
lm = sm.OLS(y, sm.add_constant(x)).fit()
plt.scatter(np.sort(x), y[np.argsort(x)])
plt.scatter(np.mean(x), np.mean(y), color="green")
plt.plot(np.sort(x), lm.predict()[np.argsort(x)], label="regression")
plt.title("Linear Regression plots with the regression line")
plt.legend()
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.influence_plot(lm, alpha=0.05, ax=ax, criterion="cooks")
plt.show()
def regression(df,clusters=True,type='linear'):
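    """
    Fits y = k * func(x, type) by least squares, either on the GMM cluster
    centres (clusters=True, in which case df is a fitted GaussianMixture) or on
    the raw 'geographic'/'latency' columns, plots the fit together with the
    speed-of-light optimum and returns the least-squares residual.
    """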
if clusters:
x = np.array(df.means_[:, 0])
y= np.array(df.means_[:, 1])
else:
x = np.array(df['geographic'].values)
y = np.array(df['latency'].values)
funco = func(x,type)
M = np.column_stack((funco,)) # construct design matrix
k, res, _, _ = np.linalg.lstsq(M, y,rcond=None)
plt.plot(x, y, '.')
x_bis = np.linspace(start=0,stop=max(x)+100)
plt.plot(x_bis, k * (func(x_bis,type)), 'r', linewidth=1)
y_bis = np.linspace(start=0,stop=max(y)+100)
print(y)
plt.plot(1/3*c*y_bis*10**(-6),y_bis,'y',linewidth = 1)
plt.legend(('measurement', 'fit','optimal'), loc=2)
plt.title('best fit: y = {:.8f}'.format(k[0]) + " for regression of type " +type)
plt.xlim(xmax=max(x)+100,xmin = -100)
plt.show()
plt.plot(x, y, '.','b')
plt.plot(x,k * (func(x,type)),'^','r')
plt.plot(x,y-k * (func(x,type)),'*','y')
plt.show()
return res
def distance(df):
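    """
    For every (latency, geographic) point, computes a normalised distance to the
    "optimal" speed-of-light line (a = c/3 * 1e-6 / max(geographic)):
        d = |latency/max(latency) - a * geographic/max(geographic)| / sqrt(1 + a**2)
    and returns the (probe_id, distance) pairs sorted by increasing distance.
    """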
#p1 = np.array((0,0))
# p2 = np.array((1,1))
#p2 = np.array((1/3*c*10**(-6),1))
print(df)
print(np.array(df.values).shape)
second = max(np.array(df.values)[:, 1])
a = 1 / 3 * c * 10 ** (-6) / second
dict = {}
print(np.array(df.values).shape, df.index.shape)
first = max(np.array(df.values)[:, 0])
for ((coord1, coord2), i) in zip(np.array(df.values), df.index):
p3 = (coord1, coord2)
# d = norm(np.cross(p2 - p1, p1 - p3)) / norm(p2 - p1)
# d_bis= norm(np.cross(p2 - p1, p1 - p3)) / norm(p2 - p1)
d = np.absolute(p3[0] / first - a * p3[1] / second) / np.sqrt(1 + a ** 2)
# print(d,d_bis)
dict[i] = d
print(np.array(df.values)[:,0]/first,np.array(df.values)[:,1]/second)
# plt.plot(np.array(df.values)[:,1]/second,np.array(df.values)[:,0]/first, '.')
# plt.show()
sorted_dict = sorted(dict.items(), key=lambda kv: kv[1])
return sorted_dict
import networkx as nx
def graph_inference(values,df,data):
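    """
    Builds a star graph over the probes: the probe closest to the optimal line
    (smallest value returned by distance(df)) is connected to every other probe
    whose distance is at most `values`; every node is annotated with the city
    it belongs to according to `data`.
    """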
new_data = {}
for t in data.keys():
for s in data[t]:
new_data[s] = t
dist = distance(df)
dist_bis = list(zip(*dist))
print(dist_bis[0])
G = nx.Graph()
G.add_nodes_from(list(dist_bis[0]))
t_0 = dist_bis[0][0]
for (t,s) in dist:
print(t,s)
if s <= values:
if t!=t_0 :
G.add_edge(t_0,t)
nx.set_node_attributes(G, new_data, 'city')
return G
def combining_graph(graphs):
# id_of_interest = path_to_graphs.split('_')[1]
G_all = nx.Graph()
for i,G in enumerate(graphs):
print(i)
if i == 0:
G_all.add_nodes_from(G.nodes())
city = nx.get_node_attributes(G, 'city')
print(city)
# nx.set_node_attributes(G_all,city,'city')
G_all.add_edges_from(G.edges())
# nx.write_graphml(G_all,"/Users/loqman/Downloads/hi.graphml")
return G_all
def pipeline_ricci(path_to_data,list_of_ids,geo_matrix_path,values,internet=False):
with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
data = json.load(json_file)
graphs = []
if internet:
for (s,t) in zip(path_to_data,list_of_ids):
df = read_all(path_ripe=path_to_data,id=t,path_geo=geo_matrix_path,internet=True,id_meas=s[0])
print(len(df))
if len(df) == 0:
continue
graphs.append(graph_inference(values,df,data))
else:
for (s,t) in zip(path_to_data,list_of_ids):
df = read_all(t,s,geo_matrix_path)
graphs.append(graph_inference(values,df,data))
G = combining_graph(graphs)
return G
def gmm_visual(df,n):
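    """
    Fits an n-component Gaussian mixture on the (geographic, latency) pairs,
    prints the cluster assignment of every probe, runs the four regression
    types on the cluster centres, scatters the centres and finally shows the
    influence plot of the raw data.
    """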
elem = df[['geographic','latency']].values
elem = [list(e) for e in elem]
print(elem)
gmm = GaussianMixture(n_components=n,covariance_type='full',random_state=1).fit(elem)
labels = gmm.predict(elem)
dic = {}
for (i,t) in enumerate(df[['geographic','latency']].index):
dic[t] = labels[i]
print(Counter(labels))
print(dic)
# dist_bis = list(zip(*distance(df)))
for t in ["root","squared","linear","cubical"]:
print(regression(gmm,True,t))
probs = gmm.predict_proba(elem)
print(probs)
# print(gmm.means_)
plt.scatter(gmm.means_[:,0], gmm.means_[:, 1],c= [ "red", "blue", "green", "yellow", "purple", "orange" ,"white", "black"][:n], s=40, cmap='viridis')
plt.show()
influence_plot(df)
# with open('interest_probes.json') as json_file:
# data = json.load(json_file)
# ripe_path = ['/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-parisarpnic.json','/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-21715861.json']
# measurement = Measurement(id='21715861')
# print(measurement.meta_data)
# ids = [6231,6271]
# # id =6285 #Atlanta
# id = 6271 #Paris Afnic
# print(dir(measurement))
# pipeline_ricci(ripe_path,list_of_ids=ids,geo_matrix_path='/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle')
def full_pipeline(measurements,probes,matrix_geo,name,val):
with open(measurements, 'rb') as fp:
list_of_measurements = pickle.load(fp)
with open(probes, 'rb') as fp:
list_of_ids = pickle.load(fp)
print(len(list_of_measurements), len(list_of_ids))
G = pipeline_ricci(list_of_measurements, list_of_ids=list_of_ids.keys(),
geo_matrix_path=matrix_geo,
values=val, internet=True)
print(len(G.nodes()))
with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
data = json.load(json_file)
city = {}
for t in data.keys():
for s in data[t]:
print(t)
city[s] = t
nx.set_node_attributes(G, city, 'city')
# nx.write_graphml(G,"/Users/loqman/Downloads/combinaison_probes.graphml")
G = ricciCurvature(G)
ricci = nx.get_node_attributes(G, 'ricciCurvature')
abs_ricci = {}
for t in ricci.keys():
abs_ricci[t] = abs(ricci[t])
nx.set_node_attributes(G, abs_ricci, 'abs_ricci')
# G = compute_ricciFlow(G)
# # nx.write_graphml(G,)
nx.write_graphml(G, "/Users/loqman/Downloads/graph/"+name+str(val)+".graphml")
if __name__ == "__main__":
# with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
# data = json.load(json_file)
# with open('list_of_measurements_bis', 'rb') as fp:
# list_of_measurements = pickle.load(fp)
# with open('list_of_ids', 'rb') as fp:
# list_of_ids = pickle.load(fp)
# for (s, t) in zip(list_of_measurements,list_of_ids.keys()):
# map_Louis(s[0])
# break
# with open('metainfo_aug.pickle', 'rb') as fp:
# list_of_ids = pickle.load(fp)
with open('metainfo_cloudincluded_all.pickle', 'rb') as fp:
list_of_ids = pickle.load(fp)
print(list_of_ids)
# for s in list_of_ids[0].keys():
# if list_of_ids[0][s] == 'Utah':
# print(s)
# #
# for val in list(float_range(0.5, 0.7, '0.01')):
# val = float(val)
# full_pipeline('list_of_measurements_bis','list_of_ids_bis','/Users/loqman/PycharmProjects/privacy-preserving/geo_matrix_90sec.pickle','probes_06',val)
# with open('list_of_measurements', 'rb') as fp:
# list_of_measurements = pickle.load(fp)
# with open('list_of_ids', 'rb') as fp:
# list_of_ids = pickle.load(fp)
# print(len(list_of_measurements),len(list_of_ids))
# G = pipeline_ricci(list_of_measurements,list_of_ids = list_of_ids.keys(),geo_matrix_path='/Users/loqman/PycharmProjects/privacy-preserving/geo_matrix_90.pickle',values=0.5,internet=True)
# print(len(G.nodes()))
# with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
# data = json.load(json_file)
# city = {}
# for t in data.keys():
# for s in data[t]:
# print(t)
# city[s]=t
# nx.set_node_attributes(G,city,'city')
# # nx.write_graphml(G,"/Users/loqman/Downloads/combinaison_probes.graphml")
# G = ricciCurvature(G)
# ricci = nx.get_node_attributes(G, 'ricciCurvature')
# abs_ricci = {}
# for t in ricci.keys():
# abs_ricci[t] = abs(ricci[t])
# nx.set_node_attributes(G,abs_ricci,'abs_ricci')
# # G = compute_ricciFlow(G)
# # # nx.write_graphml(G,)
# nx.write_graphml(G,"/Users/loqman/Downloads/combinaison_probes_90-0.5ricci.graphml")
# for (s,t) in zip(list_of_ids.keys(),list_of_measurements):
# print(t[0])
# df = read_all(s,"",'/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle',type="min",internet= True,id_meas = t[0])
# # df = read_all(id,'/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-parisarpnic.json','/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle')
# name_ordered = dist_bis[0]
# value_ordered =dist_bis[1]
# new_data = {}
# for t in data.keys():
# for s in data[t]:
# new_data[s] = t
# for (t,s) in zip(name_ordered,value_ordered):
# print(new_data[t],s)
# with open('new_data.json', 'w') as outfile:
# json.dump(new_data, outfile)
# df.to_pickle('data.pickle')
# G = graph_inference(df,data)
# nx.write_graphml(G,"/Users/loqman/Downloads/graph_try_min.graphml")
# import json
# options = {
# 'node_color': 'red',
# 'node_size': 1,
# 'line_color': 'blue',
# 'linewidths': 1,
# 'width': 0.1,
# }
# # nx.draw(G, **options)
# plt.show()
# plt.plot()
# print(distance(df))
# for t in ["root","squared","linear","cubical"]:
# print(regression(gmm,True,t))
# regression(gmm)
# l =[]
# l_bis = []
# l_third = []
# for t in dic.keys():
# if dic[t] == 3:
# l.append(t)
# elif dic[t] == 1:
# l_bis.append(t)
# elif dic[t] == 5:
# l_third.append(t)
# for n in set(l_third):
# print('Cluster orange', Probe(id=n).asn_v4)
# for n in set(l):
# print('Premier cluster:', Probe(id=n).asn_v4)
# for n in set(l_bis):
# print('Second cluster: ',Probe(id=n).asn_v4)
# probs = gmm.predict_proba(elem)
# print(probs)
# print(gmm.means_)
# plt.scatter(gmm.means_[:,0], gmm.means_[:, 1],c= [ "red", "blue", "green", "yellow", "purple", "orange" ,"white", "black"][:n], s=40, cmap='viridis')
# plt.show()
# print(kmeans.cluster_centers_)
# range_n_clusters = list(range(2,10))
# from sklearn.metrics import silhouette_score
# for n_clusters in range_n_clusters:
# Gaussian = GaussianMixture(n_components=n_clusters covariance_type='full').fit(elem)
# cluster_labels = clusterer.fit_predict(elem)
# # The silhouette_score gives the average value for all the samples.
# # This gives a perspective into the density and separation of the formed
# # clusters
# silhouette_avg = silhouette_score(elem, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# plt.scatter(df['geographic'], df['latency'])
# plot_gmm(gmm, elem)
# plt.show()
# influence_plot(df)
# print(AS_analysis(labels,df.index,True))
| StarcoderdataPython |
3297600 | from setuptools import setup, find_packages
__version__ = '1.0.2'
setup(
name='amundsen-metadata',
version=__version__,
description='Metadata service package for Amundsen',
url='https://www.github.com/lyft/amundsen',
maintainer='Lyft',
maintainer_email='<EMAIL>',
packages=find_packages(exclude=['tests*']),
dependency_links=[],
install_requires=[
# Packages in here should rarely be pinned. This is because these
        # packages (at the specified version) are required for any project
        # consuming this library. By pinning to a specific version you are
        # limiting the number of projects that can consume this or forcing them
        # to upgrade/downgrade any dependencies pinned here in their project.
#
# Generally packages listed here are pinned to a major version range.
#
# e.g.
# Python FooBar package for foobaring
# pyfoobar>=1.0, <2.0
#
# This will allow for any consuming projects to use this library as
# long as they have a version of pyfoobar equal to or greater than 1.x
# and less than 2.x installed.
'Flask-RESTful>=0.3.6',
'neo4j-driver==1.6.0',
'beaker>=1.10.0',
'statsd>=3.2.1'
],
)
| StarcoderdataPython |
78765 | import abc
import copy
import re
import pandas as pd
class Feature(metaclass=abc.ABCMeta):
def __init__(self, name, path):
self.name = name
self.path = path
self._data = None
def data(self):
if self._data is None:
self.load_data()
return self._data
@abc.abstractmethod
def load_data(self):
"""Load the data to the attribute _data"""
def __str__(self):
return "[" + self.name + "," + self.path + "]"
def __repr__(self):
return self.__str__()
@Feature.register
class QueryFeature(Feature):
def load_data(self):
self._data = pd.read_csv("{0}.{1}".format(self.path, self.name), header=None, names=[self.name])
self._data['QID'] = self._data.index
self._data = self._data[['QID', self.name]]
@Feature.register
class ShardFeature(Feature):
def __init__(self, name, path, num_shards):
super().__init__(name, path)
self.num_shards = num_shards
pass
def shard_df(self, shard):
df = pd.read_csv("{0}#{1}.{2}".format(self.path, shard, self.name), header=None, names=[self.name])
df['QID'] = df.index
df['SID'] = pd.Series(shard, index=df.index)
return df
def load_data(self):
shards = [self.shard_df(shard) for shard in range(self.num_shards)]
self._data = pd.concat(shards, ignore_index=True, copy=False)
self._data = self._data[['QID', 'SID', self.name]]
def __str__(self):
return "[" + self.name + "," + self.path + "," + str(self.num_shards) + "]"
@Feature.register
class BucketFeature(Feature):
def __init__(self, name, path, num_shards, num_buckets):
super().__init__(name, path)
self.num_shards = num_shards
self.num_buckets = num_buckets
pass
def bucket_df(self, shard, bucket):
df = pd.read_csv("{0}#{1}#{2}.{3}".format(self.path, shard, bucket, self.name), header=None, names=[self.name])
df['QID'] = df.index
df['SID'] = pd.Series(shard, index=df.index)
df['BID'] = pd.Series(bucket, index=df.index)
return df
def shard_df(self, shard):
buckets = [self.bucket_df(shard, bucket) for bucket in range(self.num_buckets)]
shard = pd.concat(buckets, ignore_index=True, copy=False)
return shard[['QID', 'SID', 'BID', self.name]]
def load_data(self):
shards = [self.shard_df(shard) for shard in range(self.num_shards)]
self._data = pd.concat(shards, ignore_index=True, copy=False)
self._data = self._data[['QID', 'SID', 'BID', self.name]]
def __str__(self):
return "[" + self.name + "," + self.path + "," + str(self.num_shards) + "," + str(self.num_buckets) + "]"
class Dataset:
@staticmethod
def resolve_reference(j, path):
for elem in path.split("/")[1:]:
j = j[elem]
return j
@staticmethod
def parse_path(j, path, features_field):
i = path.find(':')
if i > -1:
p = path[:i]
            pattern = re.compile(r'\$\{([^\}]*)\}')
matches = list(pattern.finditer(p))
end = 0
s = []
for match in matches:
s.append(p[end:match.span()[0]])
s.append(Dataset.resolve_reference(j, match.group(1)))
end = match.span()[1]
s.append(p[end:])
p = ''.join(s)
else:
p = j[features_field]['base']
return path[i + 1:], p
@staticmethod
def parse_json(j, features_field='features'):
"""Resolves paths to features"""
parsed = copy.deepcopy(j)
num_shards = parsed['shards']
if 'buckets' not in parsed:
parsed['buckets'] = None
num_buckets = parsed['buckets']
parsed[features_field]['query'] = [QueryFeature(*Dataset.parse_path(parsed, path, features_field))
for path in parsed[features_field]['query']]
parsed[features_field]['shard'] = [ShardFeature(*Dataset.parse_path(parsed, path, features_field), num_shards)
for path in parsed[features_field]['shard']]
if 'bucket' in parsed[features_field]:
parsed[features_field]['bucket'] =\
[BucketFeature(*Dataset.parse_path(parsed, path, features_field), num_shards, num_buckets)
for path in parsed[features_field]['bucket']]
else:
parsed[features_field]['bucket'] = None
return parsed
def __init__(self, query_features, shard_features, bucket_features, num_buckets):
self.query_features = query_features
self.shard_features = shard_features
self.bucket_features = bucket_features
self.num_buckets = num_buckets
@staticmethod
def merge(features, on):
assert len(features) > 0
merged = features[0]
for feature in features[1:]:
merged = pd.merge(merged, feature, on=on)
return merged
def load(self):
assert self.query_features is not None
assert len(self.query_features) > 0
assert self.shard_features is not None
assert len(self.shard_features) > 0
qf = self.merge([f.data() for f in self.query_features], on='QID')
sf = self.merge([f.data() for f in self.shard_features], on=['QID', 'SID'])
dataset = pd.merge(qf, sf, on=['QID'])
if self.bucket_features is not None:
if len(self.bucket_features) > 0:
bf = self.merge([f.data() for f in self.bucket_features], on=['QID', 'SID', 'BID'])
dataset = pd.merge(dataset, bf, on=['QID', 'SID'])
else:
bf = pd.DataFrame({
'BID': range(self.num_buckets),
'key': 1
})
columns = list(dataset.columns.values) + ['BID']
dataset['key'] = 1
dataset = pd.merge(dataset, bf, on=['key'])[columns]
return dataset
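# Illustrative usage sketch (paths and feature names are assumptions, not from the
# original project):
#   qf = [QueryFeature('qlen', '/data/features/queries')]
#   sf = [ShardFeature('score', '/data/features/shards', num_shards=4)]
#   dataset = Dataset(qf, sf, bucket_features=None, num_buckets=None).load()
# yields one row per (QID, SID) with all merged feature columns.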
| StarcoderdataPython |
3293325 | import os
import importlib.util
from pykeops.common.gpu_utils import get_gpu_number
###############################################################
# Initialize some variables: the values may be redefined later
##########################################################
# Update config module: Search for GPU
gpu_available = get_gpu_number() > 0
numpy_found = importlib.util.find_spec("numpy") is not None
torch_found = importlib.util.find_spec("torch") is not None
###############################################################
# Compilation options
script_folder = os.path.dirname(os.path.abspath(__file__))
bin_folder = (
"" # init bin_folder... shlould be populated with the set_bin_folder() function
)
# Set the verbosity option: display output of compilations. This is a boolean: False or True
verbose = (
bool(int(os.environ["PYKEOPS_VERBOSE"]))
if "PYKEOPS_VERBOSE" in os.environ
else False
)
# Force compiled and set the cmake build type. This is a string with possible value "Release" or "Debug"
build_type = (
str(os.environ["PYKEOPS_BUILD_TYPE"])
if ("PYKEOPS_BUILD_TYPE" in os.environ)
else "Release"
)
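# Typical usage inferred from the options above (an assumption, not part of the
# original file): both settings are read from the environment before importing
# pykeops, e.g.
#   PYKEOPS_VERBOSE=1 PYKEOPS_BUILD_TYPE=Debug python my_script.py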
| StarcoderdataPython |
168629 | <gh_stars>1-10
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
import unittest
from ros_bt_py_msgs.msg import Node, UtilityBounds
from ros_bt_py_msgs.msg import NodeDataWiring
from ros_bt_py_msgs.msg import NodeDataLocation
from ros_bt_py.nodes.list import ListLength, IsInList, IterateList,\
InsertInList, GetListElementOption
from ros_bt_py.nodes.mock_nodes import MockLeaf
from ros_bt_py.nodes.compare import CompareConstant
from ros_bt_py.nodes.decorators import IgnoreFailure
try:
import unittest.mock as mock
except ImportError:
import mock
class TestListLength(unittest.TestCase):
def testListLength(self):
list_length = ListLength()
list_length.setup()
list_length.inputs['list'] = ['a', 'b', 'c']
self.assertEqual(list_length.tick(), Node.SUCCEEDED)
self.assertEqual(list_length.outputs['length'], 3)
self.assertEqual(list_length.untick(), Node.IDLE)
self.assertEqual(list_length.reset(), Node.IDLE)
self.assertEqual(list_length.shutdown(), Node.SHUTDOWN)
class TestIsInList(unittest.TestCase):
def testIsInList(self):
in_list = IsInList({
'compare_type': str,
'list': ['a', 'b', 'c']
})
in_list.setup()
in_list.inputs['in'] = 'not in list'
self.assertEqual(in_list.tick(), Node.FAILED)
in_list.inputs['in'] = 'a'
self.assertEqual(in_list.tick(), Node.SUCCEEDED)
self.assertEqual(in_list.untick(), Node.IDLE)
self.assertEqual(in_list.reset(), Node.IDLE)
self.assertFalse(in_list._received_in)
self.assertEqual(in_list.shutdown(), Node.SHUTDOWN)
class TestGetListElementOption(unittest.TestCase):
def testGetOption(self):
get_elem = GetListElementOption({
'element_type': str,
'index': 2
})
get_elem.setup()
get_elem.inputs['list'] = ['nop', 'no', 'yep']
self.assertEqual(get_elem.tick(), Node.SUCCEEDED)
self.assertEqual(get_elem.outputs['element'], 'yep')
self.assertEqual(get_elem.untick(), Node.IDLE)
self.assertEqual(get_elem.reset(), Node.IDLE)
self.assertEqual(get_elem.shutdown(), Node.SHUTDOWN)
def testOutOfRange(self):
get_elem = GetListElementOption({
'element_type': str,
'index': 5
})
get_elem.setup()
get_elem.inputs['list'] = ['nop', 'no', 'yep']
self.assertRaises(IndexError, get_elem.tick)
def testWrongType(self):
get_elem = GetListElementOption({
'element_type': int,
'index': 2
})
get_elem.setup()
get_elem.inputs['list'] = ['no', 'int', 'toto']
self.assertRaises(TypeError, get_elem.tick)
class TestInsertInList(unittest.TestCase):
def testInsert(self):
insert = InsertInList({
'element_type': str,
'index': 2
})
insert.setup()
insert.inputs['list'] = ['first', 'before', 'after', 'last']
insert.inputs['element'] = 'here'
self.assertEqual(insert.tick(), Node.SUCCEEDED)
self.assertEqual(insert.outputs['list'][2], 'here')
self.assertEqual(insert.outputs['list'][1], 'before')
self.assertEqual(insert.outputs['list'][3], 'after')
self.assertEqual(insert.untick(), Node.IDLE)
self.assertEqual(insert.reset(), Node.IDLE)
self.assertEqual(insert.shutdown(), Node.SHUTDOWN)
def testOutOfRangePositive(self):
insert = InsertInList({
'element_type': str,
'index': 10
})
insert.setup()
# index > len(list) insert at last position
insert.inputs['list'] = ['first', 'before', 'after', 'last']
insert.inputs['element'] = 'here'
self.assertEqual(insert.tick(), Node.SUCCEEDED)
self.assertEqual(insert.outputs['list'][-1], 'here')
def testOutOfRangeNegative(self):
insert = InsertInList({
'element_type': str,
'index': -10
})
insert.setup()
# index > len(list) insert at last position
insert.inputs['list'] = ['first', 'before', 'after', 'last']
insert.inputs['element'] = 'here'
self.assertEqual(insert.tick(), Node.SUCCEEDED)
self.assertEqual(insert.outputs['list'][0], 'here')
class TestIterateList(unittest.TestCase):
def setUp(self):
self.iterate = IterateList({
'item_type': str
})
self.compare = CompareConstant({
'compare_type': str,
'expected': 'toto'
})
self.tick_count = mock.Mock(wraps=self.compare._do_tick)
self.compare._do_tick = self.tick_count
self.ignore_failure = IgnoreFailure()
def connect_compare(self):
self.compare.wire_data(NodeDataWiring(
source=NodeDataLocation(
node_name=self.iterate.name,
data_kind=NodeDataLocation.OUTPUT_DATA,
data_key='list_item'
),
target=NodeDataLocation(
node_name=self.compare.name,
data_kind=NodeDataLocation.INPUT_DATA,
data_key='in'
)))
def tick_until_compare_tick(self):
initial_count = self.tick_count.call_count
max_iteration = 20
ii = 0
last_tick = Node.RUNNING
while self.tick_count.call_count != initial_count + 1 and ii < max_iteration:
# tick the iterator until it decides to tick its compare (or until we give up)
ii += 1
self.assertEqual(last_tick, Node.RUNNING)
last_tick = self.iterate.tick()
self.assertLess(ii, max_iteration)
return last_tick
def testIterateWithChildSuccessInput(self):
self.iterate.add_child(self.ignore_failure)
self.ignore_failure.add_child(self.compare)
self.connect_compare()
self.iterate.inputs['list'] = ['some', 'ignored', 'string']
self.iterate.setup()
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'some')
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'ignored')
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.SUCCEEDED)
self.assertEqual(self.compare.inputs['in'], 'string')
self.assertEqual(self.iterate.untick(), Node.IDLE)
self.assertEqual(self.iterate.reset(), Node.IDLE)
self.assertEqual(self.iterate.shutdown(), Node.SHUTDOWN)
def testIterateWithChildFailInput(self):
self.iterate.add_child(self.compare)
self.connect_compare()
self.iterate.inputs['list'] = ['toto', 'fail']
self.iterate.setup()
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'toto')
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.FAILED)
self.assertEqual(self.compare.inputs['in'], 'fail')
def testIterateWithChildRunningInput(self):
self.iterate.add_child(self.compare)
self.connect_compare()
self.tick_count.side_effect = [
Node.RUNNING, Node.SUCCEEDED, Node.RUNNING, Node.SUCCEEDED]
self.iterate.inputs['list'] = ['ignored', 'bymock']
self.iterate.setup()
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'ignored')
# child returned running - input did not change
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'ignored')
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.RUNNING)
self.assertEqual(self.compare.inputs['in'], 'bymock')
# child returned running - input did not change
token = self.tick_until_compare_tick()
self.assertEqual(token, Node.SUCCEEDED)
self.assertEqual(self.compare.inputs['in'], 'bymock')
def testIterateWithoutChild(self):
self.iterate.inputs['list'] = ['a', 'b']
self.iterate.setup()
self.assertEqual(self.iterate.tick(), Node.RUNNING)
self.assertEqual(self.iterate.outputs['list_item'], 'a')
self.assertEqual(self.iterate.tick(), Node.SUCCEEDED)
self.assertEqual(self.iterate.outputs['list_item'], 'b')
self.assertEqual(self.iterate.untick(), Node.IDLE)
self.assertEqual(self.iterate.reset(), Node.IDLE)
self.assertEqual(self.iterate.shutdown(), Node.SHUTDOWN)
def testIterateWithoutChildEmptyList(self):
self.iterate.inputs['list'] = []
self.iterate.setup()
self.assertEqual(self.iterate.tick(), Node.SUCCEEDED)
self.assertEqual(self.iterate.untick(), Node.IDLE)
self.assertEqual(self.iterate.reset(), Node.IDLE)
self.assertEqual(self.iterate.shutdown(), Node.SHUTDOWN)
| StarcoderdataPython |
4811257 | import os
device_name = os.getenv('DEVICENAME', 'Poweredge-R510')
energy_sensor_name = os.getenv('ENERGYNAME', device_name + '-energysensor')
mqtt_energy_topic = os.getenv('ENERGYSTATETOPIC', '' + energy_sensor_name + '/energyusage')
energy_config_template = {
"name": energy_sensor_name,
"unique_id": energy_sensor_name.lower(),
"device_class": "energy",
"state_class": "total_increasing",
"device": {
"manufacturer": "Dell",
"model": "R510",
"name": device_name,
"identifiers": [
device_name.lower()
],
"sw_version": "2.0"
},
"state_topic": mqtt_energy_topic,
"unit_of_measurement": "kWh"
} | StarcoderdataPython |
3386171 | <gh_stars>0
from rest_framework.permissions import BasePermission
class AllowSpecificClients(BasePermission):
"""
Allows access to only specific clients.
Should be used along with `<>.auth.TokenAuthentication`.
"""
allowed_clients_name = ("web",)
def has_permission(self, request, view):
if not hasattr(request, "_auth"):
return False
return request._auth.client.name in self.allowed_clients_name
class DisallowSpecificClients(BasePermission):
"""
restrict specific clients from making requests.
Should be used along with `<>.auth.TokenAuthentication`.
"""
disallowed_clients_name = ()
def has_permission(self, request, view):
if not hasattr(request, "_auth"):
return False
return request._auth.client.name not in self.disallowed_clients_name
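# Illustrative usage sketch (assumed, not part of the original module): in a DRF
# view these permissions are combined with the token authentication mentioned in
# the docstrings above, e.g.
#   class MyView(APIView):
#       authentication_classes = (TokenAuthentication,)
#       permission_classes = (AllowSpecificClients,)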
| StarcoderdataPython |
1756135 | import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
import random
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class ShareFile(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Share':
directory_id = self.request.get('directory_id')
share_file_name = self.request.get('share_file_name')
index = self.request.get('index')
owner_email_id = self.request.get('owner_email_id')
user = users.get_current_user()
logout = users.create_logout_url('/')
template_values = {
'share_file_name' : share_file_name,
'directory_id': directory_id,
'index' : index,
'owner_email_id' : owner_email_id,
'user': user,
'logout': logout
# 'upload_url': blobstore.create_upload_url('/uploadfilehandler'),
}
template = JINJA_ENVIRONMENT.get_template('sharefile.html')
self.response.write(template.render(template_values))
if self.request.get('button') == 'Check email_id' :
directory_id = self.request.get('directory_id')
share_file_name = self.request.get('share_file_name')
index = int(self.request.get('index'))
email_id = self.request.get('email_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
blob_key = directory.blobs[index]
owner_email_id = self.request.get('owner_email_id')
user_counter = 0
id = email_id + '/'
shared_user_key = ndb.Key(Directory,id)
shared_user = shared_user_key.get()
user = users.get_current_user()
logout = users.create_logout_url('/')
            if shared_user is None:
                error_message = 'Sorry, a user with this email id does not exist. Please check the email id'
template_values = {
'error_message' : error_message,
'user' : user,
'logout' : logout
}
template = JINJA_ENVIRONMENT.get_template('error.html')
self.response.write(template.render(template_values))
#all_directories = Directory.query.fetch()
#for each_directory in all_directories :
# if each_directory.id == key :
# user_counter = user_counter + 1
# break
else :
shared_user.shared_files.append(share_file_name)
shared_user.shared_files_blobs.append(blob_key)
shared_user.shared_file_owner.append(owner_email_id)
shared_user.put()
self.redirect('/main')
#if user_counter > 0 :
| StarcoderdataPython |
46111 | class Point:
"Classe Point géographique contenant une position"
def __init__(self,x,y):
self._x=x
self._y=y
def getx(self):
return self._x
def gety(self):
return self._y
def setx(self, x):
self._x = x
def sety(self, y):
self._y = y
def translation(self,valeur):
self._x +=valeur
self._y +=valeur
p1 = Point(-73,45)
p1.translation(2)
print(p1.getx())
print(p1.gety())
| StarcoderdataPython |
1708113 | <filename>conf/settings/test_ci.py
from .base import *
from .base import env
DEBUG = False
SECRET_KEY = env("SECRET_KEY", default="myverysecretkey")
TEST_RUNNER = "django.test.runner.DiscoverRunner"
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
| StarcoderdataPython |
4838564 | <filename>cloud/endagaweb/settings/test_spatialite.py
"""
Use Django prod settings from endagaweb with as few changes as necessary
to make tests run under Buck/Sandcastle.
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import os
from endagaweb.settings.staff import * # noqa: F401, F403
# Use spatialite for unit tests
GEOS_LIBRARY_PATH = os.environ['GEOS_LIBRARY_PATH']
SPATIALITE_LIBRARY_PATH = os.environ['SPATIALITE_LIBRARY_PATH']
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': '/tmp/endaga.db',
},
}
ROOT_URLCONF = 'endagaweb.urls'
| StarcoderdataPython |
1624547 | <gh_stars>0
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django.shortcuts import render
from pages.models import Post
def index_func(request):
posts = Post.objects.order_by('published_date')
return render(request,"home/index.html",{'posts': posts})
def about_func(request):
return render(request,"page/about.html")
def blog_func(request):
posts_list = Post.objects.all().order_by('published_date')
paginator = Paginator(posts_list,2)
page = request.GET.get('sayfa')
posts = paginator.get_page(page)
return render(request,"page/blog.html",{'posts':posts})
def iletisim_func(request):
return render(request,"page/iletisim.html")
| StarcoderdataPython |
1765060 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from decimal import Decimal
from .apitask import APITask
from thing.models import Character, CorporationStanding, Faction, FactionStanding
class Standings(APITask):
name = 'thing.standings'
def run(self, url, taskstate_id, apikey_id, character_id):
if self.init(taskstate_id, apikey_id) is False:
return
try:
character = Character.objects.select_related('details').get(pk=character_id)
except Character.DoesNotExist:
self.log_warn('Character %s does not exist!', character_id)
return
# Fetch the API data
params = {'characterID': character_id}
if self.fetch_api(url, params) is False or self.root is None:
return
# Build data maps
cs_map = {}
for cs in CorporationStanding.objects.filter(character=character):
cs_map[cs.corporation_id] = cs
fs_map = {}
for fs in FactionStanding.objects.filter(character=character):
fs_map[fs.faction_id] = fs
# Iterate over rowsets
for rowset in self.root.findall('result/characterNPCStandings/rowset'):
name = rowset.attrib['name']
# NYI: Agents
if name == 'agents':
continue
# Corporations
elif name == 'NPCCorporations':
new = []
for row in rowset.findall('row'):
id = int(row.attrib['fromID'])
standing = Decimal(row.attrib['standing'])
cs = cs_map.get(id, None)
# Standing doesn't exist, make a new one
if cs is None:
cs = CorporationStanding(
character_id=character.id,
corporation_id=id,
standing=standing,
)
new.append(cs)
# Exists, check for standings change
elif cs.standing != standing:
cs.standing = standing
cs.save()
if new:
CorporationStanding.objects.bulk_create(new)
# Factions
elif name == 'factions':
factions = {}
for row in rowset.findall('row'):
id = int(row.attrib['fromID'])
standing = Decimal(row.attrib['standing'])
fs = fs_map.get(id, None)
# Standing doesn't exist, make a new one
if fs is None:
factions[id] = (row.attrib['fromName'], standing)
# Exists, check for standings change
elif fs.standing != standing:
fs.standing = standing
fs.save()
if factions:
faction_ids = set(Faction.objects.filter(pk__in=factions.keys()).values_list('id', flat=True))
new_f = []
new_fs = []
for id, (name, standing) in factions.items():
if id not in faction_ids:
new_f.append(Faction(
id=id,
name=name,
))
new_fs.append(FactionStanding(
character_id=character.id,
faction_id=id,
standing=standing,
))
if new_f:
Faction.objects.bulk_create(new_f)
if new_fs:
FactionStanding.objects.bulk_create(new_fs)
return True
| StarcoderdataPython |
1605346 | <gh_stars>0
import os
import sys
import inspect
import pytest
import json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, "{}/src".format(parentdir))
from Components.MarketProvider import MarketProvider
from Components.Broker import Broker
from Components.IGInterface import IGInterface
from Components.AVInterface import AVInterface
from common.MockRequests import (
ig_request_login,
ig_request_set_account,
ig_request_market_info,
ig_request_search_market,
ig_request_watchlist,
)
@pytest.fixture
def config():
"""
Returns a dict with config parameter for strategy and simpleMACD
"""
# Read configuration file
try:
with open("config/config.json", "r") as file:
config = json.load(file)
except IOError:
exit()
return config
@pytest.fixture
def credentials():
"""
Returns a dict with credentials parameters
"""
return {
"username": "user",
"password": "<PASSWORD>",
"api_key": "12345",
"account_id": "12345",
"av_api_key": "12345",
}
@pytest.fixture
def requests(requests_mock):
ig_request_login(requests_mock)
ig_request_set_account(requests_mock)
ig_request_market_info(requests_mock)
ig_request_search_market(requests_mock)
ig_request_watchlist(requests_mock, data="mock_watchlist_list.json")
ig_request_watchlist(requests_mock, args="12345678", data="mock_watchlist.json")
@pytest.fixture
def make_broker(requests, config, credentials):
# Define configuration for this test
# config["alpha_vantage"]["enable"] = True
# Mock services and other components
services = {
"ig_index": IGInterface(config, credentials),
"alpha_vantage": AVInterface(credentials["av_api_key"], config),
}
broker = Broker(config, services)
return broker
def test_market_provider_epics_list(config, make_broker):
"""
Test the MarketProvider configured to fetch markets from an epics list
"""
# Configure TradingBot to use an epic list
config["general"]["market_source"]["value"] = "list"
config["general"]["epic_ids_filepath"] = "test/test_data/epics_list.txt"
# load test data for market info response, so it can be used to mock the info
# for each epic in the epic_list
mock_info = None
try:
with open("test/test_data/ig/mock_market_info.json", "r") as file:
mock_info = json.load(file)
except IOError:
exit()
# Create the class to test
mp = MarketProvider(config, make_broker)
# Run the test several times resetting the market provider
for _ in range(4):
# Read the test epic list and create a local list of the expected epics
expected_list = []
with open("test/test_data/epics_list.txt", "r") as epics_list:
for cnt, line in enumerate(epics_list):
epic = line.rstrip()
expected_list += [epic]
        # Keep calling the test function, building a list of returned epics
actual_list = []
try:
while True:
actual_list.append(mp.next().epic)
except StopIteration:
# Verify we read all epics in the list
assert len(expected_list) == len(actual_list)
# Verify reading the next raise another exception
with pytest.raises(StopIteration) as e:
mp.next()
mp.reset()
continue
# If we get here it means that next did not raise an exception at the end of the list
assert False
def test_market_provider_watchlist(config, make_broker):
"""
Test the MarketProvider configured to fetch markets from an IG watchlist
"""
# Define configuration for this test
config["general"]["market_source"]["value"] = "watchlist"
# Watchlist name depending on test data json
config["general"]["watchlist_name"] = "My Watchlist"
# Create class to test
mp = MarketProvider(config, make_broker)
# The test data for market_info return always the same epic id, but the test
# data for the watchlist contains 3 markets
# Run the test several times resetting the market provider
for _ in range(4):
assert mp.next().epic == "KA.D.GSK.DAILY.IP"
assert mp.next().epic == "KA.D.GSK.DAILY.IP"
assert mp.next().epic == "KA.D.GSK.DAILY.IP"
with pytest.raises(StopIteration) as e:
mp.next()
mp.reset()
def test_market_provider_api(config, make_broker):
"""
Test the MarketProvider configured to fetch markets from IG nodes
"""
# Define configuration for this test
config["general"]["market_source"]["value"] = "api"
# TODO
# Create class to test
# mp = MarketProvider(config, make_broker)
assert True
def test_market_provider_market_from_epic(config, make_broker):
"""
Test the MarketProvider get_market_from_epic() function
"""
# Define configuration for this test
config["general"]["market_source"]["value"] = "list"
config["general"]["epic_ids_filepath"] = "test/test_data/epics_list.txt"
# Create class to test
mp = MarketProvider(config, make_broker)
market = mp.get_market_from_epic("mock")
assert market is not None
assert market.epic == "KA.D.GSK.DAILY.IP"
def test_search_market(config, make_broker, requests_mock):
"""
Test the MarketProvider search_market() function
"""
# Define configuration for this test
config["general"]["market_source"]["value"] = "list"
config["general"]["epic_ids_filepath"] = "test/test_data/epics_list.txt"
mp = MarketProvider(config, make_broker)
# The mock search data contains multiple markets
ig_request_search_market(requests_mock, data="mock_error.json")
with pytest.raises(RuntimeError):
market = mp.search_market("mock")
# TODO test with single market mock data and verify no exception
| StarcoderdataPython |
3316655 | """
pygments.lexers.procfile
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Procfile file format.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.lexer import bygroups
from pygments.token import Name
from pygments.token import Number
from pygments.token import Punctuation
from pygments.token import String
from pygments.token import Text
__all__ = ["ProcfileLexer"]
class ProcfileLexer(RegexLexer):
"""
Lexer for Procfile file format.
The format is used to run processes on Heroku or is used by Foreman or
Honcho tools.
For more information about the definition of the format, see:
https://devcenter.heroku.com/articles/procfile#procfile-format
.. versionadded:: 2.10
"""
name = 'Procfile'
aliases = ['procfile']
filenames = ['Procfile']
tokens = {
'root': [
(r'^([a-z]+)(:)', bygroups(Name.Label, Punctuation)),
(r'\s+', Text.Whitespace),
(r'"[^"]*"', String),
(r"'[^']*'", String),
(r'[0-9]+', Number.Integer),
(r'\$[a-zA-Z_][\w]*', Name.Variable),
(r'(\w+)(=)(\w+)', bygroups(Name.Variable, Punctuation, String)),
(r'([\w\-\./]+)', Text),
],
}
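# Illustrative example (not part of the upstream Pygments source): a Procfile that
# this lexer is meant to highlight typically looks like
#
#   web: gunicorn app:wsgi --workers $WEB_CONCURRENCY
#   worker: python worker.py
#
# and can be rendered with pygments.highlight(text, ProcfileLexer(), formatter).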
| StarcoderdataPython |
1783065 | <gh_stars>0
import math
import copy
class Vector3:
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
def __str__(self):
return '(' + str(self.x) + ' , ' + str(self.y) + ' , ' + str(self.z) + ')'
def __getitem__(self, key):
if key == 0:
return self.x
if key == 1:
return self.y
if key == 2:
return self.z
raise IndexError()
    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        elif key == 2:
            self.z = value
        else:
            raise IndexError()
def __neg__(self):
self.x = -self.x
self.y = -self.y
self.z = -self.z
return self
    def __add__(self, other):
        # in-place addition with another Vector3 or a scalar
        if isinstance(other, Vector3):
            self.x += other.x
            self.y += other.y
            self.z += other.z
        elif isinstance(other, (int, float)):
            self.x += other
            self.y += other
            self.z += other
        return self
    def __sub__(self, other):
        if isinstance(other, Vector3):
            self.x -= other.x
            self.y -= other.y
            self.z -= other.z
        elif isinstance(other, (int, float)):
            self.x -= other
            self.y -= other
            self.z -= other
        return self
    def __mul__(self, other):
        if isinstance(other, Vector3):
            self.x *= other.x
            self.y *= other.y
            self.z *= other.z
        elif isinstance(other, (int, float)):
            self.x *= other
            self.y *= other
            self.z *= other
        return self
    def __truediv__(self, other):
        if isinstance(other, Vector3):
            self.x /= other.x
            self.y /= other.y
            self.z /= other.z
        elif isinstance(other, (int, float)):
            self.x /= other
            self.y /= other
            self.z /= other
        return self
def length(self):
return math.sqrt(self.x * self.x +
self.y * self.y +
self.z * self.z)
def length_squared(self):
return (self.x * self.x +
self.y * self.y +
self.z * self.z)
def normalize(self):
scale = 1.0 / self.length()
self.x *= scale
self.y *= scale
self.z *= scale
def normalized(self):
v = Vector3(self.x, self.y, self.z)
v.normalize()
return copy.deepcopy(v)
def transform(self, matrix):
x = (self.x * matrix.row0.x +
self.y * matrix.row1.x +
self.z * matrix.row2.x +
matrix.row3.x)
y = (self.x * matrix.row0.y +
self.y * matrix.row1.y +
self.z * matrix.row2.y +
matrix.row3.y)
z = (self.x * matrix.row0.z +
self.y * matrix.row1.z +
self.z * matrix.row2.z +
matrix.row3.z)
self.x = x
self.y = y
self.z = z
def transformed(self, matrix):
v = Vector3(self.x, self.y, self.z)
v.transform(matrix)
return copy.deepcopy(v)
@staticmethod
def cross(lhs, rhs):
out = Vector3()
out.x = lhs.y * rhs.z - lhs.z * rhs.y
out.y = lhs.z * rhs.x - lhs.x * rhs.z
out.z = lhs.x * rhs.y - lhs.y * rhs.x
return copy.deepcopy(out)
@staticmethod
def dot(lhs, rhs):
return lhs.x * rhs.x + lhs.y * rhs.y + lhs.z * rhs.z
@staticmethod
def unit_x():
return copy.deepcopy(Vector3(1.0, 0.0, 0.0))
@staticmethod
def unit_y():
return copy.deepcopy(Vector3(0.0, 1.0, 0.0))
@staticmethod
def unit_z():
return copy.deepcopy(Vector3(0.0, 0.0, 1.0))
class Vector4:
def __init__(self, x=0.0, y=0.0, z=0.0, w=0.0):
self.x = x
self.y = y
self.z = z
self.w = w
def __str__(self):
return '(' + str(self.x) + ' , ' + str(self.y) + ' , ' + str(self.z) + ' , ' + str(self.w) + ')'
def __getitem__(self, key):
if key == 0:
return self.x
if key == 1:
return self.y
if key == 2:
return self.z
if key == 3:
return self.w
raise IndexError()
    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        elif key == 2:
            self.z = value
        elif key == 3:
            self.w = value
        else:
            raise IndexError()
def __neg__(self):
self.x = -self.x
self.y = -self.y
self.z = -self.z
self.w = -self.w
return self
    def __add__(self, other):
        # in-place addition with another Vector4 or a scalar
        if isinstance(other, Vector4):
            self.x += other.x
            self.y += other.y
            self.z += other.z
            self.w += other.w
        elif isinstance(other, (int, float)):
            self.x += other
            self.y += other
            self.z += other
            self.w += other
        return self
    def __sub__(self, other):
        if isinstance(other, Vector4):
            self.x -= other.x
            self.y -= other.y
            self.z -= other.z
            self.w -= other.w
        elif isinstance(other, (int, float)):
            self.x -= other
            self.y -= other
            self.z -= other
            self.w -= other
        return self
    def __mul__(self, other):
        if isinstance(other, Vector4):
            self.x *= other.x
            self.y *= other.y
            self.z *= other.z
            self.w *= other.w
        elif isinstance(other, (int, float)):
            self.x *= other
            self.y *= other
            self.z *= other
            self.w *= other
        return self
    def __truediv__(self, other):
        if isinstance(other, Vector4):
            self.x /= other.x
            self.y /= other.y
            self.z /= other.z
            self.w /= other.w
        elif isinstance(other, (int, float)):
            self.x /= other
            self.y /= other
            self.z /= other
            self.w /= other
        return self
def xyz(self):
return Vector3(self.x, self.y, self.z)
def length(self):
return math.sqrt(self.x * self.x +
self.y * self.y +
self.z * self.z +
self.w * self.w)
def length_squared(self):
return (self.x * self.x +
self.y * self.y +
self.z * self.z +
self.w * self.w)
def normalize(self):
scale = 1.0 / self.length()
self.x *= scale
self.y *= scale
self.z *= scale
self.w *= scale
def normalized(self):
v = Vector4(self.x, self.y, self.z, self.w)
v.normalize()
return copy.deepcopy(v)
@staticmethod
def unit_x():
return copy.deepcopy(Vector4(1.0, 0.0, 0.0, 0.0))
@staticmethod
def unit_y():
return copy.deepcopy(Vector4(0.0, 1.0, 0.0, 0.0))
@staticmethod
def unit_z():
return copy.deepcopy(Vector4(0.0, 0.0, 1.0, 0.0))
@staticmethod
def unit_w():
return copy.deepcopy(Vector4(0.0, 0.0, 0.0, 1.0))
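# Minimal usage sketch (an addition for illustration, not part of the original
# module): exercises the basic vector operations defined above.
if __name__ == "__main__":
    v = Vector3(1.0, 2.0, 2.0)
    print(v.length())                                          # 3.0
    print(Vector3.dot(Vector3.unit_x(), Vector3.unit_y()))     # 0.0
    print(Vector3.cross(Vector3.unit_x(), Vector3.unit_y()))   # (0.0 , 0.0 , 1.0)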
| StarcoderdataPython |
3250263 | <filename>Kerning/Remove all kerning exceptions.py
#MenuTitle: Remove Kerning Exceptions
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Removes all glyph-to-glyph, group-to-glyph and glyph-to-group kerning pairs; only group-to-group kerning is kept.
"""
import vanilla
class RemoveKerningExceptions( object ):
prefDomain = "com.mekkablue.RemoveKerningExceptions"
def __init__( self ):
# Window 'self.w':
windowWidth = 300
windowHeight = 160
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Remove Kerning Exceptions", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "%s.mainwindow"%self.prefDomain # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.glyphGlyph = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🅰️🅰️ glyph-to-glyph pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.glyphGroup = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🅰️🔠 glyph-to-group pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.groupGlyph = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🔠🅰️ group-to-glyph pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.removeOnMastersText = vanilla.TextBox( (inset, linePos+2, 70, 14), "Remove on:", sizeStyle='small', selectable=True )
self.w.removeOnMasters = vanilla.PopUpButton( (inset+70, linePos, -inset, 17), ("current master", "⚠️ all masters of current font", "⚠️ all masters of ⚠️ all open fonts"), sizeStyle='small', callback=self.SavePreferences )
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button( (-100-inset, -20-inset, -inset, -inset), "Remove", sizeStyle='regular', callback=self.RemoveKerningExceptionsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Remove Kerning Exceptions' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def updateGUI(self, sender=None):
anyOptionIsSelected = self.w.glyphGlyph.get() or self.w.glyphGroup.get() or self.w.groupGlyph.get()
self.w.runButton.enable(anyOptionIsSelected)
def domain(self, key):
return "%s.%s" % (self.prefDomain, key)
def preference(self, key):
domain = self.domain(key)
return Glyphs.defaults[domain]
def SavePreferences( self, sender=None ):
try:
# write current settings into prefs:
Glyphs.defaults[self.domain("glyphGlyph")] = self.w.glyphGlyph.get()
Glyphs.defaults[self.domain("glyphGroup")] = self.w.glyphGroup.get()
Glyphs.defaults[self.domain("groupGlyph")] = self.w.groupGlyph.get()
Glyphs.defaults[self.domain("removeOnMasters")] = self.w.removeOnMasters.get()
self.updateGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences( self ):
try:
# register defaults:
Glyphs.registerDefault(self.domain("glyphGlyph"), 1)
Glyphs.registerDefault(self.domain("glyphGroup"), 1)
Glyphs.registerDefault(self.domain("groupGlyph"), 1)
Glyphs.registerDefault(self.domain("removeOnMasters"), 0)
# load previously written prefs:
self.w.glyphGlyph.set( self.preference("glyphGlyph") )
self.w.glyphGroup.set( self.preference("glyphGroup") )
self.w.groupGlyph.set( self.preference("groupGlyph") )
self.w.removeOnMasters.set( self.preference("removeOnMasters") )
return True
except:
import traceback
print(traceback.format_exc())
return False
def RemoveKerningExceptionsMain( self, sender=None ):
try:
# clear macro window log:
Glyphs.clearLog()
# update settings to the latest user input:
if not self.SavePreferences():
print("Note: 'Remove Kerning Exceptions' could not write preferences.")
thisFont = Glyphs.font # frontmost font
if thisFont is None:
Message(title="No Font Open", message="The script requires at least one font. Open a font and run the script again.", OKButton=None)
else:
glyphGlyph = self.preference("glyphGlyph")
glyphGroup = self.preference("glyphGroup")
groupGlyph = self.preference("groupGlyph")
removeOnMasters = self.preference("removeOnMasters")
if removeOnMasters==2:
fonts = Glyphs.fonts
allMasters = True
else:
fonts = (thisFont,)
if removeOnMasters==0:
allMasters = False
else:
allMasters = True
for thisFont in fonts:
print("\nRemoving kerning exceptions in: %s" % thisFont.familyName)
if thisFont.filepath:
print("📄 %s" % thisFont.filepath)
else:
print("⚠️ The font file has not been saved yet.")
totalCount = 0
for thisMaster in thisFont.masters:
if allMasters or thisMaster==thisFont.selectedFontMaster:
pairsToBeRemoved = []
for leftSide in thisFont.kerning[thisMaster.id].keys():
leftSideIsGlyph = not leftSide.startswith("@")
for rightSide in thisFont.kerning[thisMaster.id][leftSide].keys():
rightSideIsGlyph = not rightSide.startswith("@")
removeGlyphGlyph = leftSideIsGlyph and rightSideIsGlyph and glyphGlyph
removeGlyphGroup = leftSideIsGlyph and not rightSideIsGlyph and glyphGroup
removeGroupGlyph = not leftSideIsGlyph and rightSideIsGlyph and groupGlyph
if removeGroupGlyph or removeGlyphGroup or removeGlyphGlyph:
pairsToBeRemoved.append( (leftSide, rightSide) )
countOfDeletions = len(pairsToBeRemoved)
totalCount += countOfDeletions
print("🚫 Removing %i pairs in master ‘%s’..." % ( countOfDeletions, thisMaster.name))
for pair in pairsToBeRemoved:
left, right = pair
if not left.startswith("@"):
left = thisFont.glyphForId_(left).name
if not right.startswith("@"):
right = thisFont.glyphForId_(right).name
thisFont.removeKerningForPair(thisMaster.id, left, right)
# Final report:
Glyphs.showNotification(
"Removed %i Exceptions" % (totalCount),
"Processed %i font%s. Details in Macro Window" % (
				len(fonts),
				"" if len(fonts)==1 else "s",
),
)
print("\nDone.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Remove Kerning Exceptions Error: %s" % e)
import traceback
print(traceback.format_exc())
RemoveKerningExceptions() | StarcoderdataPython |
4826793 | # Get Launcher as well as OpenGL imports
from projects.launcher import *
w, h = 500, 500
def square(size=(100, 100), pos=(0, 0)):
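    # Draw a filled axis-aligned quad of the given size, with pos as its lower-left corner,
    # then reset the modelview matrix.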
glColor3f(0.25, 0.5, 0.75)
glBegin(GL_QUADS)
glVertex2f(pos[0], pos[1])
glVertex2f(pos[0] + size[0], pos[1])
glVertex2f(pos[0] + size[0], pos[1] + size[1])
glVertex2f(pos[0], pos[1] + size[1])
glEnd()
glLoadIdentity()
def triangle(size=(100, 100), pos=(0, 0)):
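    # Draw a filled triangle spanning the given bounding box, with its apex centered at the top.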
glColor3f(0.8, 0, 0.25)
glBegin(GL_TRIANGLES)
glVertex2f(pos[0] + size[0], pos[1])
glVertex2f(pos[0] + size[0] // 2, pos[1] + size[1])
glVertex2f(pos[0], pos[1])
glEnd()
glLoadIdentity()
def iterate():
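    # Reset the viewport and set up a 2D orthographic projection covering the whole window.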
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, w, 0.0, h, 0.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def display_func():
glLoadIdentity()
iterate()
square((50, 50), (100, 400))
triangle((w // 3, h // 3), (w // 3, h // 3))
if __name__ == "__main__":
Launcher(display_func, (w, h)).loop()
| StarcoderdataPython |
1600371 | <reponame>Obarads/torch_point_cloud
import os, sys
import numpy as np
from plyfile import PlyData, PlyElement
from sklearn.decomposition import PCA
##
## Write
##
def write_pc(filename, xyz, rgb=None):
"""
write into a ply file
ref.:https://github.com/loicland/superpoint_graph/blob/ssp%2Bspg/partition/provider.py
"""
if rgb is None:
        # len(xyz): works for a plain xyz list as well as an ndarray, so `.shape` is not needed here.
rgb = np.full((len(xyz), 3), 255, dtype=np.int32)
if not isinstance(xyz, (np.ndarray, np.generic)):
xyz = np.array(xyz, np.float32)
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i_prop in range(0, 3):
vertex_all[prop[i_prop][0]] = xyz[:, i_prop]
for i_prop in range(0, 3):
vertex_all[prop[i_prop+3][0]] = rgb[:, i_prop]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def write_pc_embedding(filename, xyz, embeddings):
"""
write a ply with colors corresponding to geometric features
ref.:https://github.com/loicland/superpoint_graph/blob/ssp%2Bspg/partition/provider.py
"""
if embeddings.shape[1]>3:
pca = PCA(n_components=3)
#pca.fit(np.eye(embeddings.shape[1]))
pca.fit(np.vstack((np.zeros((embeddings.shape[1],)),np.eye(embeddings.shape[1]))))
embeddings = pca.transform(embeddings)
#value = (embeddings-embeddings.mean(axis=0))/(2*embeddings.std())+0.5
#value = np.minimum(np.maximum(value,0),1)
#value = (embeddings)/(3 * embeddings.std())+0.5
value = np.minimum(np.maximum((embeddings+1)/2,0),1)
color = np.array(255 * value, dtype='uint8')
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def label_to_color(labels, seed=0, color_map:list=None):
np.random.seed(seed)
max_labels = np.max(labels)
if color_map is None:
label_color_list = np.random.randint(0,255,(max_labels+1,3))
else:
label_color_list = color_map
point_colors = np.array([ np.array([0,0,0]) if 0 > i else label_color_list[i] for i in labels])
return point_colors
def write_pc_label(filename, xyz, labels, seed=0, color_map:list=None):
point_colors = label_to_color(labels, seed=seed, color_map=color_map)
write_pc(filename, xyz, point_colors)
def check_label_colors(file_name, num_points, seed=0, margin=0.5):
"""
    Write a point cloud that previews the label colors produced by write_pc_label.
"""
xyz = np.zeros((num_points, 3))
margin_coord = np.arange(num_points) * margin
xyz[:,0] = margin_coord
labels = np.arange(num_points)
write_pc_label(file_name, xyz, labels, seed=seed)
def write_pc_intensity(file_name, xyz, intensity):
intensity = intensity[:,np.newaxis]
intensity = np.concatenate([intensity,intensity,intensity],axis=1)
rgb = (intensity*255)
rgb = rgb.astype(np.int32)
write_pc(file_name,xyz,rgb)
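
# Usage sketch (illustrative only; the file names and random data below are
# assumptions, not part of the original module):
#
#   import numpy as np
#   xyz = np.random.rand(1000, 3)
#   write_pc_label("labels.ply", xyz, np.random.randint(0, 5, 1000))       # per-label colors
#   write_pc_embedding("embedding.ply", xyz, np.random.rand(1000, 8))      # PCA-reduced features to RGB
#   write_pc_intensity("intensity.ply", xyz, np.random.rand(1000))         # grayscale intensity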
##
## Read
##
| StarcoderdataPython |
4816674 | <filename>code/python-modules/debugrayleigh.py
'''
Code to debug the mixed rayleigh issues using personal_test dataset
'''
import numpy as np
import personal_test as pt
import matplotlib.pyplot as plt
from copy import deepcopy
import process
'''basic setup
txtnames,kinectDict,startsDict = pt.setup()
task_type = txtnames[0]
kinect = kinectDict[task_type]
starts = startsDict[task_type]
idnum = '1'
user = pt.initializeUser(idnum,kinect,starts)
print 'Initializing'
init_task = process.Task(curr_data_obj, curr_extrema=[relevant_starts[0],relevant_starts[1]], k=3, basis_dim=2, first_task=True)
print 'init data_inds', init_task.data_inds
evolution = process.Process(init_task)
curr_task_data_inds = np.arange(relevant_starts[curr_task_num],relevant_starts[curr_task_num+1])
curr_task_data = curr_data_obj.data_array[curr_task_data_inds,:]
'''
def runUpdate(user, kinect, starts, num_updates):
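    # Initialize a Process on the first task instance, then replay `num_updates` randomly
    # chosen task instances frame-by-frame through the online update / known-task update loop.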
reload(process)
user_copy = deepcopy(user)
#initialize on the first task
init_task = process.Task(kinect,[starts[0],starts[1]],first_task=True)
evolution = process.Process(init_task)
a = init_task.printTaskDef()
a = [int(x) for x in a[1]]
print a
# use a random task instance order
task_instances_to_use = np.random.permutation(range(1,len(starts)-1))[0:num_updates]
for instance in task_instances_to_use:
# for each instance run through the necessary data inds
curr_task_data_inds = np.arange(starts[instance],starts[instance+1])
curr_task_data = kinect.data_array[curr_task_data_inds,:]
# do the online process
for i,data in enumerate(curr_task_data):
print 'Frame: '+str(i)
new_features = pt.getNewFeatures(user_copy,data)
pct_complete = evolution.onlineUpdate(new_features,user_copy)
print 'Evolution pct complete: ', pct_complete
print 'UPDATING_______\n\n'
evolution.updateKnownTasks(user_copy,compute_type='average',proper_update=True)
print 'DONE UPDATING'
return
| StarcoderdataPython |
4808162 | '''Cross validation
bug tracker
-----------------------------
priority | name
-----------------------------
1 | rubi, how accuracy so high?? 64%!
2         | should use PURGED K-FOLD Cross Validation or TimeSeriesSplit instead of a standard split
3 | rubi, which normalize function to use?
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from examples.trading.cv.purged_k_fold import PurgedKFold
from examples.trading.utils import *
from sklearn.model_selection import TimeSeriesSplit
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
# import pandas.io.data as web
# https://towardsdatascience.com/deep-learning-for-beginners-practical-guide-with-python-and-keras-d295bfca4487
# https://www.youtube.com/watch?v=aircAruvnKk
batch_size = 128  # we cannot pass the entire dataset through the network at once, so we split it into batches: the number of samples processed per step (the common default is 32)
epochs = 100  # 50 # number of passes over the full training set; after each pass the model is evaluated and its parameters (weights and biases) are adjusted
# iterations = 60000/128
# size_input = 10 # x_train.shape[1] # no of features
size_hidden = 512  # more hidden units (a higher-dimensional representation space) and/or more layers let the network learn more complex representations, but make it more computationally expensive and may lead to overfitting
size_output = 2  # two classes are used here (green bar, red bar); the third class (hold / no direction) is left out, see name_output below
name_output = ['Green bar', 'Red Bar'] # 'no direction Bar'
# iterations = 60000/128
symbol = '^GSPC' # ^GSPC=SP500 (3600->1970 or 12500 =2000) DJI(300, 1988) QQQ(300, 2000) GOOG XLF XLV
skipDays = 3600 # 12500 total 13894 daily bars
percentTestSplit = 0.33 # 33% from data will go to test
print('\nLoading data')
print('\n======================================')
# Define date range
# start_date, end_date='1970-01-03','2019-07-12'
# dates=pd.date_range(start_date,end_date)
# print("dates=" ,dates)
# print("date[0]=",dates[0])
#
# # Define stock symbols
# symbols = []#'TSLA', 'GOOG', 'FB'] # SPY will be added in get_data()
# import get_prices as hist
# hist.get_stock_data(symbol, start_date=start_date, end_date=end_date)
# process = DataProcessing("stock_prices.csv", 0.9)
# process.gen_test(10)
# process.gen_train(10)
#
# Get stock data
df_all = get_data_from_disc(symbol, skipDays)
print(df_all.tail())
# Slice and plot
# plot_selected(df_all, [ 'Close', 'sma200'], shouldNormalize=True, symbol=symbol)
# Slice and plot
plot_selected(df_all, title='TA-price of ' + symbol + ' vs time', columns=['Close', 'sma200'], shouldNormalize=False,
symbol=symbol)
plot_selected(df_all.tail(500), title='TA-sma 1,10,20,50,200 of ' + symbol + ' vs time',
columns=['Close', 'sma10', 'sma20', 'sma50', 'sma200', 'sma400', 'bb_hi10', 'bb_lo10', 'bb_hi20',
'bb_lo20', 'bb_hi50', 'bb_lo200', 'bb_lo50', 'bb_hi200'], shouldNormalize=False, symbol=symbol)
plot_selected(df_all.tail(500), title='TA-range sma,bband of ' + symbol + ' vs time',
columns=['range_sma', 'range_sma1', 'range_sma2', 'range_sma3', 'range_sma4', 'rel_bol_hi10',
'rel_bol_hi20', 'rel_bol_hi200', 'rel_bol_hi50'], shouldNormalize=False, symbol=symbol)
plot_selected(df_all.tail(500), title='TA-rsi,stoc of ' + symbol + ' vs time',
columns=['rsi10', 'rsi20', 'rsi50', 'rsi200', 'stoc10', 'stoc20', 'stoc50', 'stoc200'],
shouldNormalize=False, symbol=symbol)
# plot_selected(df, ['Date','Close'] , start_date, end_date, shouldNormalize=False)
elements = df_all.size
shape = df_all.shape
print('\nInput Data')
print('\n======================================')
df_data = df_all.loc[:,
['sma10', 'sma20', 'sma50', 'sma200', 'sma400', 'range_sma', 'range_sma1', 'range_sma2', 'range_sma3',
'range_sma4', 'bb_hi10', 'bb_lo10', 'bb_hi20', 'bb_lo20', 'bb_hi50', 'bb_lo50', 'bb_hi200', 'bb_lo200',
'rel_bol_hi10', 'rel_bol_lo10', 'rel_bol_hi20', 'rel_bol_lo20', 'rel_bol_hi50', 'rel_bol_lo50',
'rel_bol_hi200', 'rel_bol_lo200', 'rsi10', 'rsi20', 'rsi50', 'rsi200', 'stoc10', 'stoc20', 'stoc50',
'stoc200']]
print('columns=', df_data.columns)
print('\ndata describe=\n', df_data.describe())
print('shape=', str(shape), " elements=" + str(elements), ' rows=', str(shape[0]))
print('\nOutput Data (Labeling)')
print('\n======================================')
df_y = df_all['isUp'] # np.random.randint(0,2,size=(shape[0], ))
# y = np.random.randint(0,2,size=(shape[0], ))
# print(y)
# df_y = pd.DataFrame()#, columns=list('is_up'))
# df_y['isUp'] = y
print(df_y)
ts_cv = PurgedKFold(n_splits=5, gap_percentage=2.5)
#ts_cv = StratifiedKFold(n_splits=5, shuffle=False)
# ts_cv = TimeSeriesSplit(n_splits=5)
cv_scores = []
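# Time-aware cross-validation: the splitter (purged k-fold with a gap) is meant to keep
# training and test bars separated so information from nearby bars does not leak across folds.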
for train_index, test_index in ts_cv.split(df_data.values):
x_train = df_data.values[train_index]
x_test = df_data.values[test_index]
print('x_train shape=', str(x_train.shape))
print('x_test shape=', str(x_test.shape))
print('Observations: %d' % (len(x_train) + len(x_test)))
print('Training Observations: %d' % (len(x_train)))
print('Testing Observations: %d' % (len(x_test)))
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
print('\ntrain data', x_train.shape)
print('\ntest data', x_test.shape)
y_train = df_y.values[train_index]
y_test = df_y.values[test_index]
print('y_train shape=', str(y_train.shape))
print('y_test shape=', str(y_test.shape))
y_train = keras.utils.to_categorical(y_train, size_output)
y_test = keras.utils.to_categorical(y_test, size_output)
print('\ntrain labels', y_train.shape)
print('\ntest labels', y_test.shape)
size_input = x_train.shape[1] # no of features
# create model
model = Sequential() # stack of layers
# model.add(tf.keras.layers.Flatten())
model.add(Dense(size_hidden, activation='relu', input_shape=(size_input,)))
model.add(Dropout(0.2)) # for generalization
model.add(Dense(size_hidden, activation='relu'))
model.add(Dropout(0.2)) # regularization technic by removing some nodes
model.add(Dense(size_output,
                activation='softmax'))  # last layer uses softmax (except for regression problems and binary classification with 2 classes, where sigmoid is enough)
# For binary classification, softmax & sigmoid should give the same results, because softmax is a generalization of sigmoid for a larger number of classes.
# softmax: loss: 0.3099 - acc: 0.8489 - val_loss: 0.2929 - val_acc: 0.8249
# sigmoid: loss: 0.2999 - acc: 0.8482 - val_loss: 0.1671 - val_acc: 0.9863
    # Prints a string summary of the neural network.
model.summary()
# Compile model
model.compile(loss='categorical_crossentropy', # measure how accurate the model during training
optimizer=RMSprop(), # this is how model is updated based on data and loss function
metrics=['accuracy'])
# Fit the model
# model.fit(df_data[train], df_y[train], epochs=150, batch_size=10, verbose=0)
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1)
# history = model.fit(x_train
# , y_train
# , batch_size=batch_size
# , epochs=epochs
# , validation_data=(x_test, y_test)
# , verbose=1)
# evaluate the model
scores = model.evaluate(x_test, y_test, verbose=0)
    print("----end of iteration: %s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
cv_scores.append(scores[1] * 100)
print("-------total accuracy : %.2f%% (+/- %.2f%%)" % (np.mean(cv_scores), np.std(cv_scores)))
| StarcoderdataPython |
1787670 | <reponame>mazi76erX2/football_forecaster
"""
Django settings for Football Forecaster project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from datetime import timedelta
from django.contrib.messages import constants as messages
from dotenv import load_dotenv
from cloudinary import config
load_dotenv()
MODE = (os.getenv('MODE') == 'production')
# Custom user model
# AUTH_USER_MODEL = "accounts.CustomUser"
ALLOWED_HOSTS = ['*']
DB_HOST = os.environ.get('DB_HOST')
DB_NAME = os.environ.get('DB_NAME')
DB_USER = os.environ.get('DB_USER')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_PORT = os.environ.get('DB_PORT')
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', default='fjm-%3(m78tv5^qe8sm_k)7@rzandileqx6!%067*&6gu8ph+x6unt740')
# Application definition
INSTALLED_APPS = [
# admin menu
'admin_menu',
# default
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# local-apps
'accounts',
'pages',
# all-auth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
# third-party
'django_registration',
'widget_tweaks',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'django_better_admin_arrayfield',
'django_filters',
'cloudinary',
'rest_registration',
'letsencrypt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'football_forecaster.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR.parent, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'football_forecaster.wsgi.application'
"""""""""""""""""""""""""""""""""""""""""""""""""""""
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
"""""""""""""""""""""""""""""""""""""""""""""""""""""
if os.getenv('TEST'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR.parent, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': DB_NAME,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': DB_PORT,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_root")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_root")
SITE_ID = 1
STATICFILES_DIRS = [
BASE_DIR.parent / "static",
# os.path.join(BASE_DIR, 'assets'),
]
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/logged_in/"
LOGOUT_REDIRECT_URL = "/"
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Site Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
SITE_NAME = os.getenv('SITE_NAME')
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Admin Menu Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
ADMIN_LOGO = 'img/logo.png'
# MENU_WEIGHT = {
# 'World': 20,
# 'Auth': 4,
# 'Sample': 5
# }
ADMIN_STYLE = {
'primary-color': '#B42D33',
'secondary-color': '#000000',
'tertiary-color': '#333333'
}
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Message Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
MESSAGE_TAGS = {
messages.DEBUG: 'debug',
messages.INFO: 'info',
messages.SUCCESS: 'success',
messages.WARNING: 'warning',
messages.ERROR: 'danger',
}
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Authentication Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
"""""""""""""""""""""""""""""""""""""""""""""""""""""
All-Auth Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
# Provider specific settings
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'METHOD': 'oauth2',
'SDK_URL': '//connect.facebook.net/{locale}/sdk.js',
'SCOPE': ['email', 'public_profile'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'INIT_PARAMS': {'cookie': True},
'FIELDS': [
'id',
'email',
'name',
'first_name',
'last_name',
'verified',
'locale',
'timezone',
'link',
'gender',
'updated_time',
],
'EXCHANGE_TOKEN': True,
'LOCALE_FUNC': 'path.to.callable',
'VERIFIED_EMAIL': False,
'VERSION': 'v7.0',
},
'google': {
# For each OAuth based provider, either add a ``SocialApp``
# (``socialaccount`` app) containing the required client
# credentials, or list them here:
'APP': {
'client_id': os.getenv('GOOGLE_CLIENT_ID'),
'secret': os.getenv('GOOGLE_SECRET_KEY'),
'key': os.getenv('GOOGLE_API_KEY'),
}
},
'twitter': {
# For each OAuth based provider, either add a ``SocialApp``
# (``socialaccount`` app) containing the required client
# credentials, or list them here:
'APP': {
'client_id': os.getenv('TWITTER_CLIENT_ID'),
'secret': os.getenv('TWITTER_SECRET_KEY'),
'key': os.getenv('TWITTER_API_KEY'),
}
},
}
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CDN
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CDN_NAME = os.environ.get('CDN_NAME')
CDN_API_KEY = os.environ.get('CDN_API_KEY')
CDN_API_SECRET = os.environ.get('CDN_API_SECRET')
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Twilio Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_WPP_NUMBER = os.environ.get('TWILIO_WPP_NUMBER')
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Cloudinary Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
# Cloudinary
# https://cloudinary.com/documentation/django_integration
config(
cloud_name=CDN_NAME,
api_key=CDN_API_KEY,
api_secret=CDN_API_SECRET,
secure=True
)
# Email
# https://docs.djangoproject.com/en/3.0/topics/email/
# Gmail SMTP requirements
# https://support.google.com/a/answer/176600?hl=en
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('EMAIL_HOST')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_PORT = os.getenv('EMAIL_PORT')
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL')
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Django Registration
"""""""""""""""""""""""""""""""""""""""""""""""""""""
ACCOUNT_ACTIVATION_DAYS = 7
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Django REST Registration
"""""""""""""""""""""""""""""""""""""""""""""""""""""
REST_REGISTRATION = {
'REGISTER_VERIFICATION_ENABLED': False,
'REGISTER_EMAIL_VERIFICATION_ENABLED': False,
'RESET_PASSWORD_VERIFICATION_ENABLED': False,
# 'REGISTER_VERIFICATION_URL': f'{os.getenv('REGISTER_VERIFICATION_URL')}/verify-user/',
# 'RESET_PASSWORD_VERIFICATION_URL': f'{os.getenv('RESET_PASSWORD_VERIFICATION_URL')}/reset-password/',
# 'REGISTER_EMAIL_VERIFICATION_URL': f'{os.getenv('REGISTER_EMAIL_VERIFICATION_URL')}/verify-email/',
#
# 'VERIFICATION_FROM_EMAIL': os.getenv('VERIFICATION_FROM_EMAIL') # '<EMAIL>',
}
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Region Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
LANGUAGES = (('en', 'English'),)
COUNTRIES = (('za', 'South Africa'),)
PROVINCES = (
('ec', 'Eastern Cape'),
('fs', 'Free State'),
('gau', 'Gauteng'),
('kzn', 'KwaZulu-Natal'),
('lim', 'Limpopo'),
('mpu', 'Mpumalanga'),
('nc', 'Northern Cape'),
('nw', 'North West'),
('wc', 'Western Cape'),
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Django REST Framework
"""""""""""""""""""""""""""""""""""""""""""""""""""""
# Django Rest Framework
# https://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=14),
'ROTATE_REFRESH_TOKENS': True,
'BLACKLIST_AFTER_ROTATION': False,
'ALGORITHM': 'HS256',
'SIGNING_KEY': os.environ.get('SECRET_KEY', default='fjm-%3(m78tv5^qe8sm_k)7@lash!%067*&6gu8ph+x6unt740'),
'VERIFYING_KEY': None,
'AUTH_HEADER_TYPES': ('JWT',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
}
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CSFR Settings
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CSRF_COOKIE_SAMESITE = 'Strict'
SESSION_COOKIE_SAMESITE = 'Strict'
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_HTTPONLY = True
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CORS Headers
"""""""""""""""""""""""""""""""""""""""""""""""""""""
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
)
CORS_EXPOSE_HEADERS = ['Content-Type', 'X-CSRFToken']
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Email Admins
"""""""""""""""""""""""""""""""""""""""""""""""""""""
ADMINS = [
('Xolani', '<EMAIL>'),
]
| StarcoderdataPython |
146878 | <reponame>karlp/KiCost
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corporation / <NAME> / <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author information.
__author__ = '<NAME>'
__webpage__ = 'https://github.com/hildogjr/'
__company__ = 'University of Campinas - Brazil'
# Libraries.
import json
import requests
import re
import sys
import os
import copy
from collections import OrderedDict
if sys.version_info[0] < 3:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
# KiCost definitions.
from ..global_vars import DEFAULT_CURRENCY, DEBUG_OVERVIEW, ERR_SCRAPE, KiCostError, W_NOINFO, NO_PRICE
from .. import DistData
# Distributors definitions.
from .distributor import distributor_class
# Uncomment for debug
# Use `debug('x + 1')` for instance.
# def debug(expression):
# frame = sys._getframe(1)
# distributor_class.logger.info(expression, '=', repr(eval(expression, frame.f_globals, frame.f_locals)))
MAX_PARTS_PER_QUERY = 20 # Maximum number of parts in a single query.
# Information to return from PartInfo KitSpace server.
QUERY_AVAIABLE_CURRENCIES = ['GBP', 'EUR', 'USD']
# DEFAULT_CURRENCY
QUERY_ANSWER = '''
mpn{manufacturer, part},
datasheet,
description,
specs{key, value},
offers(from: {DISTRIBUTORS}){
product_url,
sku {vendor, part},
description,
moq,
in_stock_quantity,
prices{''' + ','.join(QUERY_AVAIABLE_CURRENCIES) + '''}
}
'''
# Information not used: type,specs{key, name, value},image {url, credit_string, credit_url},stock_location
QUERY_ANSWER = re.sub(r'[\s\n]', '', QUERY_ANSWER)
QUERY_PART = 'query ($input: MpnInput!) { part(mpn: $input) {' + QUERY_ANSWER + '} }'
QUERY_MATCH = 'query ($input: [MpnOrSku]!){ match(parts: $input) {' + QUERY_ANSWER + '} }'
QUERY_SEARCH = 'query ($input: String!){ search(term: $input) {' + QUERY_ANSWER + '} }'
QUERY_URL = 'https://dev-partinfo.kitspace.org/graphql'
__all__ = ['api_partinfo_kitspace']
class api_partinfo_kitspace(distributor_class):
name = 'KitSpace'
type = 'api'
enabled = True
url = 'https://kitspace.org/' # Web site API information.
API_DISTRIBUTORS = ['digikey', 'farnell', 'mouser', 'newark', 'rs', 'arrow', 'tme', 'lcsc']
DIST_TRANSLATION = { # Distributor translation.
'Digikey': 'digikey',
'Farnell': 'farnell',
'Mouser': 'mouser',
'Newark': 'newark',
'RS': 'rs',
'TME': 'tme',
'Arrow Electronics': 'arrow',
'LCSC': 'lcsc',
}
# Dict to translate KiCost field names into KitSpace distributor names
KICOST2KITSPACE_DIST = {v: k for k, v in DIST_TRANSLATION.items()}
@staticmethod
def init_dist_dict():
if api_partinfo_kitspace.enabled:
distributor_class.add_distributors(api_partinfo_kitspace.API_DISTRIBUTORS)
@staticmethod
def query(query_parts, distributors, query_type=QUERY_MATCH):
'''Send query to server and return results.'''
distributors = [api_partinfo_kitspace.KICOST2KITSPACE_DIST[d] for d in distributors]
# Allow changing the URL for debug purposes
try:
url = os.environ['KICOST_KITSPACE_URL']
except KeyError:
url = QUERY_URL
# Sort the distributors to create a reproducible query
query_type = re.sub(r'\{DISTRIBUTORS\}', '["' + '","'.join(sorted(distributors)) + '"]', query_type)
# r = requests.post(url, {"query": QUERY_SEARCH, "variables": variables}) #TODO future use for ISSUE #17
variables = '{"input":[' + ','.join(query_parts) + ']}'
# Remove all spaces, even inside the manf#
# SET comment: this is how the code always worked. Octopart (used by KitSpace) ignores spaces inside manf# codes.
variables = variables.replace(' ', '')
# Do the query using POST
data = 'query={}&variables={}'.format(quote_plus(query_type), quote_plus(variables))
distributor_class.log_request(url, data)
data = OrderedDict()
data["query"] = query_type
data["variables"] = variables
response = requests.post(url, data)
distributor_class.log_response(response)
if response.status_code == requests.codes['ok']: # 200
results = json.loads(response.text)
return results
elif response.status_code == requests.codes['not_found']: # 404
            raise KiCostError('Kitspace server not found; check your internet connection.', ERR_SCRAPE)
elif response.status_code == requests.codes['request_timeout']: # 408
raise KiCostError('KitSpace is not responding.', ERR_SCRAPE)
elif response.status_code == requests.codes['bad_request']: # 400
            raise KiCostError('Bad request to Kitspace server, probably due to an incorrect string '
                              'format; check your `manf#` codes and contact the support team.', ERR_SCRAPE)
elif response.status_code == requests.codes['gateway_timeout']: # 504
            raise KiCostError('One of the internal Kitspace services may be experiencing problems. Contact the Kitspace support.', ERR_SCRAPE)
else:
raise KiCostError('Kitspace error: ' + str(response.status_code), ERR_SCRAPE)
@staticmethod
def get_spec(data, item, default=None):
        '''Return the `value` of the spec entry whose `key` matches `item` (or `default` if absent).
        Used to extract information from the JSON response.'''
for d in data['specs']:
if d['key'] == item:
value = d['value']
return value if value is not None else default
return default
@staticmethod
def get_part_info(query, parts, distributors, currency, distributors_wanted):
'''Query PartInfo for quantity/price info and place it into the parts list.
`distributors_wanted` is the list of distributors we want for each query.
`distributors` is the list of all distributors we want, in general.
This difference is because some queries are for an specific distributor.
'''
# Translate from PartInfo distributor names to the names used internally by kicost.
dist_xlate = api_partinfo_kitspace.DIST_TRANSLATION
results = api_partinfo_kitspace.query(query, distributors)
# Loop through the response to the query and enter info into the parts list.
for part_query, part, dist_want, result in zip(query, parts, distributors_wanted, results['data']['match']):
if not result:
distributor_class.logger.warning(W_NOINFO+'No information found for parts \'{}\' query `{}`'.format(part.refs, str(part_query)))
continue
# Get the information of the part.
part.datasheet = result.get('datasheet')
part.lifecycle = api_partinfo_kitspace.get_spec(result, 'lifecycle_status', 'active').lower()
# Misc data collected, currently not used inside KiCost
part.update_specs({sp['key']: (sp['key'], sp['value']) for sp in result['specs'] if sp['value']})
# Loop through the offers from various dists for this particular part.
for offer in result['offers']:
# Get the distributor who made the offer and add their
# price/qty info to the parts list if its one of the accepted distributors.
dist = dist_xlate.get(offer['sku']['vendor'], '')
if dist not in dist_want:
# Not interested in this distributor
continue
# Get the DistData for this distributor
dd = part.dd.get(dist, DistData())
# This will happen if there are not enough entries in the price/qty list.
# As a stop-gap measure, just assign infinity to the part increment.
# A better alternative may be to examine the packaging field of the offer.
part_qty_increment = float("inf")
# Get pricing information from this distributor.
dist_currency = {cur: pri for cur, pri in offer['prices'].items() if pri}
if not dist_currency:
                    # Sometimes the API returns a minimum purchase of 0 and an invalid `price_tiers`.
distributor_class.logger.warning(NO_PRICE+'No price information found for parts \'{}\' query `{}`'.
format(part.refs, str(part_query)))
else:
prices = None
# Get the price tiers prioritizing:
# 1) The asked currency by KiCost user;
# 2) The default currency given by `DEFAULT_CURRENCY` in root `global_vars.py`;
# 3) The first not null tiers
if currency in dist_currency:
prices = dist_currency[currency]
dd.currency = currency
elif DEFAULT_CURRENCY in dist_currency:
prices = dist_currency[DEFAULT_CURRENCY]
dd.currency = DEFAULT_CURRENCY
else:
dd.currency, prices = next(iter(dist_currency.items()))
price_tiers = {qty: float(price) for qty, price in prices}
# Combine price lists for multiple offers from the same distributor
# to build a complete list of cut-tape and reeled components.
dd.price_tiers.update(price_tiers)
# Compute the quantity increment between the lowest two prices.
# This will be used to distinguish the cut-tape from the reeled components.
if len(price_tiers) > 1:
part_break_qtys = sorted(price_tiers.keys())
part_qty_increment = part_break_qtys[1] - part_break_qtys[0]
# Select the part SKU, web page, and available quantity.
# Each distributor can have different stock codes for the same part in different
# quantities / delivery package styles: cut-tape, reel, ...
# Therefore we select and overwrite a previous selection if one of the
# following conditions is met:
# 1. We don't have a selection for this part from this distributor yet.
# 2. The MOQ is smaller than for the current selection.
# 3. The part_qty_increment for this offer smaller than that of the existing selection.
# (we prefer cut-tape style packaging over reels)
# 4. For DigiKey, we can't use part_qty_increment to distinguish between
# reel and cut-tape, so we need to look at the actual DigiKey part number.
                # This check is driven by the `distributors_info[dist]['ignore_cat#_re']` definition
                # in the distributor profile.
dist_part_num = offer.get('sku', '').get('part', '')
qty_avail = dd.qty_avail
in_stock_quantity = offer.get('in_stock_quantity')
if not qty_avail or (in_stock_quantity and qty_avail < in_stock_quantity):
# Keeps the information of more availability.
dd.qty_avail = in_stock_quantity # In stock.
ign_stock_code = distributor_class.get_distributor_info(dist).ignore_cat
valid_part = not (ign_stock_code and re.match(ign_stock_code, dist_part_num))
# debug('dd.part_num') # Uncomment to debug
# debug('dd.qty_increment') # Uncomment to debug
moq = offer.get('moq')
if (valid_part and
(not dd.part_num or
(dd.qty_increment is None or part_qty_increment < dd.qty_increment) or
(not dd.moq or (moq and dd.moq > moq)))):
# Save the link, stock code, ... of the page for minimum purchase.
dd.moq = moq # Minimum order qty.
dd.url = offer.get('product_url', '') # Page to purchase the minimum quantity.
dd.part_num = dist_part_num
dd.qty_increment = part_qty_increment
# Update the DistData for this distributor
part.dd[dist] = dd
@staticmethod
def query_part_info(parts, distributors, currency):
'''Fill-in the parts with price/qty/etc info from KitSpace.'''
distributor_class.logger.log(DEBUG_OVERVIEW, '# Getting part data from KitSpace...')
        # Use just the distributors available in this API.
# Note: The user can use --exclude and define it with fields.
distributors = [d for d in distributors if distributor_class.get_distributor_info(d).is_web()
and d in api_partinfo_kitspace.API_DISTRIBUTORS]
FIELDS_CAT = sorted([d + '#' for d in distributors])
# Create queries to get part price/quantities from PartInfo.
queries = [] # Each part reference query.
query_parts = [] # Pointer to the part.
        query_part_stock_code = []  # For each query, the list of distributors it targets: a single distributor when its stock code is used, or the remaining distributors when falling back to "manf#".
# Translate from PartInfo distributor names to the names used internally by kicost.
available_distributors = set(api_partinfo_kitspace.API_DISTRIBUTORS)
for part in parts:
# Create a PartInfo query using the manufacturer's part number or the distributor's SKU.
part_dist_use_manfpn = copy.copy(distributors)
# Create queries using the distributor SKU
            # Check if the part has a stock code that is accepted by this module (API).
            # KiCost prioritizes these distributor stock codes; "manf#" is only used afterwards to
            # get information for the distributors whose stock code field was not filled in.
found_codes_for_all_dists = True
for d in FIELDS_CAT:
part_stock = part.fields.get(d)
if part_stock:
part_catalogue_code_dist = d[:-1]
if part_catalogue_code_dist in available_distributors:
part_code_dist = api_partinfo_kitspace.KICOST2KITSPACE_DIST[part_catalogue_code_dist]
queries.append('{"sku":{"vendor":"' + part_code_dist + '","part":"' + part_stock + '"}}')
query_parts.append(part)
query_part_stock_code.append([part_catalogue_code_dist])
part_dist_use_manfpn.remove(part_catalogue_code_dist)
else:
found_codes_for_all_dists = False
# Create a query using the manufacturer P/N
part_manf = part.fields.get('manf', '')
part_code = part.fields.get('manf#')
if part_code and not found_codes_for_all_dists:
                # Not all distributors have a code; add a query for the manufacturer P/N
queries.append('{"mpn":{"manufacturer":"' + part_manf + '","part":"' + part_code + '"}}')
query_parts.append(part)
                # List of distributors without a specific part number
query_part_stock_code.append(part_dist_use_manfpn)
n_queries = len(query_parts)
if not n_queries:
return
# Setup progress bar to track progress of server queries.
progress = distributor_class.progress(n_queries, distributor_class.logger)
# Slice the queries into batches of the largest allowed size and gather
# the part data for each batch.
for i in range(0, len(queries), MAX_PARTS_PER_QUERY):
slc = slice(i, i+MAX_PARTS_PER_QUERY)
api_partinfo_kitspace.get_part_info(queries[slc], query_parts[slc], distributors, currency, query_part_stock_code[slc])
progress.update(len(queries[slc]))
# Done with the scraping progress bar so delete it or else we get an
# error when the program terminates.
progress.close()
distributor_class.register(api_partinfo_kitspace, 50)
| StarcoderdataPython |
40980 | <reponame>intelkevinputnam/lpot-docs
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization class."""
import os
from typing import List
from lpot.ux.components.optimization.graph_optimizer.optimize_model import (
optimize_graph,
optimize_graph_config,
)
from lpot.ux.components.optimization.optimization import Optimization
class GraphOptimization(Optimization):
"""Optimization class."""
def execute(self) -> None:
"""Execute graph optimization."""
if self.tune:
optimize_graph_config(
input_graph=self.input_graph,
output_graph=self.output_graph,
config=self.config_path,
framework=self.framework,
)
else:
optimize_graph(
input_graph=self.input_graph,
output_graph=self.output_graph,
input=self.input_nodes,
output=self.output_nodes,
framework=self.framework,
precisions=self.output_precision,
)
@property
def optimization_script(self) -> str:
"""Get optimization script path."""
return os.path.join(
os.path.dirname(__file__),
"optimize_model.py",
)
@property
def parameters(self) -> List[str]:
"""Get optimization parameters."""
parameters = [
f"--input-graph={self.input_graph}",
f"--output-graph={self.output_graph}",
f"--framework={self.framework}",
]
if self.tune:
parameters.extend(
[
f"--config={self.config_path}",
],
)
else:
parameters.extend(
[
f"--input-nodes={self.input_nodes}",
f"--output-nodes={self.output_nodes}",
f"--precisions={self.output_precision}",
],
)
return parameters
| StarcoderdataPython |
3354948 | import pandas as pd
import numpy as np
import os
import glob
from common.io import file_base, parent_dir, mkdir
import re
import os
# @todo add warning if file is missing (i.e., None)
# @todo clean out files such that features are in 1 and labels (i.e., y or professor rating) is in another (samename_label.csv)
#
def get_metadata(fname, class_type=('lecture', 'lab', 'seminar', 'recitation discussion')):
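    # Derive (course_id, semester, instructor name, section) from a raw survey filename;
    # returns (None, None, None, None) when the course part does not name a known class type.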
fname = fname.replace("Full_","");
str_split = fname.split('_')
semester = str(str_split[1] + str_split[2][2:4]).lower()
fparts = str_split[0].split('/')[-1].split('-')
cid = fparts[0]
cid = [cid.lower().replace(c, '') for c in class_type if c.lower() in str(cid).lower()]
if len(cid) == 0:
return None, None, None, None
if (re.search("[0-9]{6}",cid[0])):
course_id = cid[0][:-2]
section = cid[0][-2:]
else:
course_id = cid[0]
section = '01'
name = fparts[1].lower()
return course_id, semester, name, section
def get_scores(fpath):
"""
    :param fpath: path to a raw course-survey spreadsheet (.xls)
    :return: DataFrame containing the score-chart section of the survey
"""
# parse table
df_scorechart = pd.read_excel(fpath, skiprows=10, nrows=29)
df_scorechart.drop(df_scorechart.columns[12:], axis=1)
return df_scorechart
def parse_surveys(data_dir, output_dir):
"""
    :param data_dir: root directory holding one sub-directory of raw .xls surveys per college
    :param output_dir: destination root; one CSV per college/course/semester/instructor is written under it
    :return: None
"""
survey_dirs = glob.glob(data_dir + '*')
colleges = [str(d).replace(data_dir, '') for d in survey_dirs]
for college in colleges:
obin = output_dir + '/' + college + '/'
mkdir(obin)
inbin = data_dir + '/' + college + '/'
survey_paths = glob.glob(inbin + '*.xls')
for survey in survey_paths:
course, semester, name, section = get_metadata(survey)
if course is None:
continue
df_scores = get_scores(survey)
course_dir = os.path.join(obin, course)
if mkdir(course_dir):
print('created {}'.format(course_dir))
semester_dir = os.path.join(course_dir, semester)
if mkdir(semester_dir):
print('created {}'.format(semester_dir))
fout = semester_dir + '/' + name + '_' + section + '_' + semester + '.csv'
df_scores.to_csv(fout)
if __name__ == "__main__":
fpath = '/Users/zackhillman/Documents/eece2300/FinalProject/termproject/data/raw/surveys-raw/camd/CINE233601Lecture-Blake-201840_Summer_1_2018-Ratings_summary_w_CRN.xls'
data_dir = '/Users/zackhillman/Documents/eece2300/FinalProject/termproject/data/raw/surveys-raw/'
output_dir = '/Users/zackhillman/Documents/eece2300/FinalProject/termproject/data/processed/surveys/'
parse_surveys(data_dir, output_dir)
#
# dict_of_colleges = {}
# dict_of_files = {}
# for (dirpath, dirnames, filenames) in os.walk(data_dir):
# list_of_files = []
# college_ref = file_base(dirpath)
# for filename in filenames:
# if filename.endswith('.xls'):
# dict_of_files[filename] = os.sep.join([dirpath, filename])
# list_of_files.append(filename)
# if len(list_of_files) > 0:
# dict_of_colleges[college_ref] = list_of_files
# # parse header
#
# for k, v in dict_of_colleges.items():
# print('There are {} samples for {}'.format(k, len(dict_of_colleges[k])))
# # There are ccis samples for 254
# # There are camd samples for 54
# # There are dmsb samples for 174
# # There are coe samples for 280
#
# #parse init info
# df_initial = pd.read_excel(fpath, nrows=6)
#
# df_intiial_transposed = df_initial.T
#
# df_initial = df_intiial_transposed.iloc[:2]
#
# #print(df_initial)
#
#
#
# # parse averaged scores
# df_overall_raw = pd.read_excel(fpath, skiprows=40, nrows=1)
#
# # check if last column is Nan
# if str(df_overall_raw.columns[-1][:6]).lower() == 'unname':
# df_overall_raw = df_overall_raw.drop(labels=df_overall_raw.columns[-1], axis=1)
#
# #print(df_overall_raw)
#
# df_overall = df_overall_raw.iloc[:, 3::]
# #print(df_overall)
#
# df_scorechart.to_json("/Users/zackhillman/Documents/eece2300/FinalProject/termproject/data/processed/1.txt")
#
# # stats (i.e. number of each type of survey) | StarcoderdataPython |
100218 | <gh_stars>1-10
from attacker import *
from victim import *
def p_selection(p_init, it, num_iter):
""" Piece-wise constant schedule for p (the fraction of pixels changed on every iteration). """
it = int(it / num_iter * 10000)
if 10 < it <= 50: return p_init / 2
elif 50 < it <= 200: return p_init / 4
elif 200 < it <= 500: return p_init / 8
elif 500 < it <= 1000: return p_init / 16
elif 1000 < it <= 2000: return p_init / 32
elif 2000 < it <= 4000: return p_init / 64
elif 4000 < it <= 6000: return p_init / 128
elif 6000 < it <= 8000: return p_init / 256
elif 8000 < it <= 10000: return p_init / 512
else: return p_init
def attack(model, x, y, logits_clean, dataset, batch_size, run_time, args):
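    # Square-Attack-style black-box attack: start from a random stripe (l-inf) or
    # pseudo-Gaussian square (l2) initialization, then let QueryNet propose perturbations,
    # query the victim, and keep the candidates that improve the margin loss.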
eps, seed, l2_attack, num_iter, p_init, num_srg, use_square_plus, use_nas = \
(args.eps / 255 if not args.l2_attack else args.eps), (args.seed if args.seed != -1 else run_time), \
args.l2_attack, args.num_iter, args.p_init, args.num_srg, args.use_square_plus, args.use_nas
np.random.seed(seed)
min_val, max_val = 0, 1
c, h, w = x.shape[1:]
n_features = c * h * w
if l2_attack: # the initial stripes in square attack
delta_init = np.zeros(x.shape)
s = h // 5
sp_init = (h - s * 5) // 2
center_h = sp_init + 0
for counter in range(h // s):
center_w = sp_init + 0
for counter2 in range(w // s):
delta_init[:, :, center_h:center_h + s, center_w:center_w + s] += QueryNet.meta_pseudo_gaussian_pert(None, s).reshape(
[1, 1, s, s]) * np.random.choice([-1, 1], size=[x.shape[0], c, 1, 1])
center_w += s
center_h += s
x_best = np.clip(x + delta_init / np.sqrt(np.sum(delta_init ** 2, axis=(1, 2, 3), keepdims=True)) * eps, 0, 1)
else:
x_best = np.clip(x + np.random.choice([-eps, eps], size=[x.shape[0], c, 1, w]), min_val, max_val)
logits = model(x_best)
loss_min = get_margin_loss(y, logits)
n_queries = np.ones(x.shape[0]) * 2 # have queried with original samples and stripe samples
surrogate_names = ['DenseNet121', 'ResNet50', 'DenseNet169', 'ResNet101', 'DenseNet201', 'VGG19'][:num_srg] # surrogates if not using nas
result_path = get_time() + f'_{dataset}_{model.arch}' + \
('_l2' if l2_attack else '_linfty') + \
f'_eps{round(eps* (255 if not l2_attack else 1), 2)}' + \
('_Eval' if num_srg != 0 else '') + \
('_Sqr+' if use_square_plus else '') + \
(f'_NAS{num_srg}' if use_nas else ('_'+'-'.join(surrogate_names) if len(surrogate_names) != 0 else ''))
print(result_path)
logger = LoggerUs(result_path)
os.makedirs(result_path + '/log', exist_ok=True)
log.reset_path(result_path + '/log/main.log')
metrics_path = logger.result_paths['base'] + '/log/metrics'
log.print(str(args))
sampler = DataManager(x, logits_clean, eps, result_dir=result_path, loss_init=get_margin_loss(y, logits_clean))
sampler.update_buffer(x_best, logits, loss_min, logger, targeted=False, data_indexes=None, margin_min=loss_min)
sampler.update_lipschitz()
querynet = QueryNet(sampler, model.arch, surrogate_names, use_square_plus, True, use_nas, l2_attack, eps, batch_size)
def get_surrogate_loss(srgt, x_adv, y_ori): # for transferability evaluation in QueryNet's 2nd forward operation
if x_adv.shape[0] <= batch_size: return get_margin_loss(y_ori, srgt(torch.Tensor(x_adv)).cpu().detach().numpy())
batch_num = int(x_adv.shape[0]/batch_size)
if batch_size * batch_num != int(x_adv.shape[0]): batch_num += 1
loss_value = get_margin_loss(y_ori[:batch_size], srgt(torch.Tensor(x_adv[:batch_size])).cpu().detach().numpy())
for i in range(batch_num-1):
new_loss_value = get_margin_loss(y_ori[batch_size*(i+1):batch_size*(i+2)], srgt(torch.Tensor(x_adv[batch_size*(i+1):batch_size*(i+2)])).cpu().detach().numpy())
loss_value = np.concatenate((loss_value, new_loss_value), axis=0)
del new_loss_value
return loss_value
time_start = time.time()
metrics = np.zeros([num_iter, 7])
for i_iter in range(num_iter):
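        # One iteration: build candidate perturbations via QueryNet, spend one victim query,
        # keep the improved adversarial examples, then update the surrogates.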
# focus on unsuccessful AEs
idx_to_fool = loss_min > 0
x_curr, x_best_curr, y_curr, loss_min_curr = x[idx_to_fool], x_best[idx_to_fool], y[idx_to_fool], loss_min[idx_to_fool]
# QueryNet's forward propagation
x_q, a = querynet.forward(x_curr, x_best_curr, y_curr, get_surrogate_loss, min_val=min_val, max_val=max_val, p=p_selection(p_init, i_iter, num_iter), targeted=False)
# query
logits = model(x_q)
loss = get_margin_loss(y_curr, logits)
idx_improved = loss < loss_min_curr
loss_min[idx_to_fool] = idx_improved * loss + ~idx_improved * loss_min_curr
idx_improved = np.reshape(idx_improved, [-1, *[1] * len(x.shape[:-1])])
x_best[idx_to_fool] = idx_improved * x_q + ~idx_improved * x_best_curr
n_queries[idx_to_fool] += 1
# QueryNet's backward propagation
message = querynet.backward(idx_improved, a, data_indexes=np.where(idx_to_fool)[0],
margin_min=loss_min, img_adv=x_q, lbl_adv=logits, loss=loss, logger=logger, targeted=False)
if a is not None:
print(' '*80, end='\r')
log.print(message)
querynet.sampler.save(i_iter)
# logging
acc_corr = (loss_min > 0.0).mean()
mean_nq_all, mean_nq = np.mean(n_queries), np.mean(n_queries[loss_min <= 0])
median_nq_all, median_nq = np.median(n_queries)-1, np.median(n_queries[loss_min <= 0])-1
avg_loss = np.mean(loss_min)
elapse = time.time() - time_start
msg = '{}: Acc={:.2%}, AQ_suc={:.2f}, MQ_suc={:.1f}, AQ_all={:.2f}, MQ_all={:.1f}, ALoss_all={:.2f}, |D|={:d}, Time={:.1f}s'.\
format(i_iter + 1, acc_corr, mean_nq, median_nq, mean_nq_all, median_nq_all, avg_loss, querynet.sampler.clean_sample_indexes[-1], elapse)
log.print(msg if 'easydl' not in model.arch else msg + ', query=%d' % model.query)
metrics[i_iter] = [acc_corr, mean_nq, median_nq, mean_nq_all, median_nq_all, avg_loss, elapse]
np.save(metrics_path, metrics)
if acc_corr == 0: break
torch.cuda.empty_cache()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Define hyperparameters.')
parser.add_argument('--model', default='gdas', type=str,
help='network architecture [wrn-28-10-drop, gdas, pyramidnet272, easydlcifar] for CIFAR10'
'[inception_v3, mnasnet1_0, resnext101_32x8d] for ImageNet'
'[resnet_preact, wrn, densenet, easydlmnist] for MNIST')
parser.add_argument('--l2_attack', action='store_true', help='perform l2 attack')
parser.add_argument('--eps', type=float, default=16, help='the attack bound')
parser.add_argument('--num_iter', type=int, default=10000, help='maximum query times.')
parser.add_argument('--num_x', type=int, default=10000, help='number of samples for evaluation.')
parser.add_argument('--num_srg', type=int, default=0, help='number of surrogates.')
parser.add_argument('--use_nas', action='store_true', help='use NAS to train the surrogate.')
parser.add_argument('--use_square_plus', action='store_true', help='use Square+.')
parser.add_argument('--p_init', type=float, default=0.05, help='hyperparameter of Square, the probability of changing a coordinate.')
parser.add_argument('--gpu', type=str, default='1', help='GPU number(s).')
parser.add_argument('--run_times', type=int, default=1, help='repeated running time.')
parser.add_argument('--seed', type=int, default=-1, help='random seed')
args = parser.parse_args()
if args.use_nas: assert args.num_srg > 0
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
log = Logger('')
for model_name in args.model.split(','):
if model_name in ['wrn-28-10-drop', 'gdas', 'pyramidnet272', 'easydlcifar']: dataset = 'cifar10'
elif model_name in ['inception_v3', 'mnasnet1_0', 'resnext101_32x8d']: dataset = 'imagenet'
elif model_name in ['resnet_preact', 'wrn', 'densenet', 'easydlmnist']: dataset = 'mnist'
else: raise ValueError('Invalid Victim Name!')
if dataset == 'mnist':
if not ((args.l2_attack and args.eps == 3) or (not args.l2_attack and args.eps == 76.5)):
print('Warning: not using default eps in the paper, which is l2=3 or linfty=76.5 for MNIST.')
x_test, y_test = load_mnist(args.num_x)
batch_size = 3000 if not args.use_nas else 300
model = VictimMnist(model_name, batch_size=batch_size)
elif dataset == 'imagenet':
assert (not args.use_nas), 'NAS is not supported for ImageNet for resource concerns'
if not ((args.l2_attack and args.eps == 5) or (not args.l2_attack and args.eps == 12.75)):
print('Warning: not using default eps in the paper, which is l2=5 or linfty=12.75 for ImageNet.')
batch_size = 100 if model_name != 'resnext101_32x8d' else 32
model = VictimImagenet(model_name, batch_size=batch_size) if model_name != 'easydlmnist' else VictimEasydl(arch='easydlmnist')
x_test, y_test = load_imagenet(args.num_x, model)
else:
if not ((args.l2_attack and args.eps == 3) or (not args.l2_attack and args.eps == 16)):
print('Warning: not using default eps in the paper, which is l2=3 or linfty=16 for CIFAR10.')
x_test, y_test = load_cifar10(args.num_x)
batch_size = 2048 if not args.use_nas else 128
model = VictimCifar(model_name, no_grad=True, train_data='full', epoch='final').eval() if model_name != 'easydlcifar' else VictimEasydl(arch='easydlcifar')
logits_clean = model(x_test)
corr_classified = logits_clean.argmax(1) == y_test.argmax(1)
print('Clean accuracy: {:.2%}'.format(np.mean(corr_classified)) + ' ' * 40)
y_test = dense_to_onehot(y_test.argmax(1), n_cls=10 if dataset != 'imagenet' else 1000)
for run_time in range(args.run_times):
attack(model, x_test[corr_classified], y_test[corr_classified], logits_clean[corr_classified], dataset, batch_size, run_time, args)
| StarcoderdataPython |
1644029 | import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
settings_module = os.environ.setdefault('DJANGO_SETTINGS_MODULE', "settings.base")
application = Cling(get_wsgi_application())
| StarcoderdataPython |
1738218 | <gh_stars>1-10
# coding=utf8
import hashlib
import mimetypes
import re
import evernote.edam.type.ttypes as Types
from evernote.api.client import EvernoteClient
from evernote.edam.error.ttypes import EDAMUserException
from storage import Storage
class EvernoteController(object):
def __init__(self, token, isSpecialToken=False, sandbox=False, isInternational=False, notebooks=None):
self.token = token
if sandbox:
self.client = EvernoteClient(token=self.token, service_host='sandbox.yinxiang.com')
elif isInternational:
self.client = EvernoteClient(token=self.token, service_host='www.evernote.com')
else:
self.client = EvernoteClient(token=self.token, service_host='app.yinxiang.com')
self.isSpecialToken = isSpecialToken
self.userStore = self.client.get_user_store()
self.noteStore = self.client.get_note_store()
self.storage = Storage(notebooks)
def get_upload_limit(self):
return {
1: 25 * 1024 * 1024,
3: 100 * 1024 * 1024,
5: 200 * 1024 * 1024,
}.get(self.userStore.getUser().privilege, 0)
def create_notebook(self, noteFullPath):
if self.get(noteFullPath): return False
notebook = Types.Notebook()
notebook.name = noteFullPath
try:
notebook = self.noteStore.createNotebook(notebook)
except EDAMUserException, e:
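            # errorCode 10 with parameter 'Notebook.name' indicates a name conflict
            # (the notebook already exists server-side): refresh the local cache and report success.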
if e.errorCode == 10 and e.parameter == 'Notebook.name':
self.storage.update(self.token, self.noteStore)
return True
else:
raise e
self.storage.create_notebook(notebook)
return True
def create_note(self, noteFullPath, content='', fileDict={}):
if self.get(noteFullPath): return False
if 1 < len(noteFullPath):
notebook = noteFullPath[0]
title = noteFullPath[1]
else:
notebook = self.storage.defaultNotebook
title = noteFullPath[0]
note = Types.Note()
note.title = title
note.content = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>'
content = re.sub('<en-media.*?/>', '', content)
note.content += content
if self.get([notebook]) is None: self.create_notebook(notebook)
note.notebookGuid = self.get([notebook]).guid
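        # Attachments are embedded as ENML resources: each file body is hashed with MD5
        # and referenced from the note content through an <en-media> tag.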
if fileDict:
note.resources = []
for fileName, fileBytes in fileDict.iteritems():
fileData = Types.Data()
fileData.bodyHash = self._md5(fileBytes)
fileData.size = len(fileBytes)
fileData.body = fileBytes
fileAttr = Types.ResourceAttributes()
fileAttr.fileName = fileName
fileAttr.attachment = True
fileResource = Types.Resource()
fileResource.data = fileData
fileResource.mime = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
fileResource.attributes = fileAttr
note.resources.append(fileResource)
note.content += '<en-media type="%s" hash="%s"/>' % (fileResource.mime, fileData.bodyHash)
note.content += '</en-note>'
note = self.noteStore.createNote(note)
self.storage.create_note(note, notebook)
return True
def update_note(self, noteFullPath, content=None, fileDict={}):
note = self.get(noteFullPath)
if note is None: return self.create_note(noteFullPath, content or '', fileDict)
if 1 < len(noteFullPath):
notebook = noteFullPath[0]
title = noteFullPath[1]
else:
notebook = self.storage.defaultNotebook
title = noteFullPath[0]
oldContent = self.get_content(noteFullPath)
content = content or oldContent
header = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
guid = note.guid
content = re.sub('<en-media.*?/>', '', content)
note = Types.Note()
note.guid = guid
note.title = title
note.content = header
note.content += '<en-note>'
note.content += content
if fileDict:
note.resources = []
for fileName, fileBytes in fileDict.iteritems():
fileData = Types.Data()
fileData.bodyHash = self._md5(fileBytes)
fileData.size = len(fileBytes)
fileData.body = fileBytes
fileAttr = Types.ResourceAttributes()
fileAttr.fileName = fileName
fileAttr.attachment = True
fileResource = Types.Resource()
fileResource.data = fileData
fileResource.mime = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
fileResource.attributes = fileAttr
note.resources.append(fileResource)
note.content += '<en-media type="%s" hash="%s"/>' % (fileResource.mime, fileData.bodyHash)
note.content += '</en-note>'
self.noteStore.updateNote(self.token, note)
self.storage.delete_note(noteFullPath)
self.storage.create_note(note, notebook)
return True
def get_content(self, noteFullPath):
note = self.get(noteFullPath)
if note is None: return
r = self.noteStore.getNoteContent(note.guid)
try:
content = re.compile('[\s\S]*?<en-note[^>]*?>([\s\S]*?)</en-note>').findall(r)[0]
except:
content = ''
return content
def get_attachment(self, noteFullPath):
note = self.get(noteFullPath)
attachmentDict = {}
for resource in (self.noteStore.getNote(note.guid, False, True, False, False).resources or {}):
attachmentDict[resource.attributes.fileName] = resource.data.body
return attachmentDict
def move_note(self, noteFullPath, _to):
if self.get(noteFullPath) is None: return False
if len(noteFullPath) < 2 or 1 < len(_to): raise Exception('Type Error')
self.noteStore.copyNote(self.token, self.get(noteFullPath).guid, self.get(_to).guid)
if self.isSpecialToken:
self.noteStore.expungeNote(self.token, self.get(noteFullPath).guid)
else:
self.noteStore.deleteNote(self.token, self.get(noteFullPath).guid)
self.storage.move_note(noteFullPath, _to)
return True
def delete_note(self, noteFullPath):
note = self.get(noteFullPath)
if note is None: return False
if len(noteFullPath) < 2: raise Exception('Types Error')
self.noteStore.deleteNote(self.token, note.guid)
self.storage.delete_note(noteFullPath)
return True
def delete_notebook(self, noteFullPath):
if not self.get(noteFullPath) or not self.isSpecialToken: return False
if 1 < len(noteFullPath): raise Exception('Types Error')
self.noteStore.expungeNotebook(self.token, self.get(noteFullPath).guid)
self.storage.delete_notebook(noteFullPath)
return True
def get(self, s):
return self.storage.get(s)
def show_notebook(self):
self.storage.show_notebook()
def show_notes(self, notebook=None):
self.storage.show_notes(notebook)
def _md5(self, s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
if __name__ == '__main__':
# You can get this from 'https://%s/api/DeveloperToken.action'%SERVICE_HOST >>
# In China it's https://app.yinxiang.com/api/DeveloperToken.action <<
token = 'S=s1:U=91eca:E=15be6680420:C=1548eb6d760:P=1cd:A=en-devtoken:V=2:H=026e6ff5f5d0753eb37146a1b4660cc9'
e = EvernoteController(token, True, True)
# e.update_note('Hello', 'Test', 'Changed', 'README.md')
e.create_note(['Test', '中文'], 'Chinese')
| StarcoderdataPython |
4831861 | # Generated by Django 3.2.4 on 2021-06-15 19:28
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("app_forecast", "0004_auto_20210615_2035"),
]
operations = [
migrations.AlterField(
model_name="forecast",
name="date",
field=models.DateField(db_index=True, default=datetime.date.today),
),
migrations.AddConstraint(
model_name="forecast",
constraint=models.UniqueConstraint(
fields=("date", "country_code"),
name="date and country_code pair",
),
),
]
| StarcoderdataPython |
3353458 | # =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from typing import List, Optional
from transformers import (
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaConfig,
RobertaModel,
)
from nemo.backends.pytorch.nm import TrainableNM
from nemo.core.neural_modules import PretrainedModelInfo
from nemo.core.neural_types import ChannelType, NeuralType
from nemo.utils.decorators import add_port_docs
__all__ = ['Roberta']
class Roberta(TrainableNM):
"""
ROBERTA wraps around the Huggingface implementation of ROBERTA from their
transformers repository for easy use within NeMo.
Args:
pretrained_model_name (str): If using a pretrained model, this should
be the model's name. Otherwise, should be left as None.
config_filename (str): path to model configuration file. Optional.
vocab_size (int): Size of the vocabulary file, if not using a
pretrained model.
hidden_size (int): Size of the encoder and pooler layers.
num_hidden_layers (int): Number of hidden layers in the encoder.
num_attention_heads (int): Number of attention heads for each layer.
intermediate_size (int): Size of intermediate layers in the encoder.
hidden_act (str): Activation function for encoder and pooler layers;
"gelu", "relu", and "swish" are supported.
max_position_embeddings (int): The maximum number of tokens in a
sequence.
"""
@property
@add_port_docs()
def input_ports(self):
"""Returns definitions of module input ports.
input_ids: input token ids
token_type_ids: segment type ids
attention_mask: attention mask
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"token_type_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), ChannelType()),
}
@property
@add_port_docs()
def output_ports(self):
"""Returns definitions of module output ports.
hidden_states: output embedding
"""
return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
def __init__(
self,
pretrained_model_name=None,
config_filename=None,
vocab_size=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
max_position_embeddings=512,
):
super().__init__()
# Check that only one of pretrained_model_name, config_filename, and
# vocab_size was passed in
total = 0
if pretrained_model_name is not None:
total += 1
if config_filename is not None:
total += 1
if vocab_size is not None:
total += 1
if total != 1:
raise ValueError(
"Only one of pretrained_model_name, vocab_size, "
+ "or config_filename should be passed into the "
+ "ROBERTA constructor."
)
# TK: The following code checks the same once again.
if vocab_size is not None:
config = RobertaConfig(
vocab_size_or_config_json_file=vocab_size,
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
)
model = RobertaModel(config)
elif pretrained_model_name is not None:
model = RobertaModel.from_pretrained(pretrained_model_name)
elif config_filename is not None:
config = RobertaConfig.from_json_file(config_filename)
model = RobertaModel(config)
else:
raise ValueError(
"Either pretrained_model_name or vocab_size must" + " be passed into the ROBERTA constructor"
)
model.to(self._device)
self.add_module("roberta", model)
self.config = model.config
self._hidden_size = model.config.hidden_size
@property
def hidden_size(self):
"""
Property returning hidden size.
Returns:
Hidden size.
"""
return self._hidden_size
@staticmethod
def list_pretrained_models() -> Optional[List[PretrainedModelInfo]]:
pretrained_models = []
for key, value in ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.items():
model_info = PretrainedModelInfo(
pretrained_model_name=key,
description="weights by HuggingFace",
parameters=ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP[key],
location=value,
)
pretrained_models.append(model_info)
return pretrained_models
def forward(self, input_ids, token_type_ids, attention_mask):
return self.roberta(input_ids, attention_mask=attention_mask)[0]
| StarcoderdataPython |
4816336 | import importlib
import types
import collections.abc
import inspect
import numpy as np
import logging
import typing
from functools import partial
class Encoder:
"""
Encode arbitrary objects. The encoded object consists of dicts,
lists, ints, floats and strings.
"""
def __call__(self, obj) -> typing.Any:
"""
Encode arbitrary objects as dicts, str, int, float, list
Parameters
----------
obj : Any
object to be encoded
Returns
-------
Any
encoded object
"""
return self.encode(obj)
def encode(self, obj) -> typing.Any:
"""
Encode arbitrary objects as dicts, str, int, float, list
Parameters
----------
obj : Any
object to be encoded
Returns
-------
Any
encoded object
"""
# use type() to check for dict and list because type() does not
# consider subtypes which is the desired behaviour in this case
if isinstance(obj, (str, int, float)):
# end recursion
return obj
elif obj is None:
return obj
elif type(obj) == dict:
# end recursion
return self._encode_dict(obj)
elif type(obj) == list:
# end recursion
return self._encode_list(obj)
elif isinstance(obj, np.ndarray):
return self._encode_array(obj)
        elif isinstance(obj, collections.abc.Mapping):
            return self._encode_mapping(obj)
        elif isinstance(obj, collections.abc.Iterable):
return self._encode_iterable(obj)
elif isinstance(obj, types.ModuleType):
return self._encode_module(obj)
elif inspect.isclass(obj) or isinstance(obj, type):
# use both ways to determine functions here
# (the second uglier one serves as fallback here in case inspect
# does not cover all cases)
return self._encode_type(obj)
elif isinstance(obj, (types.BuiltinFunctionType, types.FunctionType)):
return self._encode_function(obj)
else:
return self._encode_class(obj)
def _encode_list(self, obj) -> list:
"""
Encode list
Parameters
----------
obj : list
list to be encoded
Returns
-------
list
list with encoded internal items
"""
return [self.encode(i) for i in obj]
def _encode_dict(self, obj) -> dict:
"""
Encode dict
Parameters
----------
obj : dict
dict to be encoded
Returns
-------
dict
dict with encoded internal items
"""
return {self.encode(_key):
self.encode(_item) for _key, _item in obj.items()}
def _encode_array(self, obj) -> dict:
"""
Encode array
Parameters
----------
obj : :class:`np.ndarray`
object to be encoded
Returns
-------
dict
array encoded as a list inside a dict
"""
# # if numpy array: add explicit array specifier
# use tolist instead of tostring here (even though this requires
# additional encoding steps and increases memory usage), since tolist
# retains the shape and tostring doesn't
return {"__array__": self.encode(obj.tolist())}
def _encode_mapping(self, obj) -> dict:
"""
Encode mapping
Parameters
----------
        obj : collections.abc.Mapping
object to be encoded
Returns
-------
dict
mapping encoded as a dict with original data and type
"""
# encode via encoding the type and the mapping converted to dict
# separately and add a conversion specifier
convert_repr = {
"type": self.encode(type(obj)),
"repr": self.encode(dict(obj)),
}
return {"__convert__": convert_repr}
def _encode_iterable(self, obj) -> dict:
"""
Encode iterable
Parameters
----------
obj : collections.Iterable
object to be encoded
Returns
-------
dict
iterable encoded as a dict with original data and type
"""
# encode via converting the type and the mapping converted to list
# separately and add conversion specifier
convert_repr = {
"type": self.encode(type(obj)),
"repr": self.encode(list(obj)),
}
return {"__convert__": convert_repr}
def _encode_module(self, obj) -> dict:
"""
Encode module
Parameters
----------
obj : types.ModuleType
module to be encoded
Returns
-------
dict
module encoded as a dict
"""
# encode via name and module specifier
return {"__module__": obj.__module__}
def _encode_type(self, obj) -> dict:
"""
Encode class or type
Parameters
----------
obj :
class/type to be encoded
Returns
-------
dict
class/type encoded as a dict
"""
type_repr = {
"module": self.encode(obj.__module__),
"name": self.encode(obj.__name__),
}
return {"__type__": type_repr}
def _encode_function(self, obj) -> dict:
"""
Encode function
Parameters
----------
obj :
function to be encoded
Returns
-------
dict
function encoded as a dict
"""
function_repr = {
"module": self.encode(obj.__module__),
"name": self.encode(obj.__name__),
}
return {"__function__": function_repr}
def _encode_class(self, obj) -> dict:
"""
Encode arbitrary object
Parameters
----------
obj :
arbitrary object to be encoded
Returns
-------
dict
arbitrary object encoded as a dict
"""
try:
class_repr = {
"type": self.encode(type(obj)),
"dict": self.encode(obj.__dict__)
}
return {"__class__": class_repr}
except Exception as e:
logging.error(e)
class Decoder:
"""
    Decode arbitrary objects which were encoded by :class:`Encoder`.
"""
def __init__(self):
super().__init__()
self._decode_mapping = {
"__array__": self._decode_array,
"__convert__": self._decode_convert,
"__module__": self._decode_module,
"__type__": self._decode_type,
"__function__": self._decode_function,
"__class__": self._decode_class,
"__classargs__": self._decode_classargs,
"__functionargs__": self._decode_functionargs
}
def __call__(self, obj) -> typing.Any:
"""
Decode object
Parameters
----------
obj : Any
object to be decoded
Returns
-------
Any
decoded object
"""
return self.decode(obj)
def decode(self, obj) -> typing.Any:
"""
Decode object
Parameters
----------
obj : Any
object to be decoded
Returns
-------
Any
decoded object
"""
if isinstance(obj, (str, int, float)):
return obj
elif isinstance(obj, dict):
return self._decode_dict(obj)
elif isinstance(obj, list):
return self._decode_list(obj)
else:
return obj
def _decode_dict(self, obj) -> dict:
"""
Decode dict with respect to unique identifier keys.
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
dict
decoded dict
"""
for key in obj.keys():
if key in self._decode_mapping:
return self._decode_mapping[key](obj[key])
else:
obj[key] = self.decode(obj[key])
return obj
def _decode_list(self, obj) -> list:
"""
Decode list
Parameters
----------
obj : list
list to be decoded
Returns
-------
Any
decoded list
"""
return [self.decode(_i) for _i in obj]
def _decode_array(self, obj) -> np.ndarray:
"""
Decode np.ndarray
Parameters
----------
obj : :class:`np.ndarray`
array to be decoded
Returns
-------
:class:`np.ndarray`
decoded array
"""
return np.array(self.decode(obj))
def _decode_convert(self, obj: dict) -> typing.Union[
typing.Iterable, typing.Mapping]:
"""
Decode mappings and iterables
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
typing.Union[typing.Iterable, typing.Mapping]
decoded object
"""
# decode items in dict representation
convert_repr = self.decode(obj)
# create new object
return convert_repr["type"](convert_repr["repr"])
def _decode_module(self, obj: dict) -> types.ModuleType:
"""
Decode module
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
ModuleType
decoded module
"""
return importlib.import_module(self.decode(obj))
def _decode_type(self, obj) -> typing.Any:
"""
Decode type
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
Any
decoded type
"""
# decode items in dict representation
type_repr = self.decode(obj)
return getattr(importlib.import_module(type_repr["module"]),
type_repr["name"])
def _decode_function(self, obj: dict) -> typing.Union[
types.FunctionType, types.BuiltinFunctionType]:
"""
Decode function
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
typing.Union[types.FunctionType, types.BuiltinFunctionType]
decoded function
"""
# decode items in dict representation
function_repr = self.decode(obj)
return getattr(importlib.import_module(function_repr["module"]),
function_repr["name"])
def _decode_class(self, obj: dict) -> typing.Any:
"""
Decode arbitrary object
Parameters
----------
obj : dict
dict to be decoded
Returns
-------
Any
decoded object
"""
class_repr = self.decode(obj)
cls_type = class_repr["type"]
cls_dict = class_repr["dict"]
# need to create a temporary type here (which is basically a raw
# object, since using object directly raises
# "TypeError: __class__ assignment only supported for heap types
# or ModuleType subclasses"
# After a bit of research this kind of class re-creation only
# seems to be possible, if the intermediate class was created in
# python (which is not True for the object type since this is part
# of Python's C Core)
tmp_cls = type("__tmp", (), {})
# create instance of temporary class
tmp_instance = tmp_cls()
# change class type
tmp_instance.__class__ = self.decode(cls_type)
# update attributes of class
tmp_instance.__dict__.update(self.decode(cls_dict))
return tmp_instance
def _decode_classargs(self, obj: dict) -> typing.Any:
"""
Create an object from specified class and arguments
Parameters
----------
obj : dict
dictionary which representes the object. Must include `module` and
`name`. Can optionally include `args` and `kwargs`.
Returns
-------
Any
decoded object
Raises
------
TypeError
arguments and name must be encoded as a dict
"""
classargs = self.decode(obj)
if not isinstance(classargs, dict):
raise TypeError("Arguments for classargs must be defined as dict.")
obj_cls = getattr(importlib.import_module(classargs["module"]),
classargs["name"])
args = classargs.get("args", [])
kwargs = classargs.get("kwargs", {})
return obj_cls(*args, **kwargs)
def _decode_functionargs(self, obj: dict) -> typing.Any:
"""
Create an function from specified function and arguments
Parameters
----------
obj : dict
            dictionary which represents the function. Must include `module`
            and `name`. Can optionally include `args` and `kwargs` which are
            passed via `functools.partial`.
Returns
-------
Any
decoded function
Raises
------
TypeError
arguments and name must be encoded as a dict
"""
functionargs = self.decode(obj)
if not isinstance(functionargs, dict):
raise TypeError("Arguments for classargs must be defined as dict.")
fn = getattr(importlib.import_module(functionargs["module"]),
functionargs["name"])
args = functionargs.get("args", [])
kwargs = functionargs.get("kwargs", {})
        return partial(fn, *args, **kwargs)
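# Illustrative round-trip sketch (not part of the original module; names are assumptions):
#   encoder, decoder = Encoder(), Decoder()
#   encoded = encoder({"weights": np.arange(3), "label": "demo"})
#   restored = decoder(encoded)  # rebuilds the dict, including the ndarray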
| StarcoderdataPython |
3305351 | # type: ignore
# -*- coding: utf-8 -*-
#
# ramstk.analyses.milhdk217f.models.inductor.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Inductor MIL-HDBK-217F Constants and Calculations Module."""
# Standard Library Imports
from math import exp
from typing import Dict, Union
PART_COUNT_LAMBDA_B = {
1: {
1: [
0.0035,
0.023,
0.049,
0.019,
0.065,
0.027,
0.037,
0.041,
0.052,
0.11,
0.0018,
0.053,
0.16,
2.3,
],
2: [
0.0071,
0.046,
0.097,
0.038,
0.13,
0.055,
0.073,
0.081,
0.10,
0.22,
0.035,
0.11,
0.31,
4.7,
],
3: [
0.023,
0.16,
0.35,
0.13,
0.45,
0.21,
0.27,
0.35,
0.45,
0.82,
0.011,
0.37,
1.2,
16.0,
],
4: [
0.028,
0.18,
0.39,
0.15,
0.52,
0.22,
0.29,
0.33,
0.42,
0.88,
0.015,
0.42,
1.2,
19.0,
],
},
2: {
1: [
0.0017,
0.0073,
0.023,
0.0091,
0.031,
0.011,
0.015,
0.016,
0.022,
0.052,
0.00083,
0.25,
0.073,
1.1,
],
2: [
0.0033,
0.015,
0.046,
0.018,
0.061,
0.022,
0.03,
0.033,
0.044,
0.10,
0.0017,
0.05,
0.15,
2.2,
],
},
}
PART_COUNT_PI_Q = [0.25, 1.0, 10.0]
PART_STRESS_PI_Q = {
1: {1: [1.5, 5.0], 2: [3.0, 7.5], 3: [8.0, 30.0], 4: [12.0, 30.0]},
2: [0.03, 0.1, 0.3, 1.0, 4.0, 20.0],
}
PI_E = {
1: [
1.0,
6.0,
12.0,
5.0,
16.0,
6.0,
8.0,
7.0,
9.0,
24.0,
0.5,
13.0,
34.0,
610.0,
],
2: [
1.0,
4.0,
12.0,
5.0,
16.0,
5.0,
7.0,
6.0,
8.0,
24.0,
0.5,
13.0,
34.0,
610.0,
],
}
REF_TEMPS = {
1: {1: 329.0, 2: 352.0, 3: 364.0, 4: 400.0, 5: 398.0, 6: 477.0},
2: {1: 329.0, 2: 352.0, 3: 364.0, 4: 409.0},
}
def calculate_part_count(**attributes: Dict[str, Union[float, int, str]]) -> float:
"""Wrap get_part_count_lambda_b().
This wrapper allows us to pass an attribute dict from a generic parts
count function.
    :param attributes: the attributes for the inductive device being calculated.
:return: _base_hr; the parts count base hazard rates.
:rtype: float
"""
return get_part_count_lambda_b(
attributes["subcategory_id"],
attributes["environment_active_id"],
attributes["family_id"],
)
def calculate_part_stress(
**attributes: Dict[str, Union[float, int, str]]
) -> Dict[str, Union[float, int, str]]:
"""Calculate the part stress hazard rate for an inductive device.
This function calculates the MIL-HDBK-217F hazard rate using the part
stress method.
:return: attributes; the keyword argument (hardware attribute)
dictionary with updated values.
:rtype: dict
"""
attributes["piC"] = float(attributes["construction_id"])
attributes["piQ"] = get_part_stress_quality_factor(
attributes["subcategory_id"],
attributes["quality_id"],
attributes["family_id"],
)
_power_input = attributes["voltage_dc_operating"] * attributes["current_operating"]
if attributes["subcategory_id"] == 2 and attributes["specification_id"] == 2:
attributes["temperature_rise"] = get_temperature_rise_spec_sheet(
int(attributes["page_number"])
)
elif attributes["power_operating"] > 0.0 and attributes["area"] > 0.0:
attributes["temperature_rise"] = calculate_temperature_rise_power_loss_surface(
attributes["power_operating"], attributes["area"]
)
elif attributes["power_operating"] > 0.0 and attributes["weight"] > 0.0:
attributes["temperature_rise"] = calculate_temperature_rise_power_loss_weight(
attributes["power_operating"], attributes["weight"]
)
elif _power_input > 0.0 and attributes["weight"] > 0.0:
attributes["temperature_rise"] = calculate_temperature_rise_input_power_weight(
_power_input, attributes["weight"]
)
else:
attributes["temperature_rise"] = 0.0
attributes["temperature_hot_spot"] = calculate_hot_spot_temperature(
attributes["temperature_active"], attributes["temperature_rise"]
)
attributes["lambda_b"] = calculate_part_stress_lambda_b(
attributes["subcategory_id"],
attributes["insulation_id"],
attributes["temperature_hot_spot"],
)
attributes["hazard_rate_active"] = (
attributes["lambda_b"] * attributes["piQ"] * attributes["piE"]
)
if attributes["subcategory_id"] == 2:
attributes["hazard_rate_active"] = (
attributes["hazard_rate_active"] * attributes["piC"]
)
return attributes
def calculate_hot_spot_temperature(
temperature_active: float,
temperature_rise: float,
) -> float:
"""Calculate the coil or transformer hot spot temperature.
:return: _temperature_hot_spot; the calculated hot spot temperature.
:rtype: float
"""
return temperature_active + 1.1 * temperature_rise
def calculate_part_stress_lambda_b(
subcategory_id: int,
insulation_id: int,
temperature_hot_spot: float,
) -> float:
"""Calculate part stress base hazard rate (lambda b) from MIL-HDBK-217F.
This function calculates the MIL-HDBK-217F hazard rate using the parts
stress method.
:param subcategory_id: the subcategory ID for the inductive device being calculated.
:param insulation_id: the insulation class ID for the inductive device being
calculated.
:param temperature_hot_spot: the hot spot temperature for the inductive device
being calculated.
:return: _lambda_b; the calculated parts stress lambda_b.
:rtype: float
:raise: KeyError when passed an unknown subcategory ID or insulation ID.
"""
_dic_factors = {
1: {
1: [0.0018, 15.6],
2: [0.002, 14.0],
3: [0.0018, 8.7],
4: [0.002, 10.0],
5: [0.00125, 3.8],
6: [0.00159, 8.4],
},
2: {
1: [0.000335, 15.6],
2: [0.000379, 14.0],
3: [0.000319, 8.7],
4: [0.00035, 10.0],
},
}
_ref_temp = REF_TEMPS[subcategory_id][insulation_id]
_f0 = _dic_factors[subcategory_id][insulation_id][0]
_f1 = _dic_factors[subcategory_id][insulation_id][1]
return _f0 * exp(((temperature_hot_spot + 273.0) / _ref_temp) ** _f1)
def calculate_temperature_rise_input_power_weight(
power_input: float,
weight: float,
) -> float:
"""Calculate the temperature rise based on input power and xfmr weight.
.. attention:: input power must be calculated by the calling function from
voltage and current as it is not an attribute of an inductive device.
:param power_input: the input power in W.
:param weight: the weight of the xfmr in lbf.
    :return: _temperature_rise; the calculated temperature rise in C.
:rtype: float
:raise: ZeroDivisionError if passed a weight=0.0.
"""
return 2.1 * (power_input / weight**0.6766)
def calculate_temperature_rise_power_loss_surface(
power_operating: float,
area: float,
) -> float:
"""Calculate the temperature rise based on the power loss and surface area.
:param power_operating: the power loss in W.
:param area: the radiating surface area of the case in sq. inches.
:return: _temperature_rise; the calculated temperature rise in C.
:rtype: float
:raise: ZeroDivisionError if passed an area=0.0.
"""
return 125.0 * power_operating / area
def calculate_temperature_rise_power_loss_weight(
power_operating: float,
weight: float,
) -> float:
"""Calculate the temperature rise based on the power loss and xfmr weight.
:param power_operating: the power loss in W.
:param weight: the weight of the device in lbf.
:return: _temperature_rise; the calculated temperature rise in C.
:rtype: float
:raise: ZeroDivisionError if passed a weight=0.0.
"""
return 11.5 * (power_operating / weight**0.6766)
def get_part_count_lambda_b(
subcategory_id: int,
environment_active_id: int,
family_id: int,
) -> float:
"""Retrieve the parts count base hazard rate (lambda b) from MIL-HDBK-217F.
    This function calculates the MIL-HDBK-217F hazard rate using the parts
    count method. The dictionary PART_COUNT_LAMBDA_B contains the
    MIL-HDBK-217F parts count base hazard rates.  Keys for
    PART_COUNT_LAMBDA_B are:
#. subcategory_id
#. environment_active_id
#. family id; if the inductor subcategory is NOT family dependent, then
the second key will be zero.
Current subcategory IDs are:
+----------------+-------------------------------+-----------------+
| Subcategory | Inductor | MIL-HDBK-217F |
| ID | Style | Section |
+================+===============================+=================+
| 1 | Transformer | 11.1 |
+----------------+-------------------------------+-----------------+
| 2 | Coil | 11.2 |
+----------------+-------------------------------+-----------------+
These keys return a list of base hazard rates. The hazard rate to use is
selected from the list depending on the active environment.
:param subcategory_id: the subcategory ID for the inductive device being calculated.
:param environment_active_id: the active operating environment ID for the inductive
device being calculated.
:param family_id: the family ID for the inductive device being calculated.
:return: _base_hr; the part count base hazard rate.
:rtype: float
:raise: KeyError if passed an unknown subcategory ID or family ID.
:raise: IndexError if passed an unknown active environment ID.
"""
return PART_COUNT_LAMBDA_B[subcategory_id][family_id][environment_active_id - 1]
def get_part_stress_quality_factor(
subcategory_id: int,
quality_id: int,
family_id: int,
) -> float:
"""Select the MIL-HDBK-217F quality factor for the inductor device.
:param subcategory_id: the subcategory identifier.
:param quality_id: the quality level identifier.
:param family_id: the device family identifier.
:return: _pi_q; the selected quality factor
:rtype: float
:raise: IndexError if passed an unknown quality ID.
:raise: KeyError if passed an unknown subcategory ID or family ID.
"""
return (
PART_STRESS_PI_Q[subcategory_id][family_id][quality_id - 1]
if subcategory_id == 1
else PART_STRESS_PI_Q[subcategory_id][quality_id - 1]
)
def get_temperature_rise_spec_sheet(page_number: int) -> float:
"""Retrieve the temperature rise based on the spec sheet from MIL-C-39010.
:param page_number: the spec sheet to retrieve the temperature rise
for.
:return: _temperature_rise; the spec sheet temperature rise.
:rtype: float
:raise: KeyError if an unknown spec sheet is passed.
"""
return {
1: 15.0,
2: 15.0,
3: 15.0,
4: 35.0,
5: 15.0,
6: 35.0,
7: 15.0,
8: 35.0,
9: 15.0,
10: 15.0,
11: 35.0,
12: 35.0,
13: 15.0,
14: 15.0,
}[page_number]
def set_default_values(
**attributes: Dict[str, Union[float, int, str]],
) -> Dict[str, Union[float, int, str]]:
"""Set the default value of various parameters.
    :param attributes: the attribute dict for the inductive device being calculated.
:return: attributes; the updated attribute dict.
:rtype: dict
"""
if attributes["rated_temperature_max"] <= 0.0:
attributes["rated_temperature_max"] = _set_default_max_rated_temperature(
attributes["subcategory_id"]
)
if attributes["temperature_rise"] <= 0.0:
attributes["temperature_rise"] = _set_default_temperature_rise(
attributes["subcategory_id"],
attributes["family_id"],
)
return attributes
def _set_default_max_rated_temperature(subcategory_id: int) -> float:
"""Set the default maximum rated temperature.
:param subcategory_id: the subcategory ID of the inductive device with missing
defaults.
:return: _rated_temperature_max
:rtype: float
"""
return 130.0 if subcategory_id == 1 else 125.0
def _set_default_temperature_rise(
subcategory_id: int,
family_id: int,
) -> float:
"""Set the default temperature rise.
:param subcategory_id: the subcategory ID of the inductive device with missing
defaults.
:param family_id: the family ID of the inductive device with missing defaults.
:return: _temperature_rise
:rtype: float
"""
return 30.0 if subcategory_id == 1 and family_id == 3 else 10.0
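# Illustrative usage sketch (argument values are assumptions, not taken from MIL-HDBK-217F):
#   lambda_b = calculate_part_stress_lambda_b(
#       subcategory_id=1, insulation_id=3, temperature_hot_spot=95.0)
#   pi_q = get_part_stress_quality_factor(subcategory_id=1, quality_id=1, family_id=2)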
| StarcoderdataPython |
# raw_input returns a string; int() converts it to an integer
Total_bill = int(raw_input("Enter the total amount: "))
# float() converts the string input to a floating point number
tip_rate = float(raw_input("Enter tip rate (such as .15): "))
tip=(Total_bill*tip_rate)
total=int(Total_bill+tip)
# use string formatting to output result
print "You should pay: $%d" % (total)
| StarcoderdataPython |
1668604 | <gh_stars>0
from pytz import timezone
from helpers import query, generate_uuid
from ..queries.document import construct_get_file_for_document, construct_insert_document
from ..sudo_query import update as sudo_update
from .file import download_sh_doc_to_mu_file
from .exceptions import NoQueryResultsException
TIMEZONE = timezone('Europe/Brussels')
APPLICATION_GRAPH = "http://mu.semte.ch/application"
SIGNED_DOCS_GRAPH = "http://mu.semte.ch/graphs/organizations/kanselarij"
DOC_BASE_URI = "http://kanselarij.vo.data.gift/id/stukken/"
def get_file_for_document(document_uri):
query_string = construct_get_file_for_document(document_uri, file_mimetype="application/pdf")
file_results = query(query_string)['results']['bindings']
if not file_results:
raise NoQueryResultsException("No pdf-file found for document by uri <{}>".format(document_uri))
file = {key: value["value"] for key, value in file_results[0].items()}
return file
def download_sh_doc_to_kaleidos_doc(sh_package_id, sh_document_id, document_name):
virtual_file = download_sh_doc_to_mu_file(sh_package_id, sh_document_id)
doc = {
"uuid": generate_uuid(),
"name": document_name
}
doc["uri"] = DOC_BASE_URI + doc["uuid"]
ins_doc_query_string = construct_insert_document(document_name, virtual_file["uri"], SIGNED_DOCS_GRAPH)
sudo_update(ins_doc_query_string)
return doc
| StarcoderdataPython |
1622730 | import numpy as np
from toolkit.methods.pnpl import CvxPnPL, DLT, EPnPL, OPnPL
from toolkit.suites import parse_arguments, PnPLReal
from toolkit.datasets import Linemod, Occlusion
# reproducibility is a great thing
np.random.seed(42)
# parse console arguments
args = parse_arguments()
# Just a loading data scenario
if args.load:
session = PnPLReal.load(args.load)
session.print(args.print_mode)
quit()
# run something
session = PnPLReal(methods=[CvxPnPL, DLT, EPnPL, OPnPL])
session.run(data=[Linemod(args.datasets_prefix), Occlusion(args.datasets_prefix)])
# session.run(data=[Linemod(args.datasets_prefix)])
if args.save:
session.save(args.save)
session.print()
| StarcoderdataPython |
3266190 | from pygame.rect import Rect
from battle_city.collections.sliced_array import SlicedArray
from battle_city.monsters import Coin
import pytest
def test_init_empty():
array = SlicedArray(grid=32)
assert array._parts == {}
assert array._grid == 32
assert len(array) == 0
def test_init_filled():
coin_a = Coin(0, 0)
coin_b = Coin(0, 0)
array = SlicedArray([coin_a, coin_b], grid=16)
assert array._parts == {
(0, 0): [coin_a, coin_b],
}
assert len(array) == 2
def test_append():
coin_a = Coin(0, 0)
coin_b = Coin(16, 16)
array = SlicedArray([coin_a], grid=16)
array.append(coin_b)
assert array._parts == {
(0, 0): [coin_a],
(1, 1): [coin_b],
}
assert len(array) == 2
def test_remove():
coin_a = Coin(0, 0)
coin_b = Coin(16, 16)
array = SlicedArray([coin_a, coin_b], grid=16)
array.remove(coin_b)
assert array._parts == {
(0, 0): [coin_a],
(1, 1): [],
}
assert len(array) == 1
def test_remove_not_exists():
coin_a = Coin(0, 0)
coin_b = Coin(16, 16)
array = SlicedArray([coin_a], grid=16)
with pytest.raises(ValueError):
array.remove(coin_b)
assert array._parts == {
(0, 0): [coin_a],
(1, 1): [],
}
assert len(array) == 1
def test_find_nearest():
far_coins = {Coin(32, 32), Coin(32, 64), Coin(64, 64)}
nearest_coins = {Coin(0, 0), Coin(1, 1), Coin(16, 1), Coin(1, 16)}
array = SlicedArray(far_coins, grid=16)
array.multiple_append(nearest_coins)
result = array.find_nearest(Rect(18, 18, 13, 13))
# rect.bottom/rect.right is 31
assert set(result) == nearest_coins
def test_iter():
coin_a = Coin(0, 0)
coin_b = Coin(16, 16)
array = SlicedArray([coin_a, coin_b], grid=16)
assert set(array) == {coin_a, coin_b}
| StarcoderdataPython |
3232041 | import boto3
import click
def get_r_client():
return boto3.client('rekognition', region_name='eu-west-1')
def delete_collection(client,collection):
print(client.delete_collection(CollectionId=collection))
@click.command()
@click.option('--collection-id', help='Your picture file')
def main(collection_id):
c = get_r_client()
delete_collection(c,collection_id)
if __name__ == "__main__":
main() | StarcoderdataPython |
1761423 | <reponame>vtecftwy/unpackai
import streamlit as st
def make_predictions():
st.write("Hello")
st.button("useless button")
| StarcoderdataPython |
147624 | <filename>kerlescan/exceptions.py
class HTTPError(Exception):
def __init__(self, status_code, message=""):
"""
Raise this exception to return an http response indicating an error.
This is a boilerplate exception that was originally from Crane project.
:param status_code: HTTP status code. It's a good idea to get this straight
from httplib, such as httplib.NOT_FOUND
:type status_code: int
:param message: optional error message to be put in the response
body. If not supplied, the default message for the
status code will be used.
"""
super(HTTPError, self).__init__()
self.message = message
self.status_code = status_code
class ItemNotReturned(Exception):
def __init__(self, message):
"""
Raise this exception if an item was not returned by inventory service
"""
super(ItemNotReturned, self).__init__()
self.message = message
class ServiceError(Exception):
def __init__(self, message):
"""
Raise this exception if the inventory service is not reachable or does
not provide a valid response
"""
super(ServiceError, self).__init__()
self.message = message
class RBACDenied(Exception):
def __init__(self, message):
"""
Raise this exception if the inventory service reports that you do not
have rbac permission to access the service
"""
super(RBACDenied, self).__init__()
self.message = message
class UnparsableNEVRAError(RuntimeError):
def __init__(self, message):
"""
Raise this exception if we cannot parse a nevra
"""
super(RuntimeError, self).__init__()
self.message = message
| StarcoderdataPython |
138427 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamlit/proto/Video.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='streamlit/proto/Video.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bstreamlit/proto/Video.proto\"\x97\x01\n\x05Video\x12\x0b\n\x03url\x18\x06 \x01(\t\x12\x12\n\nstart_time\x18\x03 \x01(\x05\x12\x19\n\x04type\x18\x05 \x01(\x0e\x32\x0b.Video.Type\"2\n\x04Type\x12\n\n\x06UNUSED\x10\x00\x12\n\n\x06NATIVE\x10\x01\x12\x12\n\x0eYOUTUBE_IFRAME\x10\x02J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05R\x06\x66ormatR\x04\x64\x61tab\x06proto3'
)
_VIDEO_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='Video.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNUSED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NATIVE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='YOUTUBE_IFRAME', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=101,
serialized_end=151,
)
_sym_db.RegisterEnumDescriptor(_VIDEO_TYPE)
_VIDEO = _descriptor.Descriptor(
name='Video',
full_name='Video',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='Video.url', index=0,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='Video.start_time', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='Video.type', index=2,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_VIDEO_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=183,
)
_VIDEO.fields_by_name['type'].enum_type = _VIDEO_TYPE
_VIDEO_TYPE.containing_type = _VIDEO
DESCRIPTOR.message_types_by_name['Video'] = _VIDEO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Video = _reflection.GeneratedProtocolMessageType('Video', (_message.Message,), {
'DESCRIPTOR' : _VIDEO,
'__module__' : 'streamlit.proto.Video_pb2'
# @@protoc_insertion_point(class_scope:Video)
})
_sym_db.RegisterMessage(Video)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1770570 | import asyncio
import dataclasses
@dataclasses.dataclass
class Command:
future: asyncio.Future = dataclasses.field(init=False, compare=False, hash=False)
def __post_init__(self):
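        # get_running_loop() raises when no event loop is active; falling back to None
        # lets a Command be constructed outside async code (e.g. in tests).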
try:
self.future = asyncio.get_running_loop().create_future()
except Exception:
self.future = None
@dataclasses.dataclass
class Event:
pass
| StarcoderdataPython |
4813077 | import numpy as np
def dump_nparray(array, filename):
    # ndim, then each dimension size, then the raw data; binary mode is
    # required for numpy's tofile/fromfile.
    array_file = open(filename, 'wb')
    np.uint32(array.ndim).tofile(array_file)
    for d in range(array.ndim):
        np.uint32(array.shape[d]).tofile(array_file)
    array.tofile(array_file)
    array_file.close()
def load_nparray(filename, array_dtype):
    array_file = open(filename, 'rb')
    n_dim = np.fromfile(array_file, dtype=np.uint32, count=1)[0]
    shape = []
    for d in range(n_dim):
        shape.append(np.fromfile(array_file, dtype=np.uint32, count=1)[0])
    array_data = np.fromfile(array_file, dtype=array_dtype)
    array_file.close()
    return np.reshape(array_data, shape)
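# Example round trip (illustrative; the file name is an assumption):
#   a = np.arange(12, dtype=np.float64).reshape(3, 4)
#   dump_nparray(a, "a.bin")
#   b = load_nparray("a.bin", np.float64)   # b has the same shape and values as a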
| StarcoderdataPython |
10985 | from django.contrib import admin
from .models import Image
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('image', 'predict_covid', 'predict_no_findings', 'predict_pneumonia', 'created_at', 'updated_at', 'activated_at')
| StarcoderdataPython |
101167 | <filename>binary_mnist_pathnet.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
import input_data
import pathnet
import numpy as np
import time
FLAGS = None
def train():
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir,
one_hot=True,
fake_data=FLAGS.fake_data)
total_tr_data, total_tr_label = mnist.train.next_batch(mnist.train._num_examples);
# Gathering a1 Data
tr_data_a1=total_tr_data[(total_tr_label[:,FLAGS.a1]==1.0)];
for i in range(len(tr_data_a1)):
for j in range(len(tr_data_a1[0])):
rand_num=np.random.rand();
if(rand_num>=0.5):
tr_data_a1[i,j]=np.minimum(tr_data_a1[i,j]+rand_num,1.0);
# Gathering a2 Data
tr_data_a2=total_tr_data[(total_tr_label[:,FLAGS.a2]==1.0)];
for i in range(len(tr_data_a2)):
for j in range(len(tr_data_a2[0])):
rand_num=np.random.rand();
if(rand_num>=0.5):
tr_data_a2[i,j]=np.minimum(tr_data_a2[i,j]+rand_num,1.0);
# Gathering b1 Data
tr_data_b1=total_tr_data[(total_tr_label[:,FLAGS.b1]==1.0)];
for i in range(len(tr_data_b1)):
for j in range(len(tr_data_b1[0])):
rand_num=np.random.rand();
if(rand_num>=0.5):
tr_data_b1[i,j]=np.minimum(tr_data_b1[i,j]+rand_num,1.0);
# Gathering b2 Data
tr_data_b2=total_tr_data[(total_tr_label[:,FLAGS.b2]==1.0)];
for i in range(len(tr_data_b2)):
for j in range(len(tr_data_b2[0])):
rand_num=np.random.rand();
if(rand_num>=0.5):
tr_data_b2[i,j]=np.minimum(tr_data_b2[i,j]+rand_num,1.0);
tr_data1=np.append(tr_data_a1,tr_data_a2,axis=0);
tr_label1=np.zeros((len(tr_data1),2),dtype=float);
for i in range(len(tr_data1)):
if(i<len(tr_data_a1)):
tr_label1[i,0]=1.0;
else:
tr_label1[i,1]=1.0;
tr_data2=np.append(tr_data_b1,tr_data_b2,axis=0);
tr_label2=np.zeros((len(tr_data2),2),dtype=float);
for i in range(len(tr_data2)):
if(i<len(tr_data_b1)):
tr_label2[i,0]=1.0;
else:
tr_label2[i,1]=1.0;
## TASK 1
sess = tf.InteractiveSession()
# Input placeholders
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, 784], name='x-input')
y_ = tf.placeholder(tf.float32, [None, 2], name='y-input')
with tf.name_scope('input_reshape'):
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', image_shaped_input, 2)
# geopath_examples
geopath=pathnet.geopath_initializer(FLAGS.L,FLAGS.M);
# fixed weights list
fixed_list=np.ones((FLAGS.L,FLAGS.M),dtype=str);
for i in range(FLAGS.L):
for j in range(FLAGS.M):
fixed_list[i,j]='0';
# Hidden Layers
weights_list=np.zeros((FLAGS.L,FLAGS.M),dtype=object);
biases_list=np.zeros((FLAGS.L,FLAGS.M),dtype=object);
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if(i==0):
weights_list[i,j]=pathnet.module_weight_variable([784,FLAGS.filt]);
biases_list[i,j]=pathnet.module_bias_variable([FLAGS.filt]);
else:
weights_list[i,j]=pathnet.module_weight_variable([FLAGS.filt,FLAGS.filt]);
biases_list[i,j]=pathnet.module_bias_variable([FLAGS.filt]);
for i in range(FLAGS.L):
layer_modules_list=np.zeros(FLAGS.M,dtype=object);
for j in range(FLAGS.M):
if(i==0):
layer_modules_list[j]=pathnet.module(x, weights_list[i,j], biases_list[i,j], 'layer'+str(i+1)+"_"+str(j+1))*geopath[i,j];
else:
layer_modules_list[j]=pathnet.module2(j,net, weights_list[i,j], biases_list[i,j], 'layer'+str(i+1)+"_"+str(j+1))*geopath[i,j];
net=np.sum(layer_modules_list)/FLAGS.M;
#net=net/FLAGS.M;
# Output Layer
output_weights=pathnet.module_weight_variable([FLAGS.filt,2]);
output_biases=pathnet.module_bias_variable([2]);
y = pathnet.nn_layer(net,output_weights,output_biases,'output_layer');
# Cross Entropy
with tf.name_scope('cross_entropy'):
diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
with tf.name_scope('total'):
cross_entropy = tf.reduce_mean(diff)
tf.summary.scalar('cross_entropy', cross_entropy)
# Need to learn variables
var_list_to_learn=[]+output_weights+output_biases;
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if (fixed_list[i,j]=='0'):
var_list_to_learn+=weights_list[i,j]+biases_list[i,j];
# GradientDescent
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(cross_entropy,var_list=var_list_to_learn);
# Accuracy
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
# Merge all the summaries and write them out to /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')
tf.global_variables_initializer().run()
# Generating randomly geopath
geopath_set=np.zeros(FLAGS.candi,dtype=object);
for i in range(FLAGS.candi):
geopath_set[i]=pathnet.get_geopath(FLAGS.L,FLAGS.M,FLAGS.N);
# parameters placeholders and ops
var_update_ops=np.zeros(len(var_list_to_learn),dtype=object);
var_update_placeholders=np.zeros(len(var_list_to_learn),dtype=object);
for i in range(len(var_list_to_learn)):
var_update_placeholders[i]=tf.placeholder(var_list_to_learn[i].dtype,shape=var_list_to_learn[i].get_shape());
var_update_ops[i]=var_list_to_learn[i].assign(var_update_placeholders[i]);
# geopathes placeholders and ops
geopath_update_ops=np.zeros((len(geopath),len(geopath[0])),dtype=object);
geopath_update_placeholders=np.zeros((len(geopath),len(geopath[0])),dtype=object);
for i in range(len(geopath)):
for j in range(len(geopath[0])):
geopath_update_placeholders[i,j]=tf.placeholder(geopath[i,j].dtype,shape=geopath[i,j].get_shape());
geopath_update_ops[i,j]=geopath[i,j].assign(geopath_update_placeholders[i,j]);
acc_geo=np.zeros(FLAGS.B,dtype=float);
summary_geo=np.zeros(FLAGS.B,dtype=object);
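  # PathNet tournament selection: each generation samples B candidate paths, trains
  # every candidate for T mini-batches, then the winner's path overwrites the losers
  # and is mutated, gradually evolving a good module routing for task 1.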
for i in range(FLAGS.max_steps):
# Select Candidates to Tournament
compet_idx=range(FLAGS.candi);
np.random.shuffle(compet_idx);
compet_idx=compet_idx[:FLAGS.B];
# Learning & Evaluating
for j in range(len(compet_idx)):
# Shuffle the data
idx=range(len(tr_data1));
np.random.shuffle(idx);
tr_data1=tr_data1[idx];tr_label1=tr_label1[idx];
# Insert Candidate
pathnet.geopath_insert(sess,geopath_update_placeholders,geopath_update_ops,geopath_set[compet_idx[j]],FLAGS.L,FLAGS.M);
acc_geo_tr=0;
for k in range(FLAGS.T):
summary_geo_tr, _, acc_geo_tmp = sess.run([merged, train_step,accuracy], feed_dict={x:tr_data1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:],y_:tr_label1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:]});
acc_geo_tr+=acc_geo_tmp;
acc_geo[j]=acc_geo_tr/FLAGS.T;
summary_geo[j]=summary_geo_tr;
# Tournament
winner_idx=np.argmax(acc_geo);
acc=acc_geo[winner_idx];
summary=summary_geo[winner_idx];
# Copy and Mutation
for j in range(len(compet_idx)):
if(j!=winner_idx):
geopath_set[compet_idx[j]]=np.copy(geopath_set[compet_idx[winner_idx]]);
geopath_set[compet_idx[j]]=pathnet.mutation(geopath_set[compet_idx[j]],FLAGS.L,FLAGS.M,FLAGS.N);
train_writer.add_summary(summary, i);
print('Training Accuracy at step %s: %s' % (i, acc));
if(acc >= 0.99):
print('Learning Done!!');
print('Optimal Path is as followed.');
print(geopath_set[compet_idx[winner_idx]]);
task1_optimal_path=geopath_set[compet_idx[winner_idx]];
break;
"""
geopath_sum=np.zeros((len(geopath),len(geopath[0])),dtype=float);
for j in range(len(geopath_set)):
for k in range(len(geopath)):
for l in range(len(geopath[0])):
geopath_sum[k][l]+=geopath_set[j][k][l];
print(geopath_sum);
"""
iter_task1=i;
# Fix task1 Optimal Path
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if(task1_optimal_path[i,j]==1.0):
fixed_list[i,j]='1';
# Get variables of fixed list
var_list_to_fix=[];
#var_list_to_fix=[]+output_weights+output_biases;
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if(fixed_list[i,j]=='1'):
var_list_to_fix+=weights_list[i,j]+biases_list[i,j];
var_list_fix=pathnet.parameters_backup(var_list_to_fix);
"""
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if(task1_optimal_path[i,j]==1.0):
fixed_list[i,j]='0';
"""
# parameters placeholders and ops
var_fix_ops=np.zeros(len(var_list_to_fix),dtype=object);
var_fix_placeholders=np.zeros(len(var_list_to_fix),dtype=object);
for i in range(len(var_list_to_fix)):
var_fix_placeholders[i]=tf.placeholder(var_list_to_fix[i].dtype,shape=var_list_to_fix[i].get_shape());
var_fix_ops[i]=var_list_to_fix[i].assign(var_fix_placeholders[i]);
## TASK 2
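  # Modules on task 1's winning path are now frozen ('1' in fixed_list); task 2 only
  # trains the remaining modules plus the re-initialized output layer, reusing task 1's features.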
# Need to learn variables
var_list_to_learn=[]+output_weights+output_biases;
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if (fixed_list[i,j]=='0'):
var_list_to_learn+=weights_list[i,j]+biases_list[i,j];
for i in range(FLAGS.L):
for j in range(FLAGS.M):
if(fixed_list[i,j]=='1'):
tmp=biases_list[i,j][0];
break;
break;
# Initialization
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
tf.global_variables_initializer().run()
# Update fixed values
pathnet.parameters_update(sess,var_fix_placeholders,var_fix_ops,var_list_fix);
# GradientDescent
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(cross_entropy,var_list=var_list_to_learn);
# Generating randomly geopath
geopath_set=np.zeros(FLAGS.candi,dtype=object);
for i in range(FLAGS.candi):
geopath_set[i]=pathnet.get_geopath(FLAGS.L,FLAGS.M,FLAGS.N);
# parameters placeholders and ops
var_update_ops=np.zeros(len(var_list_to_learn),dtype=object);
var_update_placeholders=np.zeros(len(var_list_to_learn),dtype=object);
for i in range(len(var_list_to_learn)):
var_update_placeholders[i]=tf.placeholder(var_list_to_learn[i].dtype,shape=var_list_to_learn[i].get_shape());
var_update_ops[i]=var_list_to_learn[i].assign(var_update_placeholders[i]);
acc_geo=np.zeros(FLAGS.B,dtype=float);
summary_geo=np.zeros(FLAGS.B,dtype=object);
for i in range(FLAGS.max_steps):
# Select Candidates to Tournament
compet_idx=range(FLAGS.candi);
np.random.shuffle(compet_idx);
compet_idx=compet_idx[:FLAGS.B];
# Learning & Evaluating
for j in range(len(compet_idx)):
# Shuffle the data
idx=range(len(tr_data2));
np.random.shuffle(idx);
tr_data2=tr_data2[idx];tr_label2=tr_label2[idx];
geopath_insert=np.copy(geopath_set[compet_idx[j]]);
for l in range(FLAGS.L):
for m in range(FLAGS.M):
if(fixed_list[l,m]=='1'):
geopath_insert[l,m]=1.0;
# Insert Candidate
pathnet.geopath_insert(sess,geopath_update_placeholders,geopath_update_ops,geopath_insert,FLAGS.L,FLAGS.M);
acc_geo_tr=0;
for k in range(FLAGS.T):
summary_geo_tr, _, acc_geo_tmp = sess.run([merged, train_step,accuracy], feed_dict={x:tr_data2[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:],y_:tr_label2[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:]});
acc_geo_tr+=acc_geo_tmp;
acc_geo[j]=acc_geo_tr/FLAGS.T;
summary_geo[j]=summary_geo_tr;
# Tournament
winner_idx=np.argmax(acc_geo);
acc=acc_geo[winner_idx];
summary=summary_geo[winner_idx];
# Copy and Mutation
for j in range(len(compet_idx)):
if(j!=winner_idx):
geopath_set[compet_idx[j]]=np.copy(geopath_set[compet_idx[winner_idx]]);
geopath_set[compet_idx[j]]=pathnet.mutation(geopath_set[compet_idx[j]],FLAGS.L,FLAGS.M,FLAGS.N);
train_writer.add_summary(summary, i);
print('Training Accuracy at step %s: %s' % (i, acc));
if(acc >= 0.99):
print('Learning Done!!');
print('Optimal Path is as followed.');
print(geopath_set[compet_idx[winner_idx]]);
task2_optimal_path=geopath_set[compet_idx[winner_idx]];
break;
"""
geopath_sum=np.zeros((len(geopath),len(geopath[0])),dtype=float);
for j in range(len(geopath_set)):
for k in range(len(geopath)):
for l in range(len(geopath[0])):
geopath_sum[k][l]+=geopath_set[j][k][l];
print(geopath_sum);
"""
iter_task2=i;
overlap=0;
for i in range(len(task1_optimal_path)):
for j in range(len(task1_optimal_path[0])):
if(task1_optimal_path[i,j]==task2_optimal_path[i,j])&(task1_optimal_path[i,j]==1.0):
overlap+=1;
print("Entire Iter:"+str(iter_task1+iter_task2)+",TASK1:"+str(iter_task1)+",TASK2:"+str(iter_task2)+",Overlap:"+str(overlap));
train_writer.close()
test_writer.close()
def main(_):
FLAGS.log_dir+=str(int(time.time()));
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fake_data', nargs='?', const=True, type=bool,
default=False,
help='If true, uses fake data for unit testing.')
parser.add_argument('--learning_rate', type=float, default=0.05,
help='Initial learning rate')
parser.add_argument('--max_steps', type=int, default=10000,
help='Number of steps to run trainer.')
parser.add_argument('--dropout', type=float, default=0.9,
help='Keep probability for training dropout.')
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
parser.add_argument('--log_dir', type=str, default='/tmp/tensorflow/pathnet/binary_mnist/pathnet/1_3_1_2',
help='Summaries log directry')
parser.add_argument('--M', type=int, default=10,
help='The Number of Modules per Layer')
parser.add_argument('--L', type=int, default=3,
help='The Number of Layers')
parser.add_argument('--N', type=int, default=3,
help='The Number of Selected Modules per Layer')
parser.add_argument('--T', type=int, default=50,
help='The Number of epoch per each geopath')
parser.add_argument('--batch_num', type=int, default=16,
help='The Number of batches per each geopath')
parser.add_argument('--filt', type=int, default=20,
help='The Number of Filters per Module')
parser.add_argument('--candi', type=int, default=20,
help='The Number of Candidates of geopath')
parser.add_argument('--B', type=int, default=2,
help='The Number of Candidates for each competition')
parser.add_argument('--a1', type=int, default=1,
help='The first class of task1')
parser.add_argument('--a2', type=int, default=3,
help='The second class of task1')
parser.add_argument('--b1', type=int, default=1,
help='The first class of task2')
parser.add_argument('--b2', type=int, default=2,
help='The second class of task2')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| StarcoderdataPython |
4816046 | name = "mpesa" | StarcoderdataPython |
3224567 | <filename>Plots/plot_timing_histo.py
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (13, 6),
'figure.autolayout': True,
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
month="Aug2021"
def get_timing(lang, test):
result = None
with open("../Results/timing_results_"+month+".txt", "r") as fid:
lines = fid.readlines()
for line in lines:
if (lang in line) and (test in line):
a, b, result = line.split(",")
break
return result
languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java"]
#languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java", "Scala"]
test_cases = ["copy_matrix", "look_and_say", "iterative_fibonacci", "recursive_fibonacci", "matrix_multiplication", "evaluate_functions", "belief_propagation", "markov_chain", "laplace_equation", "munchauser_number", "pernicious_number"]
num_lang = len(languages)
num_test = len(test_cases)
A = np.empty((num_lang,num_test,))
B = np.zeros((num_lang,num_test,))
A[:] = np.nan
i = 0
for lang in languages:
j = 0
for test in test_cases:
result = get_timing(lang, test)
if result:
A[i,j] = float(result)
j += 1
i += 1
A = np.ma.masked_invalid(A)
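# Normalize each test case by the first language's (C) timing so the bars show relative
# slowdown; if the C baseline is 0.0, fall back to exponentiating the raw column instead.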
for j in range(num_test):
if A[0,j] == 0.0:
A[:,j] = np.exp(A[:,j])
else:
coef = A[0,j]
A[:,j] = A[:,j] / coef
data_sets = [A[j,:] for j in range(num_lang)]
colors = ["blue", "orange", "green", "yellow", "purple", "red", "pink", "olive", "brown", "gray", "lime"]
fig, ax = plt.subplots(figsize=(15.0, 7.0))
pos = np.arange(num_test)
bar_width = 0.085
i = 0
for a in data_sets:
ax.bar(pos + (i+1)*bar_width, a, bar_width, color=colors[i])
i += 1
plt.yscale('log')#, nonposy='clip')
ax.yaxis.grid()
#plt.legend(loc='best')
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(languages, loc='center left', bbox_to_anchor=(1, 0.5))
#plt.legend(languages, loc='upper center')
ax.set_xticks(pos)
ax.set_xticklabels(test_cases, rotation=45)
plt.savefig("fig_languages_histo_"+month+".png", bbox_inches = "tight")
plt.show()
| StarcoderdataPython |
1678536 | <gh_stars>1-10
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class EggheadCourseIE(InfoExtractor):
IE_DESC = 'egghead.io course'
IE_NAME = 'egghead:course'
_VALID_URL = r'https://egghead\.io/courses/(?P<id>[a-zA-Z_0-9-]+)'
_TEST = {
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
'playlist_count': 29,
'info_dict': {
'id': 'professor-frisby-introduces-composable-functional-javascript',
'title': '<NAME> Introduces Composable Functional JavaScript',
'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
},
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._html_search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'title')
ul = self._search_regex(r'(?s)<ul class="series-lessons-list">(.*?)</ul>', webpage, 'session list')
found = re.findall(r'(?s)<a class="[^"]*"\s*href="([^"]+)">\s*<li class="item', ul)
entries = [self.url_result(m) for m in found]
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'description': self._og_search_description(webpage),
'entries': entries,
}
| StarcoderdataPython |
36256 | <gh_stars>1-10
import numpy as np
import math
from pandas import DataFrame
def min_rw_index(prices, start, end):
"""
Searches min price index inside window [start,end]
:param prices: in list format
:param start: window start index
:param end: window end index
:return:
"""
matching_index = start
for i in range(start, end + 1):
if prices[matching_index] > prices[i]:
matching_index = i
return matching_index
def max_rw_index(prices, start, end):
"""
    Searches max price index inside window [start,end]
:param prices: in list format
:param start: window start index
:param end: window end index
:return:
"""
matching_index = start
for i in range(start, end + 1):
if prices[matching_index] < prices[i]:
matching_index = i
return matching_index
def get_closest_resistance(values_and_indices, price, current_index):
values = values_and_indices[0]
indices = values_and_indices[1]
value = 10000000
resistance_index = -1
for i in range(len(values)):
avg = np.array(values[i]).mean()
if price <= avg <= value and min(indices[i]) <= current_index:
value = avg
resistance_index = i
return value, resistance_index
def get_closest_support(values_and_indices, price, current_index):
values = values_and_indices[0]
indices = values_and_indices[1]
value = -10000000
support_index = -1
for i in range(len(values)):
avg = np.array(values[i]).mean()
if value <= avg <= price and min(indices[i]) <= current_index:
value = avg
support_index = i
return value, support_index
class TA:
data: DataFrame
def __init__(self, data):
self.data = data.reset_index(drop=False)
def run(self, callback, user_data):
close_prices = self.data["close"].to_list()
for i in range(len(close_prices)):
callback(self, i, close_prices[i], user_data)
# PATTERNS
def candle_directions(self, tail=0):
if tail == 0:
tail = len(self.data['close'])
close_prices = self.data['close'].tail(tail).to_list()
open_prices = self.data['open'].tail(tail).to_list()
colors = tail * [1]
for i in range(tail):
if close_prices[i] < open_prices[i]:
colors[i] = -1
return colors
def reversals(self):
close_prices = self.data['close'].to_list()
open_prices = self.data['open'].to_list()
r = len(close_prices) * [0]
for i in range(2, len(close_prices)):
min_0 = min([open_prices[i - 2], close_prices[i - 2]])
min_1 = min([open_prices[i - 1], close_prices[i - 1]])
min_2 = min([open_prices[i - 0], close_prices[i - 0]])
if min_1 < min_0 and min_1 < min_2:
r[i] = -1
continue
            max_0 = max([open_prices[i - 2], close_prices[i - 2]])
            max_1 = max([open_prices[i - 1], close_prices[i - 1]])
            max_2 = max([open_prices[i - 0], close_prices[i - 0]])
if max_1 > max_0 and max_1 > max_2:
r[i] = 1
return r
# INDICATORS
def resistance_lines(self, resistance_type, threshold=0.02):
"""
Support occurs when falling prices stop, change direction, and begin to
rise. Support is often viewed as a “floor” which is supporting, or
holding up, prices.
Resistance is a price level where rising prices stop, change direction,
and begin to fall. Resistance is often viewed as a “ceiling” keeping
prices from rising higher.
If price breaks support or resistance, the price often continues to the
next level of support or resistance. Support and resistance levels are
not always exact; they are usually a zone covering a small range of prices
so levels can be breached, or pierced, without necessarily being broken.
As a result, support/resistance levels help identify possible points where
price may change directions.
:param resistance_type: 's' for support lines, 'r' for resistance lines
:param threshold:
:return:
"""
values, ids = [], []
open_prices = self.data["open"].to_list()
close_prices = self.data["close"].to_list()
for i in range(1, len(open_prices) - 1):
# find minima/maxima
t_0 = min(open_prices[i - 1], close_prices[i - 1])
t_1 = min(open_prices[i + 0], close_prices[i + 0])
t_2 = min(open_prices[i + 1], close_prices[i + 1])
if resistance_type == 'r':
t_0 = max(open_prices[i - 1], close_prices[i - 1])
t_1 = max(open_prices[i + 0], close_prices[i + 0])
t_2 = max(open_prices[i + 1], close_prices[i + 1])
check = t_1 >= t_0 and t_1 >= t_2
if resistance_type == "s":
check = t_1 <= t_0 and t_1 <= t_2
if check:
# check if this one belongs to past support points
found = False
for j in range(len(values)):
if abs((np.mean(values[j]) - t_1) / t_1) <= threshold:
values[j].append(t_1)
ids[j].append(i)
found = True
break
if not found:
values.append([t_1])
ids.append([i])
return values, ids
def rsi(self, initial_size=14, window_size=14):
"""
The relative strength index (RSI) is most commonly used to indicate
temporarily overbought or oversold conditions in a market.
A market is overbought when the RSI value is over 70 and indicates
oversold conditions when RSI readings are under 30.
A weakness of the RSI is that sudden, sharp price movements can cause
it to spike repeatedly up or down, and, thus, it is prone to giving
false signals. However, if those spikes or falls show a trading
confirmation when compared with other signals, it could signal an entry
or exit point.
:param initial_size:
:param window_size:
:return:
"""
price = self.data["close"].to_list()
gain = len(price) * [0]
loss = len(price) * [0]
for i in range(1, len(price)):
if price[i] > price[i - 1]:
gain[i] = price[i] - price[i - 1]
else:
loss[i] = price[i - 1] - price[i]
average_gain = np.mean(gain[:initial_size + 1])
average_loss = np.mean(loss[:initial_size + 1])
rsi = len(price) * [50]
for i in range(initial_size, len(price)):
average_gain = (average_gain * (window_size - 1) + gain[i]) / window_size
average_loss = (average_loss * (window_size - 1) + loss[i]) / window_size
rs = average_gain
if average_loss != 0:
rs = rs / average_loss
rsi[i] = 100 - 100 / (1 + rs)
return rsi
def bollinger_bands(self, window_size=10, num_of_std=5):
"""
Bollinger Bands are a form of technical analysis that traders
use to plot trend lines that are two standard deviations away
from the simple moving average price of a security. The goal is
to help a trader know when to enter or exit a position by identifying
when an asset has been overbought or oversold.
:param window_size:
:param num_of_std:
:return:
"""
price = self.data["close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
lower_band = rolling_mean - (rolling_std * num_of_std)
return rolling_mean, upper_band, lower_band
def regional_locals(self, window_radius=15):
"""
Compute minima and maxima points within a rolling window
:param window_radius: rolling window half size (full size is 2w+1)
:return:
"""
prices = self.data["close"]
maxima = []
minima = []
for i in range(window_radius, len(prices) - window_radius):
if max_rw_index(prices, i - window_radius, i + window_radius) == i:
maxima.append(i)
elif min_rw_index(prices, i - window_radius, i + window_radius) == i:
minima.append(i)
return maxima, minima
def sma(self, window):
"""
Computes the Simple Moving Average given a rolling window size
:param window: window size
:return:
"""
prices = self.data["close"]
return prices.rolling(window=window).mean()
def ema(self, window):
"""
Computes the Exponential Moving Average
:param window:
:return:
"""
prices = self.data["close"]
# return prices.ewm(span=window).mean()
sma_w = self.sma(window)
mod_price = prices.copy()
mod_price.iloc[0:window] = sma_w[0:window]
return mod_price.ewm(span=window, adjust=False).mean()
def mac(self, short_window, long_window, average_type="sma"):
"""
Compute Moving Averages Crossovers
:param short_window:
:param long_window:
:param average_type:
:return:
"""
short = np.array(self.sma(short_window))
long = np.array(self.sma(long_window))
mac = short - long
signal = len(short) * [0]
for i in range(long_window + 1, len(signal)):
if mac[i] > 0 and mac[i - 1] < 0:
signal[i] = 1
elif mac[i] < 0 and mac[i - 1] > 0:
signal[i] = -1
return mac, signal
# MEASURES
def pct_change(self, window_size=1):
prices = self.data["close"]
return prices.pct_change(periods=window_size)
def max_in_range(self, start_index: int = 0, end_index: int = -1):
if end_index < 0:
end_index = len(self.data) - 1
prices = self.data["close"].to_list()
i = max_rw_index(prices, start_index, end_index)
return prices[i], i - start_index
def max_pct_in_range(self, start_index: int = 0, end_index: int = -1):
if end_index < 0:
end_index = len(self.data) - 1
prices = self.data["close"].to_list()
i = max_rw_index(prices, start_index, end_index)
return (prices[i] - prices[start_index]) / prices[start_index] * 100.0, i - start_index
def single_pct_change(self, start_index: int = 0, end_index: int = -1):
if end_index < 0:
end_index = len(self.data) - 1
prices = self.data["close"].to_list()
return (prices[end_index] - prices[start_index]) / prices[start_index] * 100.0
# SIMPLIFICATION
def pips(self, n=5, distance_type="euclidean"):
"""
Finds n Perceptually Important Points(PIPs)
The algorithm starts by characterizing the first and the
last observation as the first two PIPs. Subsequently, it
calculates the distance between all remaining observations
and the two initial PIPs and signifies as the third PIP the
one with the maximum distance.
:param n: total number of pips
:param distance_type: distance type between pips: "euclidean",
"perpendicular" or "vertical"
:return:
"""
def pip_euclidean_distance(xi, xt, xtt, pi, pt, ptt):
return math.sqrt((xt - xi) ** 2 + (pt - pi) ** 2) + math.sqrt((xtt - xi) ** 2 + (ptt - pi) ** 2)
def pip_perpendicular_distance(xi, xt, xtt, pi, pt, ptt):
s = (ptt - pt) / (xtt - xt)
c = pt - xt * s
return abs(s * xi + c - pi) / math.sqrt(s * s + 1)
def pip_vertical_distance(xi, xt, xtt, pi, pt, ptt):
s = (ptt - pt) / (xtt - xt)
c = pt - xt * s
return abs(s * xi + c - pi)
prices = self.data["close"]
pips = [0, len(prices) - 1]
# function to find pip that maximizes the distance between left and right
def pip(left, right):
maximum_distance = 0
maximum_distance_index = 0
for i in range(left + 1, right):
dist = pip_euclidean_distance(i, left, right, prices[i], prices[left], prices[right])
if dist > maximum_distance:
maximum_distance = dist
maximum_distance_index = i
return maximum_distance_index, maximum_distance
# generate pips
while len(pips) < n:
m = 0
mi = 0
for i in range(len(pips) - 1):
if pips[i + 1] - 1 > pips[i]:
mmi, mm = pip(pips[i], pips[i + 1])
if mm > m:
m = mm
mi = mmi
pips.append(mi)
pips.sort()
return pips
def decimate(self, k=18, t=0.5):
"""
:param k:
:param t:
:return:
"""
prices = self.data["close"]
def merge_cost(s1, s2):
cost = 0
A = prices[int(s1[0])]
B = prices[int(s2[1])]
for i in range(s1[0], s2[1] + 1):
a = (i - s1[0]) / (s2[1] - s1[0])
cost = cost + (prices[i] - (a * A + (1 - a) * B)) ** 2
return cost
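        # Bottom-up piecewise-linear simplification: start from two-point segments and
        # repeatedly merge the adjacent pair whose linear-interpolation error is smallest.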
segments = []
for i in range(int(len(prices) / 2)):
segments.append([i * 2, i * 2 + 1])
costs = (len(segments) - 1) * [0]
for i in range(len(costs)):
costs[i] = merge_cost(segments[i], segments[i + 1])
while len(segments) > len(prices) / k:
minI = min_rw_index(costs, 0, len(costs) - 1)
segments[minI][1] = segments[minI + 1][1]
del segments[minI + 1]
if minI > 0:
costs[minI - 1] = merge_cost(segments[minI - 1], segments[minI])
if len(segments) > minI + 1:
costs[minI] = merge_cost(segments[minI], segments[minI + 1])
if len(costs) - 1 > minI:
del costs[minI + 1]
else:
del costs[minI]
s = []
for i in range(len(segments)):
s.append(segments[i])
if i < len(segments) - 1:
s.append([segments[i][1], segments[i + 1][0]])
changed = True
while changed:
changed = False
# merge trends
for i in range(len(s) - 1):
if (prices[s[i][0]] - prices[s[i][1]]) * (prices[s[i + 1][0]] - prices[s[i + 1][1]]) >= 0:
s[i][1] = s[i + 1][1]
del s[i + 1]
changed = True
break
# fix extremes
for i in range(len(s) - 1):
if prices[s[i][0]] - prices[s[i][1]] < 0:
s[i][1] = s[i + 1][0] = max_rw_index(prices, s[i][0], s[i + 1][1])
else:
s[i][1] = s[i + 1][0] = min_rw_index(prices, s[i][0], s[i + 1][1])
# remove small variation segments
for i in range(len(s)):
if abs(prices[s[i][0]] - prices[s[i][1]]) < t:
changed = True
if i == 0:
s[i + 1][0] = s[i][0]
elif i == len(s) - 1:
s[i - 1][1] = s[i][1]
else:
s[i - 1][1] = s[i + 1][0]
del s[i]
break
l = []
for k in s:
l.append(k[0])
l.append(s[-1][1])
return l
# TODO
def hsars(self, x=0.05, s=2):
"""
Horizontal Support And Resistance levelS
Input are regional locals
:param x: desired percentage that will give the bounds for the HSARs
:param s:
:return:
"""
prices = self.data["close"]
lower_bound = min(prices) / (1 + x / 2)
upper_bound = max(prices) * (1 + x / 2)
# approximate number of bins
approx_n = math.log(upper_bound / lower_bound) / math.log(1 + x)
# actual number of bins
n = int(approx_n + 0.5)
# actual percentage for each bin
actual_pct = (abs(upper_bound / lower_bound)) ** (1 / n) - 1
bounds = []
for i in range(n + 1):
            bounds.append(lower_bound * (1 + actual_pct) ** i)
freq = len(bounds) * [0]
for p in prices:
for i in range(len(bounds) - 1):
if bounds[i] <= p < bounds[i + 1]:
freq[i] = freq[i] + 1
sar = []
for i in range(len(freq)):
if freq[i] >= s:
sar.append([bounds[i], bounds[i + 1]])
return sar
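

# --- Illustrative usage sketch (not part of the original module). The synthetic
# --- random-walk data, column names and window sizes below are assumptions made
# --- purely for the demo; real callers would pass their own OHLC DataFrame.
if __name__ == "__main__":
    np.random.seed(0)
    close = 100 + np.cumsum(np.random.normal(0, 1, 200))
    frame = DataFrame({"open": close + np.random.normal(0, 0.5, 200), "close": close})

    ta = TA(frame)
    print("last RSI value:", ta.rsi()[-1])
    mac, signal = ta.mac(short_window=10, long_window=30)
    print("number of SMA crossovers:", int(np.sum(np.abs(signal))))
    print("perceptually important points:", ta.pips(n=5))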
| StarcoderdataPython |
1665943 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
tmp = ListNode(None)
tmp.next = head
start, end = tmp, tmp
for _ in range(n): start = start.next
while start.next:
start, end = start.next, end.next
end.next = end.next.next
return tmp.next
'''
Add a dummy head node and use two pointers, start and end.
First move start forward n nodes; then advance start and end together. When start.next is None, end.next is exactly the node that has to be removed.
''' | StarcoderdataPython |
121823 | <gh_stars>1-10
import unittest
import numpy as np
from matchernet.fn import LinearFn, LinearFnXU
class TestLinearFn(unittest.TestCase):
def setUp(self):
self.x = np.array([10, 20, 30], dtype=np.float32)
self.test_A_patterns = [
np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
np.array([[1, 2, np.inf]], dtype=np.float32)
]
def test_linear_dot(self):
for A in self.test_A_patterns:
f = LinearFn(A)
expected_dot = np.dot(A, self.x)
with self.subTest():
actual_dot = f.value(self.x)
self.assertIsNone(np.testing.assert_array_equal(expected_dot, actual_dot))
def test_linear_x(self):
for A in self.test_A_patterns:
f = LinearFn(A)
expected_dx = A
with self.subTest():
actual_dx = f.x(self.x)
self.assertIsNone(np.testing.assert_array_equal(expected_dx, actual_dx))
class TestLinearFnXU(unittest.TestCase):
def setUp(self):
self.x = np.array([10, 20, 30], dtype=np.float32)
self.u = np.array([5, 15, 25], dtype=np.float32)
self.test_A_patterns = [
np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
np.array([[1, 2, np.inf]], dtype=np.float32)
]
self.test_B_patterns = [
np.array([[6, 5, 4], [3, 2, 1]], dtype=np.float32),
np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=np.float32),
np.array([[np.inf, 2, 3]], dtype=np.float32)
]
def test_linear_XU_dot(self):
for A, B in zip(self.test_A_patterns, self.test_B_patterns):
f = LinearFnXU(A, B)
expected_dot = np.dot(A, self.x) + np.dot(B, self.u)
with self.subTest():
actual_dot = f.value(self.x, self.u)
self.assertIsNone(np.testing.assert_array_equal(expected_dot, actual_dot))
def test_linear_XU_dx(self):
for A, B in zip(self.test_A_patterns, self.test_B_patterns):
f = LinearFnXU(A, B)
expected_dx = A
with self.subTest():
actual_dx = f.x(self.x, self.u)
self.assertIsNone(np.testing.assert_array_equal(expected_dx, actual_dx))
def test_linear_XU_du(self):
for A, B in zip(self.test_A_patterns, self.test_B_patterns):
f = LinearFnXU(A, B)
expected_du = B
with self.subTest():
actual_du = f.u(self.x, self.u)
self.assertIsNone(np.testing.assert_array_equal(expected_du, actual_du))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3278029 | <gh_stars>0
from django.db import models
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django_countries.fields import CountryField
from django.contrib.auth.models import User
class League(models.Model):
name = models.CharField(max_length=255)
rating = models.IntegerField(default=0)
image_url = models.CharField(max_length=255)
def __str__(self):
return self.name
class Team(models.Model):
name = models.CharField(max_length=255)
rating = models.IntegerField(default=0)
image_url = models.CharField(max_length=255)
country = CountryField()
league = models.ForeignKey(
League, on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.name
class Chairman(models.Model):
name = models.CharField(max_length=255)
rating = models.IntegerField(default=0)
team = models.OneToOneField(
Team, on_delete=models.SET_NULL, blank=True, null=True)
image_url = models.CharField(max_length=255)
def __str__(self):
return self.name
class Manager(models.Model):
name = models.CharField(max_length=255)
rating = models.IntegerField(default=0)
team = models.OneToOneField(
Team, on_delete=models.SET_NULL, blank=True, null=True)
image_url = models.CharField(max_length=255)
def __str__(self):
return self.name
class Scout(models.Model):
name = models.CharField(max_length=255)
country = CountryField()
rating = models.IntegerField(default=0)
image_url = models.CharField(max_length=255)
hiring_fee = models.IntegerField(default=0)
transfer_fee = models.IntegerField(default=0)
def __str__(self):
return self.name
class Coach(models.Model):
    COACH_TYPE = (
('M', 'MENTAL'),
('P', 'PHYSICAL'),
)
name = models.CharField(max_length=255)
    type = models.CharField(max_length=2, choices=COACH_TYPE)
reputation = models.IntegerField(default=0)
country = CountryField()
flag_url = models.CharField(max_length=255)
image_url = models.CharField(max_length=255)
wage = models.IntegerField(default=0)
transfer_fee = models.IntegerField(default=0)
def __str__(self):
return self.name
class Office(models.Model):
name = models.CharField(max_length=255)
image_url = models.CharField(max_length=255)
purchase_cost = models.IntegerField(default=0)
running_cost = models.IntegerField(default=0)
player_capacity = models.IntegerField(default=0)
level = models.IntegerField(default=0)
rating = models.IntegerField(default=0)
def __str__(self):
return self.name
class Player(models.Model):
name = models.CharField(max_length=255)
image_url = models.CharField(max_length=255)
age = models.IntegerField(default=0)
current_ability = models.IntegerField(default=0)
potential_ability = models.IntegerField(default=0)
overall_ability = models.IntegerField(default=0)
performance = models.IntegerField(default=0)
happiness = models.IntegerField(default=0)
agency = models.CharField(max_length=255)
current_club = ForeignKey(
Team, on_delete=models.SET_NULL, blank=True, null=True)
interested_club = ManyToManyField(
Team, related_name="%(app_label)s_%(class)s_related")
current_contract = models.IntegerField(default=0)
value = models.IntegerField(default=0)
salary = models.IntegerField(default=0)
    signing_fee = models.IntegerField(default=0)
bonus = models.IntegerField(default=0)
wins = models.IntegerField(default=0)
country = CountryField()
flag_url = models.CharField(max_length=255)
def __str__(self):
return self.name
class MediaCompany(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Journalist(models.Model):
name = models.CharField(max_length=255)
country = CountryField()
company = models.OneToOneField(MediaCompany, on_delete=models.CASCADE)
image_url = models.CharField(max_length=255)
def __str__(self):
return self.name
class News(models.Model):
NEWS_TYPE = (
('G', 'GOOD'),
('B', 'BAD'),
)
title = models.CharField(max_length=255)
description = models.TextField()
journalist = ForeignKey(Journalist, on_delete=models.CASCADE)
news_type = models.CharField(max_length=2, choices=NEWS_TYPE)
player = ForeignKey(Player, on_delete=models.CASCADE)
news_date = models.DateField(auto_now_add=True)
def __str__(self):
return self.title
class UserCurrentTrack(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
player_list = models.ManyToManyField(Player)
current_mental_coach = models.TextField(blank=True, null=True)
current_physical_coach = models.TextField(blank=True, null=True)
current_office = models.TextField(blank=True, null=True)
upagrade_office_list = models.TextField(blank=True, null=True)
current_scout_list = models.TextField(blank=True, null=True)
base_commission = models.TextField(blank=True, null=True)
transfer_commission = models.TextField(blank=True, null=True)
sponsiorship_commission = models.TextField(blank=True, null=True)
miscellanious_income = models.TextField(blank=True, null=True)
miscellanious_expences = models.TextField(blank=True, null=True)
staff_wage = models.TextField(blank=True, null=True)
scout_hire_time = models.TextField(blank=True, null=True)
scout_hire_duration = models.TextField(blank=True, null=True)
physical_coach_hire_time = models.TextField(blank=True, null=True)
physical_coach_hire_duration = models.TextField(blank=True, null=True)
mental_coach_hire_time = models.TextField(blank=True, null=True)
mental_coach_hire_duration = models.TextField(blank=True, null=True)
current_week = models.TextField(blank=True, null=True)
agency_reputation = models.TextField(blank=True, null=True)
news_feed = models.TextField(blank=True, null=True)
negotiations = models.TextField(blank=True, null=True)
journalist_interactions = models.TextField(blank=True, null=True)
mail_box = models.TextField(blank=True, null=True)
def __str__(self):
return self.user.email
class Chat(models.Model):
CHAT_TYPE = (
('P', 'PLAYER'),
('C', 'CHAIRMAN'),
('M', 'MANAGER'),
)
message1 = models.TextField(blank=True, null=True)
message2 = models.TextField(blank=True, null=True)
message3 = models.TextField(blank=True, null=True)
message4 = models.TextField(blank=True, null=True)
reply = models.TextField(blank=True, null=True)
option1 = models.TextField(blank=True, null=True)
option2 = models.TextField(blank=True, null=True)
option3 = models.TextField(blank=True, null=True)
option4 = models.TextField(blank=True, null=True)
type = models.CharField(max_length=2, choices=CHAT_TYPE, blank=True, null=True)
def __str__(self):
return models.Model.__str__(self)
class ChatLog(models.Model):
ACTOR_TYPE = (
('Y', 'YOU'),
('B', 'BOT'),
)
user = models.ForeignKey(User, on_delete=models.CASCADE)
message = models.TextField()
actor = models.CharField(max_length=2, choices=ACTOR_TYPE)
date_added = models.DateTimeField(auto_now_add=True, blank=True)
player = models.ForeignKey(Player, blank=True, null=True , on_delete=models.CASCADE)
chairman = models.ForeignKey(Chairman, blank=True, null=True , on_delete=models.CASCADE)
manager = models.ForeignKey(Manager, blank=True, null=True , on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Meta:
ordering = ['-date_added']
| StarcoderdataPython |
164692 | <filename>grid/__init__.py
from __future__ import print_function # Only Python 2.x
import sys
import subprocess
import os
import syft
from grid.client import GridClient
from grid.websocket_client import WebsocketGridClient
from grid import utils as gr_utils
from grid import deploy
from grid.grid_network import GridNetwork
from grid.utils import connect_all_nodes
__all__ = ["workers", "connect_all_nodes", "syft"]
# ======= Providing a friendly API on top of Syft ===============
def encrypt(self, worker_1, worker_2, crypto_provider):
"""tensor.fix_prec().share()"""
return self.fix_prec().share(worker_1, worker_2, crypto_provider=crypto_provider)
syft.frameworks.torch.tensors.interpreters.native.TorchTensor.encrypt = encrypt
syft.messaging.plan.Plan.encrypt = encrypt
def request_decryption(self):
"""tensor.get().float_prec()"""
return self.get().float_prec()
syft.frameworks.torch.tensors.interpreters.native.TorchTensor.request_decryption = (
request_decryption
)
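
# Hedged usage sketch for the helpers above (assumes two syft workers `alice` and
# `bob` plus a `crypto_provider` worker already exist; hook/worker setup is outside
# this module):
#
#   shared = some_tensor.encrypt(alice, bob, crypto_provider)  # fix_prec().share(...)
#   plain = shared.request_decryption()                        # get().float_prec()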
# =============== Heroku related functions =======================
def run_commands_in(commands, logs, tmp_dir="tmp", cleanup=True, verbose=False):
assert len(commands) == len(logs)
gr_utils.execute_command("mkdir " + tmp_dir)
outputs = list()
cmd = ""
for i in range(len(commands)):
if verbose:
print(logs[i] + "...")
cmd = "cd " + str(tmp_dir) + "; " + commands[i] + "; cd ..;"
output = gr_utils.execute_command(cmd)
outputs.append(str(output))
if verbose:
print("\t" + str(output).replace("\n", "\n\t"))
if cleanup:
gr_utils.execute_command("rm -rf " + tmp_dir)
return outputs
def check_dependency(
lib="git", check="usage:", error_msg="Error: please install git.", verbose=False
):
if verbose:
sys.stdout.write("\tChecking for " + str(lib) + " dependency...")
output = gr_utils.execute_command(lib)
if check not in output:
raise Exception(error_msg)
if verbose:
print("DONE!")
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def launch_on_heroku(
grid_name: str,
app_type: str = "pg_rest_api",
verbose=True,
check_deps=True,
dev_user: str = "OpenMined",
branch: str = "dev",
):
"""Launches a node as a heroku application. User needs to be logged in to heroku prior to calling this function.
Args:
grid_name (str): The name of the node / Heroku application.
app_type (str): Type of node being deployed to heroku. Defaults to "pg_rest_api".
verbose (bool): Specifies logging level. Set true for more logs. Default to True.
check_deps (bool): Checks before deployment for local git, heroku and pip installations. Defaults to True.
dev_user (str): Github username of the user/ organization whose Grid repo will be used. Leave undefined to use 'OpenMined' repo.
branch (str): The default branch to use from the Grid repo of the defined dev_user. Leave undefined to use 'dev' branch.
Returns:
str: heroku application address (url)
"""
app_addr = "https://" + str(grid_name) + ".herokuapp.com"
if check_deps:
if verbose:
print("Step 0: Checking Dependencies")
check_dependency(
lib="git",
check="usage:",
error_msg="Missing Git command line dependency - please install it: https://gist.github.com/derhuerst/1b15ff4652a867391f03",
verbose=verbose,
)
check_dependency(
lib="heroku --version",
check="heroku/7",
error_msg="Missing Heroku command line dependency - please install it: https://toolbelt.heroku.com/",
verbose=verbose,
)
check_dependency(
lib="pip",
check="\nUsage: \n pip <command> [options]",
error_msg="Missing Pip command line dependency - please install it: https://www.makeuseof.com/tag/install-pip-for-python/",
verbose=verbose,
)
if verbose:
sys.stdout.write("\tChecking to see if heroku is logged in...")
res = gr_utils.execute_command("heroku create app")
if res == "Enter your Heroku credentials:\n":
raise Exception(
"You are not logged in to Heroku. Run 'heroku login'"
" from the command line and follow the instructions. "
"If you need to create an account. Don't forget to add "
" your credit card. Even though you can use Grid on the"
" FREE tier, it won't let you activate a Redis database "
"without adding your credit card information to your account."
)
if verbose:
print("DONE!")
if verbose:
print("\nStep 1: Making sure app name '" + grid_name + "' is available")
try:
output = list(execute(("heroku create " + grid_name).split(" ")))
if verbose:
print("\t" + str(output))
except:
if os.name != "nt":
output = list(execute(("rm -rf tmp").split(" ")))
if verbose:
print("\t" + str(output))
print("APP EXISTS: You can already connect to your app at " + app_addr)
return app_addr
commands = list()
logs = list()
if verbose:
print(
"\nStep 2: Making Sure Postgres Database Can Be Spun Up on Heroku (this can take a couple seconds)..."
)
try:
output = list(
# execute(("heroku addons:create heroku-postgresql:hobby-dev -a " + grid_name).split(" ")),
execute(
(
"heroku addons:create heroku-postgresql:hobby-dev -a " + grid_name
).split(" ")
)
)
if verbose:
print("\t" + str(output))
except:
try:
print("Cleaning up...")
output = list(execute(("rm -rf tmp").split(" ")))
output = list(
execute(
("heroku destroy " + grid_name + " --confirm " + grid_name).split(
" "
)
)
)
print("Success in cleaning up!")
except:
print(
"ERROR: cleaning up... good chance Heroku still has the app or the tmp directory still exists"
)
msg = (
"""Creating heroku-postgresql:hobby-dev on ⬢ """
+ grid_name
+ """... ⣾
⣽⣻⢿⡿⣟⣯⣷⣾⣽Creating heroku-postgresql:hobby-dev on ⬢ """
+ grid_name
+ """... !
▸ Please verify your account to install this add-on plan (please enter a
▸ credit card) For more information, see
▸ https://devcenter.heroku.com/categories/billing Verify now at
▸ https://heroku.com/verify
NOTE: OpenMined's Grid nodes can be run on the FREE tier of Heroku,
but you still have to enter a credit card on Heroku to spin up FREE nodes."""
)
raise Exception(msg)
if verbose:
print("\nStep 3: Cleaning up heroku/postgres checks...")
output = list(
execute(("heroku destroy " + grid_name + " --confirm " + grid_name).split(" "))
)
commands = list()
logs = list()
logs.append("\nStep 4: cleaning up git")
commands.append("rm -rf .git")
# Using the dev user and branch specified. Fetches, clones and then deploys the branch as a heroku application
# If no dev user/ branch is defined, then it defaults to OpenMined user and dev branch
logs.append("Step 5: cloning heroku app code from Github")
if branch:
commands.append(
"git clone -b {} https://github.com/{}/Grid".format(branch, dev_user)
)
else:
commands.append("git clone -b dev https://github.com/{}/Grid".format(dev_user))
logs.append("Checking out dev version...")
commands.append("git checkout origin/dev")
logs.append("Step 6: copying app code from cloned repo")
commands.append("cp -r Grid/app/{}/* ./".format(app_type))
logs.append("Step 7: removing the rest of the cloned code")
commands.append("rm -rf Grid")
logs.append("Step 8: Initializing new github (for Heroku)")
commands.append("git init")
logs.append("Step 9: Adding files to heroku github")
commands.append("git add .")
logs.append("Step 10: Committing files to heroku github")
commands.append('git commit -am "init"')
run_commands_in(commands, logs, cleanup=False, verbose=verbose)
logs = list()
commands = list()
logs.append(
"\nStep 11: Pushing code to Heroku (this can take take a few seconds)..."
)
commands.append("heroku create " + grid_name)
logs.append("Step 12: Creating Postgres database... (this can take a few seconds)")
commands.append(f"heroku addons:create heroku-postgresql:hobby-dev -a {grid_name}")
logs.append(
"Step 13: Pushing code to Heroku (this can take take a few minutes"
" - if you're running this in a Jupyter Notebook you can watch progress "
"in the notebook server terminal)..."
)
commands.append("git push heroku master")
logs.append("Step 14: Create Database")
commands.append(f"heroku run -a {grid_name} flask db upgrade")
logs.append("Step 15: Cleaning up!")
commands.append("rm -rf .git")
run_commands_in(commands, logs, cleanup=True, verbose=verbose)
print("SUCCESS: You can now connect to your app at " + app_addr)
return app_addr
| StarcoderdataPython |
6418 | <filename>appr/commands/logout.py
from __future__ import absolute_import, division, print_function
from appr.auth import ApprAuth
from appr.commands.command_base import CommandBase, PackageSplit
class LogoutCmd(CommandBase):
name = 'logout'
help_message = "logout"
def __init__(self, options):
super(LogoutCmd, self).__init__(options)
self.status = None
self.registry_host = options.registry_host
self.package_parts = options.package_parts
pname = self.package_parts.get('package', None)
namespace = self.package_parts.get('namespace', None)
self.package = None
if pname:
self.package = "%s/%s" % (namespace, pname)
elif namespace:
self.package = namespace
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
parser.add_argument('registry', nargs='?', default=None, action=PackageSplit,
help="registry url: quay.io[/namespace][/repo]\n" +
"If namespace and/or repo are passed, creds only logout for them")
def _call(self):
client = self.RegistryClient(self.registry_host)
ApprAuth().delete_token(client.host, scope=self.package)
self.status = "Logout complete"
if self.registry_host != '*':
self.status += " from %s" % self.registry_host
def _render_dict(self):
return {"status": self.status, 'host': self.registry_host, "scope": self.package}
def _render_console(self):
return " >>> %s" % self.status
| StarcoderdataPython |
3244735 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Libaddon for Anki
#
# Copyright (C) 2018 <NAME>. <https://glutanimate.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version, with the additions
# listed at the end of the accompanied license file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# NOTE: This program is subject to certain additional terms pursuant to
# Section 7 of the GNU Affero General Public License. You should have
# received a copy of these additional terms immediately following the
# terms and conditions of the GNU Affero General Public License which
# accompanied this program.
#
# If not, please request a copy through one of the means of contact
# listed here: <https://glutanimate.com/contact/>.
#
# Any modifications to this file must keep this entire header intact.
"""
Package-wide constants
In addition to defining a number of constants specific to libaddon, this
module also provides access to all constants in the parent add-on
(if any). Add-on specific constants take precedence and overwrite
constants in this module.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__all__ = ["ADDON_NAME", "ADDON_ID", "ADDON_VERSION", "LICENSE",
"LIBRARIES", "AUTHORS", "CONTRIBUTORS", "SPONSORS",
"MAIL_AUTHOR", "LINKS", "PATRONS", "PATRONS_TOP"]
# ADD-ON SPECIFIC CONSTANTS
# Define placeholders, in case add-on specific consts are incomplete
ADDON_NAME = "Glutanimate's add-on"
ADDON_ID = "0000000000"
ADDON_VERSION = "0.1.0"
LICENSE = "GNU AGPLv3"
LIBRARIES = ()
AUTHORS = (
{"name": "<NAME>. <https//glutanimate.com/>", "years": "2018",
"contact": "https://glutanimate.com"},
) # main developers
CONTRIBUTORS = () # single code contributions
SPONSORS = () # sponsors / development commissions
# Merge in add-on specific consts:
try:
from ..consts import * # noqa: F401
from ..consts import LINKS as ADDON_LINKS
except ImportError:
pass
# ADD-ON AGNOSTIC CONSTANTS
# Social
_mail_author_snippets = ["ankiglutanimate", "ατ", "gmail.com"] # anti-spam
MAIL_AUTHOR = "".join(_mail_author_snippets).replace("ατ", "@")
LINKS = {
"patreon": "https://www.patreon.com/glutanimate",
"coffee": "http://ko-fi.com/glutanimate",
"description": "https://ankiweb.net/shared/info/{}".format(ADDON_ID),
"rate": "https://ankiweb.net/shared/review/{}".format(ADDON_ID),
"twitter": "https://twitter.com/glutanimate",
"youtube": "https://www.youtube.com/c/glutanimate"
}
LINKS.update(ADDON_LINKS)
# Credits
# https://www.patreon.com/glutanimate
# automatically sorted:
PATRONS = ("<NAME>", "<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "<NAME>", "PapelMagico",
"<NAME>", "<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "spiraldancing",
"<NAME>", "<NAME>", "JessC",
"<NAME>", "Andrew", "<NAME>", "<NAME>")
PATRONS_TOP = ("<NAME>", "Blacky 372", "<NAME>", "The Academy")
| StarcoderdataPython |
174456 | <filename>python/class-challenges/src/image_analysis.py
# Built-int imports
import sys
# External imports
import cv2 as cv
import numpy as np
class ImageAnalysis:
"""
This is a python class that was implemented to be able to perform
a correct analysis to an image, this class has a method to plot the
histogram of a specific image and also has another method which
allows to change the gamma factor of a specific image.
:param image: BGR-image to plot its histogram or to change its gamma factor
"""
def __init__(self, image):
self.image = image
def nothing(self,x):
"""
nothing function that is necessary for the trackbar event
"""
pass
def create_trackbar(self,name,title):
return cv.createTrackbar(
name,
title,
1,
20,
self.nothing
)
def plot_histogram(self):
"""
This is a method which allows to plot the histogram of a
specific bgr image.
"""
gray_image = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
# Define the dimensions of the plot
wbins = 256
hbins = 256
#cv.calcHist(images, channels, mask, histSize, ranges)
histr = cv.calcHist([gray_image],[0],None,[hbins],[0,wbins])
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(histr)
hist_image = np.zeros([hbins, wbins], np.uint8)
# Plot the lines to make the histogram
for w in iter(range(wbins)):
binVal = histr[w]
intensity = int(binVal*(hbins-1)/max_val)
cv.line(hist_image, (w,hbins), (w,hbins-intensity),255)
cv.imshow("IMAGE'S HISTOGRAM",hist_image)
def set_gamma_value(self, gamma):
"""
This is a method to set and adjust the gamma value of a
        specific BGR image.
"""
image = self.image.copy()
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv.LUT(image, table)
def get_gamma_value(self):
"""
This is a method which allows to iterate using a trackbar
in order to change the gamma value of an image and thus to find
the better value of gamma.
"""
cv.namedWindow("Image Gamma adjust")
self.create_trackbar("Gamma-X10", "Image Gamma adjust")
while(True):
gamma_value = 0.1*cv.getTrackbarPos("Gamma-X10","Image Gamma adjust")
if (gamma_value != 0):
gamma_image = self.set_gamma_value(gamma_value)
else:
gamma_image = self.set_gamma_value(1)
cv.imshow("Image Gamma adjust", gamma_image)
if (cv.waitKey(1) & 0xFF == ord('q')):
break
cv.destroyAllWindows()
def main():
pass
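    # Hedged usage sketch (the image path is an assumption, not part of the script):
    #   image = cv.imread("path/to/image.png")
    #   analysis = ImageAnalysis(image)
    #   analysis.plot_histogram()      # draw the grayscale histogram window
    #   analysis.get_gamma_value()     # interactively tune gamma with the trackbar
    #   cv.waitKey(0)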
if __name__=='__main__':
sys.exit(main()) | StarcoderdataPython |
69822 | import mysql.connector
PORT = "3306"
DATABASE = "stockDB"
SERVER_NO = 1
SERVER = {"host": ["localhost", "cadb.cc0uav02d2tx.ap-southeast-2.rds.amazonaws.com"],
"user": ["root", "admin"],
"password": ["<PASSWORD>", "<PASSWORD>"]}
def my_connector(host=SERVER["host"][SERVER_NO],
user=SERVER["user"][SERVER_NO],
password=SERVER["password"][SERVER_NO],
connect_database=True):
if connect_database:
conn = mysql.connector.connect(
host=host,
user=user,
passwd=password,
port=PORT,
database=DATABASE
)
else:
conn = mysql.connector.connect(
host=host,
user=user,
passwd=password,
port=PORT
)
return conn
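

# --- Illustrative usage sketch (not part of the original module). It assumes a
# --- reachable MySQL server that accepts the credentials configured above.
if __name__ == "__main__":
    conn = my_connector(connect_database=False)  # connect without selecting stockDB
    cursor = conn.cursor()
    cursor.execute("SHOW DATABASES")
    print([row[0] for row in cursor.fetchall()])
    cursor.close()
    conn.close()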
| StarcoderdataPython |
3313741 | from flask import Flask, render_template, request, redirect, session, url_for
from flask_socketio import SocketIO, emit, join_room, leave_room
app = Flask(__name__)
app.config['SECRET_KEY'] = '<KEY>'
socketio = SocketIO(app, manage_session=False)
socketio.init_app(app, cors_allowed_origins="*")
data = {}
users = []
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/admin211', methods=['GET', 'POST'])
def admin():
return render_template('admin.html')
@app.route('/update_vidurl', methods=['GET', 'POST'])
def update_vidurl():
data['video_url'] = str(request.form['videoUrl']).strip(' \t\n\r') or None
return render_template('chatroom.html', session = session, data=data)
@app.route('/chat', methods=['GET', 'POST'])
def chat():
prev_url_endpoint = str(request.referrer).split('/')[-1]
admin_url_endpoint = str(url_for('admin')).strip('/')
if(request.method == 'POST'):
username = str(request.form['username']).strip(' \t\n\r').capitalize() or None
room = str(request.form['room']).strip(' \t\n\r').upper() or None
if("password" in request.form):
pwd = str(request.form['password']).strip(' \t\n\r') or None
if(username in users):
error = "Username already logged in. Try another name."
return render_template('index.html', error=error)
if(username == None or room == None):
error = "Check your credentials"
return render_template('index.html', error=error)
elif(prev_url_endpoint == admin_url_endpoint):
if(username =='Admin' and pwd == 'password'):
session['is_admin_logged_in'] = True
data['video_url'] = request.form['videoUrl']
session['username'] = username
session['room'] = room
return render_template('chatroom.html', session = session, data = data)
else:
return redirect(url_for('admin'))
elif(request.method=='POST' and request.referrer != url_for('admin')):
if(username == "Admin"):
error = "Choose another name"
return render_template('index.html', error=error)
session['username'] = username
session['room'] = room
return render_template('chatroom.html', session = session, data = data)
else:
return redirect(url_for('index'))
if(request.method == 'GET'):
return redirect(url_for('index'))
@socketio.on('join', namespace='/chat')
def join(message):
room = session.get('room')
username = session.get('username')
join_room(room)
users.append(username)
print(users)
emit('status', {'msg': session.get('username') + ' entered the room.'}, room=room)
@socketio.on('text', namespace='/chat')
def text(message):
print(message['msg'])
room = session.get('room')
emit('message', {'msg': session.get('username') + ' : ' + message['msg']}, room=room)
@socketio.on('left', namespace='/chat')
def left(message):
print(users)
room = session.get('room')
username = session.get('username')
if(username == "Admin"):
        data.clear()  # mutate the shared module-level dict in place; plain assignment would only bind a local
session['is_admin_logged_in'] = False
leave_room(room)
users.remove(username)
session.clear()
emit('status', {'msg': username + ' left the room.'}, room=room)
@socketio.on('disconnect', namespace='/chat')
def disconnect_user():
room = session.get('room')
username = session.get('username')
if(username == "Admin"):
        data.clear()  # mutate the shared module-level dict in place; plain assignment would only bind a local
session['is_admin_logged_in'] = False
leave_room(room)
users.remove(username)
session.clear()
emit('status', {'msg': 'Network issues...user: ' + username + ' disconnected.'}, room=room)
if __name__ == '__main__':
socketio.run(app, debug=True) | StarcoderdataPython |
3338212 | import unittest
import os
import os.path
from poly_juice.polyjuice import zip_folder
from poly_juice.lumberjack import Lumberjack
class TestZipFolder(unittest.TestCase):
"""
This test makes sure that the processed folders are successfully zipped.
"""
def setUp(self):
self.directory = os.path.dirname('tests/testOutput/')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def test_zips(self):
dicom_folders = ['tests/testInput/MRI/101_01_01_2010']
zip_dir = 'tests/testOutput'
log = Lumberjack()
zip_folder(dicom_folders, zip_dir, log)
result = check_zipped_folder()
self.assertTrue(result)
def tearDown(self):
os.remove('tests/testOutput/101_01_01_2010.zip')
print("Successfully removed tests/testOutput/101_01_01_2010.zip")
def check_zipped_folder() -> bool:
existing = os.path.exists("tests/testOutput/101_01_01_2010.zip")
return existing
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3306888 | import unittest
import sys
import os
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, path)
from src.translator import Translator
from src.base import Vector2D
class TranslationTest(unittest.TestCase):
def test_starting_out(self):
translator = Translator((100, 100))
v1 = Vector2D( [1, 1] )
translation1 = translator.translate(v1)
self.assertEqual(translation1, Vector2D( [60, 40] ))
v2 = Vector2D( [-20, 0] )
translation2 = translator.translate(v2)
self.assertEqual(translation2, Vector2D( [-150, 50] ))
class SpacingTest(unittest.TestCase):
def test_starting_out(self):
translator = Translator((100, 100))
self.assertEqual(translator.global_origin_vector, Vector2D( [50, 50] ))
self.assertEqual(translator.spacing, 10)
translator = Translator((400, 400))
self.assertEqual(translator.spacing, 40)
class OriginTest(unittest.TestCase):
def test_starting_out(self):
translator = Translator((100, 100))
self.assertEqual(translator.global_origin_vector, Vector2D( [50, 50] ))
translator.update(Vector2D( [10, 0] ), 1)
self.assertEqual(translator.spacing, 10)
self.assertEqual(translator.local_origin_vector, Vector2D( [150, 50] ))
translator.update(Vector2D( [5, 5] ), 1)
self.assertEqual(translator.spacing, 10)
self.assertEqual(translator.local_origin_vector, Vector2D( [100, 0] ))
class ScalingTest(unittest.TestCase):
def test_starting_out(self):
translator = Translator((100, 100))
translator.update(Vector2D( [0, 0] ), 0.5)
self.assertEqual(translator.spacing, 5)
self.assertEqual(translator.local_origin_vector, translator.global_origin_vector)
translator.update(Vector2D( [5, 5] ), 0.5)
self.assertEqual(translator.spacing, 5)
self.assertEqual(translator.local_origin_vector, Vector2D( [75, 25] ))
translator.update(Vector2D( [5, 5] ), 2)
self.assertEqual(translator.spacing, 20)
self.assertEqual(translator.local_origin_vector, Vector2D( [150, -50] ))
class VisibilityTest(unittest.TestCase):
def test_starting_out(self):
translator = Translator((100, 100))
v1 = Vector2D( [1, 1] )
self.assertTrue(translator.is_in_range(v1))
translator.update(Vector2D([10, 0]), 1)
self.assertFalse(translator.is_in_range(v1))
translator.update(Vector2D([0, 0]), 1)
v2 = Vector2D( [0, 5] )
self.assertTrue(translator.is_in_range(v2))
v2.y += 1
self.assertFalse(translator.is_in_range(v2))
def main():
unittest.main()
if __name__ == '__main__':
main() | StarcoderdataPython |
101202 | <gh_stars>1-10
"""Mock OAUTH2 aiohttp.web server."""
from aiohttp import web
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from authlib.jose import jwt, jwk
from typing import Tuple
import urllib
import logging
def generate_token() -> Tuple:
"""Generate RSA Key pair to be used to sign token and the JWT Token itself."""
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
public_key = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo
)
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
# we set no `exp` and other claims as they are optional in a real scenario these should bde set
# See available claims here: https://www.iana.org/assignments/jwt/jwt.xhtml
# the important claim is the "authorities"
public_jwk = jwk.dumps(public_key, kty="RSA")
private_jwk = jwk.dumps(pem, kty="RSA")
return (public_jwk, private_jwk)
nonce = ""
jwk_pair = generate_token()
user_eppn = ""
user_given_name = ""
user_family_name = ""
header = {"jku": "http://mockauth:8000/jwk", "kid": "rsa1", "alg": "RS256", "typ": "JWT"}
async def setmock(req: web.Request) -> web.Response:
"""Auth endpoint."""
global user_eppn, user_family_name, user_given_name
user_eppn = req.query["eppn"]
user_family_name = req.query["family"]
user_given_name = req.query["given"]
    logging.info("%s %s %s", user_eppn, user_family_name, user_given_name)
return web.HTTPOk()
async def auth(req: web.Request) -> web.Response:
"""Auth endpoint."""
params = {
"state": req.query["state"],
"code": "code",
}
global nonce, user_family_name, user_given_name
nonce = req.query["nonce"]
callback_url = req.query["redirect_uri"]
url = f"{callback_url}?{urllib.parse.urlencode(params)}"
logging.info(url)
response = web.HTTPSeeOther(url)
return response
async def token(req: web.Request) -> web.Response:
"""Auth endpoint."""
global nonce, user_eppn, user_family_name, user_given_name
id_token = {
"at_hash": "fSi3VUa5i2o2SgY5gPJZgg",
"sub": "smth",
"eduPersonAffiliation": "member;staff",
"eppn": user_eppn,
"displayName": f"{user_given_name} {user_family_name}",
"iss": "http://mockauth:8000",
"schacHomeOrganizationType": "urn:schac:homeOrganizationType:test:other",
"given_name": user_given_name,
"nonce": nonce,
"aud": "aud2",
"acr": "http://mockauth:8000/LoginHaka",
"nsAccountLock": "false",
"eduPersonScopedAffiliation": "<EMAIL>;<EMAIL>",
"auth_time": 1606579533,
"name": f"{user_given_name} {user_family_name}",
"schacHomeOrganization": "test.what",
"exp": 9999999999,
"iat": 1561621913,
"family_name": user_family_name,
"email": user_eppn,
}
data = {"access_token": "test", "id_token": jwt.encode(header, id_token, jwk_pair[1]).decode("utf-8")}
logging.info(data)
return web.json_response(data)
async def jwk_response(request: web.Request) -> web.Response:
"""Mock JSON Web Key server."""
keys = [jwk_pair[0]]
keys[0]["kid"] = "rsa1"
data = {"keys": keys}
logging.info(data)
return web.json_response(data)
async def userinfo(request: web.Request) -> web.Response:
"""Mock an authentication to ELIXIR AAI for GA4GH claims."""
global nonce, user_eppn, user_family_name, user_given_name
user_info = {
"sub": "smth",
"eduPersonAffiliation": "member;staff",
"eppn": user_eppn,
"displayName": f"{user_given_name} {user_family_name}",
"schacHomeOrganizationType": "urn:schac:homeOrganizationType:test:other",
"given_name": user_given_name,
"uid": user_eppn,
"nsAccountLock": "false",
"eduPersonScopedAffiliation": "<EMAIL>;<EMAIL>",
"name": f"{user_given_name} {user_family_name}",
"schacHomeOrganization": "test.what",
"family_name": user_family_name,
"email": user_eppn,
}
logging.info(user_info)
return web.json_response(user_info)
def init() -> web.Application:
"""Start server."""
app = web.Application()
app.router.add_get("/setmock", setmock)
app.router.add_get("/authorize", auth)
app.router.add_post("/token", token)
app.router.add_get("/keyset", jwk_response)
app.router.add_get("/userinfo", userinfo)
return app
if __name__ == "__main__":
web.run_app(init(), port=8000)
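
# Hedged interaction sketch (endpoints as registered in init(); the host below follows
# the hard-coded http://mockauth:8000 used in the token header):
#   GET  /setmock?eppn=user@test.what&family=Doe&given=Jane  -> primes the mocked identity
#   GET  /authorize?state=...&nonce=...&redirect_uri=...     -> redirects back with a code
#   POST /token                                              -> returns a signed id_token
#   GET  /keyset                                             -> JWK used to verify the token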
| StarcoderdataPython |
3236674 | import copy
from enum import Enum
import sty
from .board import Board
from .location import Location
from .piece import Color, Piece
class Printer:
r"""
Dedicated printer class that caches information about the board so it does not need to be
regenerated each time.
"""
SEP = "|"
EMPTY_LOCATION = " "
HIDDEN = " "
IMPASSABLE = chr(9608) # White box
class Visibility(Enum):
NONE = set()
RED = {Color.RED}
BLUE = {Color.BLUE}
ALL = RED | BLUE
def __init__(self, brd: Board, red_pieces, blue_pieces,
visibility: 'Printer.Visibility'):
r"""
:param brd: Board information.
:param red_pieces: Iterable for the set of red pieces
:param blue_pieces: Iterable for the set of blue pieces
:param visibility: Player(s) if any that are visible.
"""
self._brd = brd
# Needs to be a list of list since later join in write function
self._cells = [[" ".join([" "] + [str(i) for i in range(self._brd.num_cols)] + ["\n"])]]
# Construct list elements that will hold piece info
base_row = [""] + [Printer.EMPTY_LOCATION for _ in range(self._brd.num_cols)] + ["\n"]
for i in range(self._brd.num_rows):
self._cells.append(copy.copy(base_row))
self._cells[-1][0] = '\n{:2d}'.format(i)
self._cells.append([""])
self._visible = visibility.value
self._row_sep = "".join([" ", "-".join(["+"] * (self._n_cols + 1))])
# Fill in the locations that cannot be entered
impass_str = self._impassable_piece()
for l in self._brd.blocked:
self._set_piece(l, impass_str, ignore_impassable=True)
# Add the existing pieces
for pieces in [red_pieces, blue_pieces]:
for p in pieces:
self._set_piece(p.loc, self._format_piece(p), exist_ok=False)
@property
def _n_rows(self) -> int:
r""" Number of rows in the board """
return len(self._cells) - 2 # Two buffer cells due to printing requirements
@property
def _n_cols(self) -> int:
r""" Number of columns in the board """
# Two buffer cells due to printing requirements
# Use second row since first row is filler empty string
return len(self._cells[1]) - 2
def _get_piece(self, loc: Location) -> str:
r""" Get the string for the piece at the specified \p Location """
self._verify_piece_loc(loc)
return self._cells[loc.r + 1][loc.c + 1]
def _set_piece(self, loc: Location, value: str, exist_ok: bool = True,
ignore_impassable: bool = False) -> None:
r""" Set the string for the piece at the specified \p Location with \p value """
if not ignore_impassable:
self._verify_piece_loc(loc)
else:
assert loc.is_inside_board(self._brd.num_rows, self._brd.num_cols)
if not exist_ok:
assert self._is_loc_empty(loc), "Setting a location that should be empty"
self._cells[loc.r + 1][loc.c + 1] = value
def _verify_piece_loc(self, loc: Location) -> None:
r""" Verifies whether the piece location is inside the board boundaries"""
assert self._brd.is_inside(loc), "Invalid location in board"
def _is_loc_empty(self, loc: Location) -> bool:
r""" Returns true if the specified location is empty """
return self._get_piece(loc) == Printer.EMPTY_LOCATION
def delete_piece(self, loc: Location) -> None:
r""" Deletes piece at the specified location """
assert not self._is_loc_empty(loc), "Tried to delete piece that does not exist"
self._set_piece(loc, Printer.EMPTY_LOCATION)
# def move_piece(self, orig: Location, new: Location) -> None:
# r""" Move piece from \p orig to \p new """
# assert not self._is_loc_empty(orig), "Trying to move an empty location"
# piece_str = self._get_piece(orig)
# self._set_piece(new, piece_str)
# self.delete_piece(orig)
def add_piece(self, piece: Piece) -> None:
r""" Add the specified piece to the printer """
assert self._is_loc_empty(piece.loc), "Trying to add piece to non-empty location"
self._set_piece(piece.loc, self._format_piece(piece))
def write(self) -> str:
r""" Prints the board to a large string """
return self._row_sep.join([Printer.SEP.join(x) for x in self._cells])
# noinspection PyProtectedMember
def _is_visible(self, color: Color) -> bool:
r""" Returns True if the piece color is visible """
return color in self._visible
def _format_piece(self, piece: Piece) -> str:
r""" Generates the string for a piece to appear in the output """
# white_fg = fg(255, 255, 255)
white_fg = sty.fg.li_white
return "".join([sty.rs.all,
sty.bg.da_red if piece.color == Color.RED else sty.bg.blue,
sty.ef.bold, white_fg, # White writing over the background
str(piece.rank) if self._is_visible(piece.color) else Printer.HIDDEN,
sty.rs.all # Go back to normal printing
])
@staticmethod
def _impassable_piece() -> str:
r""" Generates string for a square that is impassable """
white_bg = sty.bg.li_white
white_fg = sty.fg.li_white
return "".join([sty.rs.all, white_bg,
sty.ef.bold, white_fg, # White writing over the background
Printer.IMPASSABLE,
sty.rs.all # Go back to normal printing
])
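# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes `board`, `red_pieces` and `blue_pieces` are built elsewhere (their
# constructors are not shown in this file); only the Printer API defined above
# is exercised here.
def render_for_red(board: Board, red_pieces, blue_pieces) -> str:
    # A red player sees red ranks; blue pieces are rendered as hidden squares.
    printer = Printer(board, red_pieces, blue_pieces, Printer.Visibility.RED)
    return printer.write()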
| StarcoderdataPython |
3221445 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botframework.connector.auth import AppCredentials
class OAuthPromptSettings:
def __init__(
self,
connection_name: str,
title: str,
text: str = None,
timeout: int = None,
oauth_app_credentials: AppCredentials = None,
):
"""
Settings used to configure an `OAuthPrompt` instance.
Parameters:
connection_name (str): Name of the OAuth connection being used.
            title (str): The title of the card's sign-in button.
            text (str): (Optional) additional text included on the sign-in card.
            timeout (int): (Optional) number of milliseconds the prompt will wait for the user to authenticate.
                `OAuthPrompt` defaults this value to `900,000` ms (15 minutes).
            oauth_app_credentials (AppCredentials): (Optional) AppCredentials to use for OAuth. If None,
                the bot's credentials are used.
"""
self.connection_name = connection_name
self.title = title
self.text = text
self.timeout = timeout
self.oath_app_credentials = oauth_app_credentials
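# --- Hypothetical usage sketch (not part of the original module) ---
# The values below are placeholders; in a real bot the connection name must
# match an OAuth connection configured on the bot's Azure Bot resource.
def _example_oauth_prompt_settings() -> OAuthPromptSettings:
    return OAuthPromptSettings(
        connection_name="ExampleOAuthConnection",
        title="Sign in",
        text="Please sign in to continue.",
        timeout=300000,  # five minutes, in milliseconds
    )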
| StarcoderdataPython |
84276 | import torch
from scipy import io
import numpy as np
#import visdom
#vis = visdom.Visdom()
file_PATH = '/home/leejeyeol/Documents/ground_truth_demo/testing_label_mask'
num_of_files = 21
for videos in range(1, num_of_files+1):
volLabel = io.loadmat(file_PATH+'/%d_label.mat' % videos)['volLabel'].tolist()[0]
data = [0 for j in range(len(volLabel)-1)]
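    # Binarise the per-frame masks: a frame whose mask contains any labelled
    # (nonzero) pixels is stored as 0, a clean frame as 1.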
for frames in range(0, len(volLabel)-1):
if(sum(sum(volLabel[frames])) > 0):
data[frames] = 0
else:
data[frames] = 1
torch.save(data, file_PATH+'/Ground_truth_%d.t7' % videos)
# torch.load(file_PATH + '/Ground_truth_21.t7')
for videos in range(1,num_of_files+1):
a = torch.IntTensor(torch.load(file_PATH+'/Ground_truth_%d.t7'%videos))
#vis.line(a,opts=dict(title='%d video' % videos, xlabel='frames',ylabel='anormaly'))
| StarcoderdataPython |
3389587 | import matplotlib.pyplot as plot
from Chapter15_RandomWalk_class import RandomWalk
rw = RandomWalk()
rw.fill_walk()
plot.scatter(rw.x_values,rw.y_values,s = 1)
plot.show()
| StarcoderdataPython |
118625 | <reponame>LechMadeyski/PhD19MarekSosnicki
from ArticlesDataDownloader.ArticlesDataDownloader import ArticlesDataDownloader
from ArticlesServer.database.DatabaseManager import DatabaseManager
from ArticlesServer.directories import OUTPUT_DIRECTORY, FINDER_FILE
from TextSearchEngine.parse_finder import parse_finder
import os
PROXY = 'proxy_auth_plugin.zip'
def reload_article(article_id):
print('Reloading article ' + article_id)
db = DatabaseManager.get_instance()
if not db:
return
article_data = db.get_full_article(article_id)
print('Starting download')
with open(FINDER_FILE, 'r') as finder_file:
finder = parse_finder(finder_file.read())
downloader = ArticlesDataDownloader(OUTPUT_DIRECTORY, PROXY)
search_base = article_data.search_base
article_filename = os.path.join(OUTPUT_DIRECTORY, search_base.filename_base + '.json')
if os.path.isfile(article_filename):
os.remove(article_filename)
article_pdf = os.path.join(OUTPUT_DIRECTORY, search_base.filename_base + '.pdf')
if os.path.isfile(article_pdf):
os.remove(article_pdf)
article_filename, data = downloader.read_article(search_base)
search_result = finder(data.to_dict()) or {}
db.reload_article(article_id, search_base, data, search_result)
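# --- Hypothetical usage sketch (not part of the original module) ---
# The ids passed in are placeholders; reload_article expects ids of articles
# already present in the DatabaseManager-backed store.
def reload_articles(article_ids):
    for article_id in article_ids:
        reload_article(article_id)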
| StarcoderdataPython |