Dataset column schema (fields appear in this order, pipe-separated, in every row below):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
7b788c4e537ccfe9b9a19c03459ac9310b0314ff | 662 | py | Python | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
]
| null | null | null | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
]
| null | null | null | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
]
| null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django_home_urls",
version="0.1.0",
author="Yuji Koseki",
author_email="[email protected]",
description="Django home urlconf.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yuji-koseki/django-home-urls",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
'Framework :: Django',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 28.782609 | 58 | 0.663142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.456193 |
7b792f428ffd2ed8a9d5df151157eca526120574 | 3,553 | py | Python | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
]
| null | null | null | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
]
| null | null | null | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script provides a class to read and save files
Created on Sat July 21 2018
@author: cttsai
"""
import pandas as pd
from Utility import CheckFileExist
from LibConfigs import logger, hdf5_compress_option, fast_hdf5_compress_option
class DataFileIO(object):
"""
"""
def __init__(self):
self.data_lastet_load = {}
def getLastestLoaded(self):
return self.data_lastet_load.copy()
@staticmethod
def checkFile(filename):
return CheckFileExist(filename, silent=False)
@staticmethod
def loadEmpty(configs):
return {k: pd.DataFrame() for k in configs.keys()}
@staticmethod
def readHDF(filename, configs={}, opt_load=True):
with pd.HDFStore(filename, 'r', **hdf5_compress_option) as store:
logger.info("{} contained {} items".format(filename, len(store.keys())))
for k in store.keys():
logger.info("{}: {}".format(k, store[k].shape))
if opt_load and configs: # load and limited by configs
ret = {k: pd.DataFrame() for k in configs.keys()}
ret.update({k.strip('/'): store[k] for k in store.keys() if k.strip('/') in configs.keys()})
return ret
if opt_load: # load all saved dataframes
return {k.strip('/'): store[k] for k in store.keys()}
return {}
def showHDF(self, filename):
self.checkFile(filename)
self.readHDF(filename, opt_load=False)
def loadCSV(self, configs={}):
"""
configs = {'name': 'file_path'}
return load_data = {'name': dataframe}
"""
logger.info("Read Data from CSV")
load_data = {}
for k, f_path in configs.items():
if not self.checkFile(f_path):
continue
load_data[k] = pd.read_csv(f_path)
logger.info("Read in {}: from {}, shape={}".format(k, f_path, load_data[k].shape))
self.data_lastet_load = load_data.copy()
return load_data
def loadHDF(self, filename, configs={}, limited_by_configs=True):
"""
"""
logger.info("Read Data from HDFS")
if not self.checkFile(filename):
return self.loadEmpty(configs)
if limited_by_configs:
logger.info("Load selected DataFrame Only")
load_data = self.readHDF(filename, configs, opt_load=True)
else: # full loaded
load_data = self.readHDF(filename, opt_load=True)
for k, v in load_data.items():
if isinstance(v, pd.DataFrame):
logger.info('memory usage on {} is {:.3f} MB'.format(k, v.memory_usage().sum() / 1024. ** 2))
self.data_lastet_load = load_data#.copy()
return load_data
def saveHDF(self, filename, data, opt_overwrite=True, opt_fast=False):
if self.checkFile(filename):
if not opt_overwrite:
logger.warning("overwrite is not allowed")
return False
compress_option = hdf5_compress_option
if opt_fast:
logger.info("use faster compression option")
compress_option = fast_hdf5_compress_option
with pd.HDFStore(filename, 'w', **compress_option) as store:
logger.info("Save to {}".format(filename))
for k, d in data.items():
store.put(k, d, format='table')
#store.put(k, d, format='fixed')
logger.info("Save {}: {}".format(k, d.shape))
| 33.205607 | 109 | 0.587672 | 3,264 | 0.91866 | 0 | 0 | 935 | 0.263158 | 0 | 0 | 655 | 0.184351 |
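The DataFileIO helper above is driven by small name-to-path dicts. A minimal usage sketch follows; the file names are hypothetical and it assumes the repo-local Utility and LibConfigs modules are importable so DataFileIO itself imports cleanly.

```python
from DataFileIO import DataFileIO

io = DataFileIO()

# Read two CSVs into a dict of DataFrames keyed by short names (paths are examples).
frames = io.loadCSV({"train": "data/application_train.csv",
                     "test": "data/application_test.csv"})

# Cache them in a single compressed HDF5 store, then reload only the "train" frame.
io.saveHDF("data/cache.h5", frames, opt_overwrite=True)
subset = io.loadHDF("data/cache.h5", configs={"train": None}, limited_by_configs=True)
```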
7b7fac5e786fffa0981a48a959c7b50a97194205 | 885 | py | Python | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
]
| null | null | null | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
]
| null | null | null | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
]
| 1 | 2021-08-15T16:19:01.000Z | 2021-08-15T16:19:01.000Z | #!/bin/python
from roomai.sevenking import SevenKingEnv
from roomai.sevenking import SevenKingAction
import unittest
class testSevenKing(unittest.TestCase):
def show_hand_card(self,hand_card):
str = ""
for c in hand_card:
str += "," + c.key
print (str)
def testEnv(self):
env = SevenKingEnv()
env.num_players = 2
infos, public_state, person_states, private_state = env.init()
assert(len(infos) == 2)
turn = public_state.turn
self.show_hand_card(person_states[turn].hand_card)
print (turn)
print ("available_actions=",person_states[turn].available_actions.keys())
print ("available_actions_v=",person_states[turn].available_actions.values())
action = SevenKingAction("%s,%s" % (person_states[turn].hand_card[0].key, person_states[turn].hand_card[1].key))
| 30.517241 | 120 | 0.662147 | 763 | 0.862147 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.075706 |
7b817580d6dc21506efb8434e6050e6f651bf968 | 1,776 | py | Python | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
]
| null | null | null | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
]
| null | null | null | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
]
| null | null | null | import attr
import os
import tarfile
from pyamazonlandsat.utils import get_path_row_from_name
from pyamazonlandsat.downloader import Downloader
@attr.s
class Product:
"""Class that represent a Product
:param name: name of the Product.
type name: str.
:param output_path: path where save the downloaded prodcuct.
:type output_path: str.
"""
name = attr.ib()
output_path = attr.ib()
_path_files = attr.ib(init=False)
_link = attr.ib(init=False,
default='https://landsat-pds.s3.amazonaws.com/c1/L8/%s/%s/%s')
def _generate_link(self):
"""Method to generate the link to download from S3
Amazon Service
"""
path, row = get_path_row_from_name(self.name)
self._link = self._link % (path, row, self.name)
def _compress_product(self):
"""Method to compress product into a tar file.
"""
with tarfile.open('%s.tar.gz' %
os.path.join(self.output_path, self.name), 'w:gz') as tar:
for ff in os.listdir(self._path_files):
tar.add(
os.path.join(
self._path_files, ff),
ff)
def get_image_product(self):
"""Method to download the product.
        This method creates a `Downloader`_ object and downloads
        the images, then compresses them and moves the archive to `output_path`.
        The downloaded images are saved into a temporary folder,
        compressed into a tar file and then moved to
        `output_path`.
"""
self._generate_link()
downloader = Downloader(self._link)
self._path_files = downloader.download_images()
self._compress_product()
downloader.remove_tmp_files()
| 31.714286 | 84 | 0.614865 | 1,620 | 0.912162 | 0 | 0 | 1,628 | 0.916667 | 0 | 0 | 727 | 0.409347 |
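A short usage sketch of the Product class above; the scene ID and output directory are made-up examples, and it assumes the package's Downloader and utility modules are available.

```python
from pyamazonlandsat.product import Product

product = Product(
    name="LC08_L1TP_231094_20190906_20190917_01_T1",  # example Landsat-8 C1 scene ID
    output_path="/tmp/landsat",
)
# Downloads the band files from the public S3 bucket into a temporary folder,
# packs them into /tmp/landsat/<scene>.tar.gz and removes the temporary files.
product.get_image_product()
```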
7b83240c1ea862333830ef3e4b3423db43db8c92 | 5,352 | py | Python | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
]
| 2 | 2021-04-13T18:54:08.000Z | 2021-09-21T23:08:08.000Z | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
]
| null | null | null | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
]
| null | null | null | from __future__ import division
from skimage.segmentation import slic, mark_boundaries
from skimage.util import img_as_float
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import os
from cv2 import boundingRect
from argparse import ArgumentParser  # used by the __main__ block below
img_width = 50
img_height = 50
img_depth = 4
_selected_segments = set()
_current_segments = []
_current_image = []
_original_image = []
_plt_img = []
_shift = False
def segment(image, **kwargs):
return slic(img_as_float(image), n_segments = int(kwargs.get("n_segments", max(image.shape) * 1.5)), sigma = 5)
def on_click(event):
if _shift:
x, y = int(round(event.xdata)), int(round(event.ydata))
segment_value = _current_segments[y, x]
if segment_value not in _selected_segments:
_selected_segments.add(segment_value)
_current_image[_current_segments == segment_value] = [255, 0, 0]
else:
_selected_segments.remove(segment_value)
_current_image[_current_segments == segment_value] = _original_image[_current_segments == segment_value]
_plt_img.set_data(_current_image)
plt.draw()
print(segment_value)
def on_key_press(event):
global _shift
if event.key == 'shift':
_shift = True
def on_key_release(event):
global _shift
if event.key == 'shift':
_shift = False
def select(image, segments):
global _selected_segments
global _current_segments
global _current_image
global _original_image
global _plt_img
_selected_segments = set()
_current_segments = segments
_current_image = np.copy(image)
_original_image = image
fig = plt.figure(f"Segmentation")
ax = fig.add_subplot(1, 1, 1)
_plt_img = ax.imshow(image)
fig.canvas.mpl_connect('button_press_event', on_click)
fig.canvas.mpl_connect('key_press_event', on_key_press)
fig.canvas.mpl_connect('key_release_event', on_key_release)
plt.show()
return _selected_segments
def mask_from_segments(segments, value):
mask = np.zeros(segments.shape, dtype="uint8")
mask[segments == value] = 255
return mask
def padded_image(image, segments, value):
mask = mask_from_segments(segments, value)
positions = np.transpose(mask.nonzero())
x, y, width, height = boundingRect(positions[:,::-1])
global_height, global_width, _ = image.shape
left_padding_x, top_padding_y = (img_width - width) // 2, (img_height - height) // 2
right_padding_x, bottom_padding_y = left_padding_x, top_padding_y
right_padding_x += (img_width - width) % 2
bottom_padding_y += (img_height - height) % 2
if top_padding_y > y:
return None
if left_padding_x > x:
return None
if bottom_padding_y > global_height - (y + height):
return None
if right_padding_x > global_width - (x + width):
return None
result_image = np.zeros((img_height, img_width, 4), dtype="float32")
# i is result_image's index, ii is original image's index
for i, ii in zip(range(img_height), range(y - top_padding_y, y + height + bottom_padding_y)):
for j, jj in zip(range(img_width), range(x - left_padding_x, x + width + right_padding_x)):
# Add a channel to whether each pixel belongs to the original segment
result_image[i, j] = np.array(list(image[ii, jj]) + [mask[ii, jj]], dtype="float32")
# returns a 4-channel image with dimensions (image_utils.img_width x image_utils.img_height)
return result_image
def padded_segments(image, segments, selection, mask=None):
padded_segments = []
segment_val = []
max_val = segments.max() + 1
for i in selection:
if mask is not None:
and_mask = np.logical_and(mask_from_segments(segments, i), mask)
if not and_mask.any():
continue
img = padded_image(image, segments, i)
if img is not None:
padded_segments.append(img)
segment_val.append(i)
print(f"Padding images [{int((i / max_val) * 100)}%]\r", end="")
print('\n')
return (np.array(padded_segments), segment_val)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--name", default="new")
args = parser.parse_args()
image_paths = os.listdir("inputs")
images = [io.imread(os.path.join("inputs", image_path)) for image_path in image_paths]
print(f"Found {len(images)} inputs")
output_path = os.path.join("datasets", args.name)
existing_segments = os.listdir(output_path)
if 'c0' in existing_segments:
false_index = existing_segments.index('c0')
true_index = len(existing_segments) - false_index
else:
false_index = len(existing_segments)
true_index = 0
print("Segmenting")
segments = [segment(image) for image in images]
for i in range(len(images)):
selection = select(images[i], segments[i])
true_padded_images, _ = padded_segments(images[i], segments[i], selection)
print(f"Saving {len(true_padded_images)} car images")
for img in true_padded_images:
# Can't save it as an image: it has an extra channel
with open(os.path.join(output_path, f"c{str(true_index)}"), 'wb') as save_file:
np.save(save_file, img)
true_index += 1
not_selection = set(range(segments[i].max())) - selection
false_padded_images, _ = padded_segments(images[i], segments[i], not_selection)
print(f"Saving {len(false_padded_images)} non-car images")
for img in false_padded_images:
with open(os.path.join(output_path, str(false_index)), 'wb') as save_file:
np.save(save_file, img)
false_index += 1
os.rename(os.path.join("inputs", image_paths[i]), os.path.join("processed", image_paths[i])) | 30.409091 | 112 | 0.734865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.13565 |
7b842e0e690c82590e6a6533bd9a6cab6937e48f | 1,797 | py | Python | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
]
| null | null | null | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
]
| null | null | null | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
]
| null | null | null | """Parse CWL and create a JSON file describing the workflow. This dictionary
is directly suitable for display by vis.js, but can be parsed for any other
purpose."""
# Copyright (c) 2019 Seven Bridges. See LICENSE
from ..cwl.lib import ListOrMap
def cwl_graph(cwl: dict):
graph = {
"nodes": [],
"edges": [],
"lines": {}
}
inputs = ListOrMap(cwl.get("inputs", {}), key_field="id", problems=[])
_add_nodes(graph, inputs, "inputs")
steps = ListOrMap(cwl.get("steps", {}), key_field="id", problems=[])
_add_nodes(graph, steps, "steps")
outputs = ListOrMap(cwl.get("outputs", {}), key_field="id", problems=[])
_add_nodes(graph, outputs, "outputs")
_add_edges(graph, inputs, outputs, steps)
return graph
def _add_nodes(graph, grp, grp_id):
for k, v in grp.as_dict.items():
graph["nodes"] += [{
"id": k,
"label": v.get("label", k) if isinstance(v, dict) else k,
"title": v.get("label", k) if isinstance(v, dict) else k,
"group": grp_id
}]
graph["lines"][k] = grp.get_range_for_value(k).start.line
def _add_edges(graph, inputs, outputs, steps):
for k, v in steps.as_dict.items():
_to = k
for _, prt in ListOrMap(v.get("in", {}), key_field="id", problems=[]).as_dict.items():
graph["edges"] += [{"from": _f, "to": _to} for _f in _get_source_step(prt, "source")]
for k, v in outputs.as_dict.items():
_to = k
graph["edges"] += [{"from": _f, "to": _to} for _f in _get_source_step(v, "outputSource")]
def _get_source_step(v, key):
src = v.get(key) if isinstance(v, dict) else v
if not isinstance(src, list):
src = [src]
return [s.split("/")[0] for s in src if isinstance(s, str)]
| 29.95 | 97 | 0.590428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.229827 |
7b87f62b10bc328cd7870120d84ededf89b99acb | 21 | py | Python | data/studio21_generated/introductory/4853/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
]
| null | null | null | data/studio21_generated/introductory/4853/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
]
| null | null | null | data/studio21_generated/introductory/4853/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
]
| null | null | null | def double_char(s):
| 10.5 | 19 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
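The starter stub above only declares the signature. Assuming the exercise is the usual double_char kata (repeat every character of the input twice), a straightforward completion would be:

```python
def double_char(s):
    # "abc" -> "aabbcc"; assumes the task is to duplicate each character.
    return "".join(c * 2 for c in s)
```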
7b882a00a99da3e2e17e41e9f577ca3003e8abd3 | 2,561 | py | Python | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
]
| null | null | null | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
]
| null | null | null | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
]
| null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that suppors using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Provincia(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Distrito(models.Model):
"""Model definition for District."""
# TODO: Define fields here
name = models.CharField(max_length=100)
provincia = models.ForeignKey('Provincia', on_delete=models.CASCADE)
def __str__(self):
"""Unicode representation of District."""
return self.name
class UnidadeSanitaria(models.Model):
"""Model definition for HealthFacility."""
id = models.CharField(max_length=255, primary_key=True)
name = models.CharField(max_length=255)
# openmrs_name = models.CharField(max_length=255, null=True, blank=True)
distrito = models.ForeignKey('Distrito', on_delete=models.CASCADE)
class Meta:
"""Meta definition for HealthFacility."""
verbose_name = 'Unidade Sanitaria'
verbose_name_plural = 'Unidades Sanitarias'
def __str__(self):
"""Unicode representation of HealthFacility."""
return self.name
class Livro(models.Model):
tipo = models.CharField(max_length=100)
numero = models.IntegerField()
pagina = models.IntegerField()
linha = models.IntegerField()
def __str__(self):
return f'{self.tipo} {self.numero}'
| 30.488095 | 76 | 0.673565 | 2,342 | 0.914487 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.225303 |
7b8b21db4d1b5bb95da77aaaeac80ad479fa1496 | 477 | py | Python | reviews/migrations/0006_review_no_login.py | moshthepitt/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
]
| 6 | 2015-07-28T09:36:39.000Z | 2020-08-11T17:15:18.000Z | reviews/migrations/0006_review_no_login.py | Swifilaboroka/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
]
| 8 | 2015-12-17T22:56:16.000Z | 2022-01-13T00:43:16.000Z | reviews/migrations/0006_review_no_login.py | Swifilaboroka/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
]
| 3 | 2017-07-15T12:13:03.000Z | 2022-02-02T10:04:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0005_auto_20160203_1247'),
]
operations = [
migrations.AddField(
model_name='review',
name='no_login',
field=models.BooleanField(default=False, help_text='Is this review open to the world?', verbose_name='No Login'),
),
]
| 23.85 | 125 | 0.628931 | 368 | 0.771488 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.251572 |
7b8c7e8a4741c68754a4c124370afe960c3a82b1 | 2,191 | py | Python | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
]
| 2 | 2021-01-27T18:39:12.000Z | 2021-02-01T06:57:02.000Z | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
]
| null | null | null | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
]
| null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 04kraus.ipynb (unless otherwise specified).
__all__ = ['apply_kraus', 'partial_trace_kraus', 'povm_map']
# Cell
import numpy as np
import qutip as qt
# Cell
def apply_kraus(dm, kraus):
r"""
Applies a Kraus map to a density matrix $\rho$. The Kraus map consists in some number of operators
satisfying $\sum_{i} \hat{K}_{i}^{\dagger}\hat{K}_{i} = \hat{I}$. $\rho$ is transformed via:
$$\rho \rightarrow \sum_{i} \hat{K}_{i}\rho\hat{K}_{i}^{\dagger} $$
"""
return sum([kraus[j]*dm*kraus[j].dag() for j in range(len(kraus))])
# Cell
def partial_trace_kraus(keep, dims):
r"""
Constructs the Kraus map corresponding to the partial trace. Takes `keep` which is a single index or list of indices denoting
subsystems to keep, and a list `dims` of dimensions of the overall tensor product Hilbert space.
For illustration, to trace over the $i^{th}$ subsystem of $n$, one would construct Kraus operators:
$$ \hat{K}_{i} = I^{\otimes i - 1} \otimes \langle i \mid \otimes I^{\otimes n - i}$$.
"""
if type(keep) == int:
keep = [keep]
trace_over = [i for i in range(len(dims)) if i not in keep]
indices = [{trace_over[0]:t} for t in range(dims[trace_over[0]])]
for i in trace_over[1:]:
new_indices = []
for t in range(dims[i]):
new_indices.extend([{**j, **{i: t}} for j in indices])
indices = new_indices
return [qt.tensor(*[qt.identity(d) if i in keep else qt.basis(d, index[i]).dag() for i, d in enumerate(dims)]) for index in indices]
# Cell
def povm_map(kraus, A, B=None):
r"""
Represents a Kraus map on Qbist probability vectors. Takes a list of Kraus operators, a POVM $A$ on the initial Hilbert space,
and a POVM $B$ on the final Hilbert space. If $B$ isn't provided, it's assumed to be the same as $A$. Then the matrix elements of the map are:
$$K_{j, i} = tr( \mathbb{K}(\frac{\hat{A}_{i}}{tr \hat{A}_{i}})\hat{B}_{j} ) $$
Where $\mathbb{K}(\hat{O})$ denotes the Kraus map applied to $O$.
"""
B = B if type(B) != type(None) else A
return np.array([[(apply_kraus(a/a.tr(), kraus)*b).tr() for a in A] for b in B]).real | 42.960784 | 146 | 0.64126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,314 | 0.599726 |
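A quick numerical check of the partial-trace construction above, assuming QuTiP and NumPy are available: applying the Kraus operators returned by partial_trace_kraus should reproduce QuTiP's built-in ptrace.

```python
import numpy as np
import qutip as qt

# Bell state on two qubits; tracing out either qubit leaves the maximally mixed state.
rho = qt.ket2dm(qt.bell_state("00"))

kraus = partial_trace_kraus(keep=0, dims=[2, 2])
reduced = apply_kraus(rho, kraus)

assert np.allclose(reduced.full(), rho.ptrace(0).full())
```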
7b8d3bfd9dda43412dd61ee3a956e43a5295cf1f | 78 | py | Python | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
]
| null | null | null | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
]
| null | null | null | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
]
| null | null | null | n = int(input())
num = 1
while num <= n:
print(num)
num = num * 2 + 1 | 13 | 21 | 0.487179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7b8dd1f4d57db9568b64c88454ba16b6a105aa77 | 4,129 | py | Python | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
]
| 14 | 2020-06-30T00:36:14.000Z | 2022-01-11T13:15:53.000Z | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
]
| null | null | null | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
]
| 2 | 2020-10-17T15:27:06.000Z | 2021-02-27T10:34:04.000Z | import sys
sys.path.insert(0,'..')
sys.path.insert(0,'../..')
from bayes_opt import BayesOpt,BayesOpt_KnownOptimumValue
import numpy as np
#from bayes_opt import auxiliary_functions
from bayes_opt import functions
from bayes_opt import utilities
import warnings
#from bayes_opt import acquisition_maximization
import sys
import itertools
import matplotlib.pyplot as plt
np.random.seed(6789)
warnings.filterwarnings("ignore")
counter = 0
myfunction_list=[]
#myfunction_list.append(functions.sincos())
#myfunction_list.append(functions.branin())
#myfunction_list.append(functions.hartman_3d())
#myfunction_list.append(functions.ackley(input_dim=5))
myfunction_list.append(functions.alpine1(input_dim=5))
#myfunction_list.append(functions.hartman_6d())
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1])))
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1,1,1,1,1,1])))
acq_type_list=[]
temp={}
temp['name']='erm' # expected regret minimization
temp['IsTGP']=0 # recommended to use tgp for ERM
acq_type_list.append(temp)
temp={}
temp['name']='cbm' # confidence bound minimization
temp['IsTGP']=1 # recommended to use tgp for CBM
#acq_type_list.append(temp)
#temp={}
#temp['name']='kov_mes' # MES+f*
#temp['IsTGP']=0 # we can try 'tgp'
#acq_type_list.append(temp)
temp={}
temp['name']='kov_ei' # this is EI + f*
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ucb' # vanilla UCB
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ei' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='random' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
fig=plt.figure()
color_list=['r','b','k','m','c','g','o']
marker_list=['s','x','o','v','^','>','<']
for idx, (myfunction,acq_type,) in enumerate(itertools.product(myfunction_list,acq_type_list)):
print("=====================func:",myfunction.name)
print("==================acquisition type",acq_type)
IsTGP=acq_type['IsTGP']
acq_name=acq_type['name']
nRepeat=10
ybest=[0]*nRepeat
MyTime=[0]*nRepeat
MyOptTime=[0]*nRepeat
marker=[0]*nRepeat
bo=[0]*nRepeat
[0]*nRepeat
for ii in range(nRepeat):
if 'kov' in acq_name or acq_name == 'erm' or acq_name == 'cbm':
bo[ii]=BayesOpt_KnownOptimumValue(myfunction.func,myfunction.bounds,myfunction.fstar, \
acq_name,IsTGP,verbose=1)
else:
bo[ii]=BayesOpt(myfunction.func,myfunction.bounds,acq_name,verbose=1)
ybest[ii],MyTime[ii]=utilities.run_experiment(bo[ii],n_init=3*myfunction.input_dim,\
NN=10*myfunction.input_dim,runid=ii)
MyOptTime[ii]=bo[ii].time_opt
print("ii={} BFV={:.3f}".format(ii,myfunction.ismax*np.max(ybest[ii])))
Score={}
Score["ybest"]=ybest
Score["MyTime"]=MyTime
Score["MyOptTime"]=MyOptTime
utilities.print_result_sequential(bo,myfunction,Score,acq_type)
## plot the result
# process the result
y_best_sofar=[0]*len(bo)
for uu,mybo in enumerate(bo):
y_best_sofar[uu]=[ (myfunction.fstar - np.max(mybo.Y_ori[:ii+1]) ) for ii in range(len(mybo.Y_ori))]
y_best_sofar[uu]=y_best_sofar[uu][3*myfunction.input_dim:] # remove the random phase for plotting purpose
y_best_sofar=np.asarray(y_best_sofar)
myxaxis=range(y_best_sofar.shape[1])
plt.errorbar(myxaxis,np.mean(y_best_sofar,axis=0), np.std(y_best_sofar,axis=0)/np.sqrt(nRepeat),
label=acq_type['name'],color=color_list[idx],marker=marker_list[idx])
plt.ylabel("Simple Regret",fontsize=14)
plt.xlabel("Iterations",fontsize=14)
plt.legend(prop={'size': 14})
strTitle="{:s} D={:d}".format(myfunction.name,myfunction.input_dim)
plt.title(strTitle,fontsize=18)
| 25.80625 | 125 | 0.654154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,467 | 0.355292 |
7b8e2e97334a2cce55aad103330d605ea89ea8e4 | 2,258 | py | Python | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
]
| 2 | 2021-03-19T02:23:24.000Z | 2021-12-22T15:01:46.000Z | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
]
| null | null | null | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
]
| null | null | null | import icalendar
import uuid
from datetime import datetime
import pytz
cst = pytz.timezone('Asia/Shanghai')
class Calendar(icalendar.Calendar):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add('prodid', '-//CDFMLR//coursesical//CN')
self.add('VERSION', '2.0')
self.add('X-WR-CALNAME', 'coursesical')
self.add('X-APPLE-CALENDAR-COLOR', '#ff5a1d')
self.add('X-WR-TIMEZONE', 'Asia/Shanghai')
def add_event(self, event):
self.add_component(event)
# def fCalendar():
# cal = icalendar.Calendar()
# cal.add('prodid', '-//CDFMLR//coursesical//CN')
# cal.add('VERSION', '2.0')
# cal.add('X-WR-CALNAME', 'coursesical')
# cal.add('X-APPLE-CALENDAR-COLOR', '#ff5a1d')
# cal.add('X-WR-TIMEZONE', 'Asia/Shanghai')
# return cal
class Event(icalendar.Event):
def __init__(self,
summary: str, start: datetime, end: datetime, location: str, description: str,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.add('SUMMARY', summary)
self.add('LOCATION', location)
self.add('DESCRIPTION', description)
self.add('DTSTART', datetime(start.year, start.month, start.day,
start.hour, start.minute, start.second, tzinfo=cst))
self.add('DTEND', datetime(end.year, end.month, end.day,
end.hour, end.minute, end.second, tzinfo=cst))
self.add('SEQUENCE', '0')
self.add('UID', str(uuid.uuid3(uuid.NAMESPACE_DNS, f'{summary}{str(uuid.uuid4())}')))
def alarm(self, before_minutes: int):
alarm = icalendar.Alarm()
alarm.add('UID', str(uuid.uuid3(
uuid.NAMESPACE_DNS,
str(self["summary"]) + str(uuid.uuid4()) + str(before_minutes)
)))
alarm.add('ACTION', 'DISPLAY')
alarm['TRIGGER'] = f'-PT{before_minutes}M'
alarm.add('DESCRIPTION', '提醒事项')
self.add_component(alarm)
return self
def weekly_repeat(self, until: datetime):
self.add('rrule', {'freq': 'WEEKLY',
'INTERVAL': 1,
'UNTIL': until})
return self
| 31.361111 | 95 | 0.569088 | 1,847 | 0.815093 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.296117 |
7b8f6c6edc977e548344a0694966296691f0f034 | 816 | py | Python | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
]
| null | null | null | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
]
| null | null | null | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
]
| null | null | null | #!/usr/bin/python3.2
import unittest
from minesweeper.message import *
class UTSMessageTest(unittest.TestCase):
def test_parse_infer_type(self):
"""
Instantiates one object for every concrete subclass of UTSMessage using the type-inferring
factory method parse_infer_type(), checking that the instance returned is of the expected
type.
"""
factory_strings = ("look", "dig 5 2", "flag 6 2", "deflag 3 6",
"help", "bye")
message_classes = UTSMessage.message_types
for string, mclass in zip(factory_strings, message_classes):
o = UTSMessage.parse_infer_type(string)
self.assertIsInstance(
o,
mclass
)
if __name__ == "__main__":
unittest.main()
| 27.2 | 98 | 0.61152 | 692 | 0.848039 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.372549 |
7b917e46393f05ca669d8af2e30bf77af89da6ab | 1,640 | py | Python | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | from distutils.core import setup
from distutils.command.build_py import build_py
import os
import shutil
import stat
from RunnerPyzza import __version__
class runner_build_py(build_py):
def runner_install(self):
print "RunnerPyzza basic configuration ..."
try:
os.mkdir("/etc/runnerpyzza/")
except:
pass
try:
os.mkdir("/etc/runnerpyzza/log")
os.system("chmod 777 /etc/runnerpyzza/log")
except:
pass
shutil.copy2("RPdaemon.conf", "/etc/runnerpyzza/RPdaemon.conf")
shutil.copy2("runnerpyzza", "/etc/runnerpyzza/runnerpyzza")
try:
os.system("chmod 755 /etc/runnerpyzza/runnerpyzza")
except:
pass
print "RunnerPyzza basic configuration ... Done!"
def run(self):
self.runner_install()
build_py.run(self) #run superclass method
setup(
name = 'RunnerPyzza',
version = __version__,
author = 'Marco Galardini - Emilio Potenza',
author_email = '[email protected] - [email protected]',
packages = ['RunnerPyzza','RunnerPyzza.ClientCommon', 'RunnerPyzza.Common', 'RunnerPyzza.LauncherManager', 'RunnerPyzza.ServerCommon'],
scripts = ['RPdaemon','RPlauncher','RPaddservice','RPadduser','RPpreparedir','RPsshkeys'],
#url = 'http://RunnerPyzza',
license = 'LICENSE.txt',
description = 'An easy to use queue system for laboratory networks',
long_description = open('README.txt').read(),
install_requires = ["paramiko >= 1.7.7.2", "argparse >= 1.1"],
cmdclass = {"build_py" : runner_build_py}
)
| 33.469388 | 140 | 0.646951 | 757 | 0.461585 | 0 | 0 | 0 | 0 | 0 | 0 | 747 | 0.455488 |
7b92c51e95df7d865e1969f7a3d0f8febc341130 | 1,142 | py | Python | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
]
| null | null | null | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
]
| null | null | null | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
]
| null | null | null | class Solution:
def _equalize_length(self, *args) -> tuple:
max_len = max(map(len, args))
return tuple(map(lambda x: x.zfill(max_len), args))
def _add(self, *args) -> str:
return str(sum(map(int, args)))
def _sub(self, num1: str, num2: str) -> str:
return str(int(num1) - int(num2))
def multiply(self, num1: str, num2: str) -> str:
num1, num2 = self._equalize_length(num1, num2)
n = len(num1)
if n == 1:
# multiply by single digit
return str(int(num1) * int(num2))
num1_h = num1[: n // 2]
num1_l = num1[n // 2:]
num2_h = num2[: n // 2]
num2_l = num2[n // 2:]
num1_h_num2_h = self.multiply(num1_h, num2_h)
num1_l_num2_l = self.multiply(num1_l, num2_l)
combo = self._sub(self.multiply(self._add(num1_h, num1_l), self._add(num2_h, num2_l)), self._add(num1_h_num2_h, num1_l_num2_l))
return self._add(num1_h_num2_h + '0' * 2 * (n - n // 2), combo + '0' * (n - n // 2), num1_l_num2_l)
if __name__ == "__main__":
solu = Solution()
print(solu.multiply('123', '456')) | 27.853659 | 135 | 0.565674 | 1,049 | 0.918564 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.045534 |
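The recursion above is Karatsuba's trick: writing num1 = h1*10^k + l1 and num2 = h2*10^k + l2 (with k = n - n//2 low-order digits), the product equals h1*h2*10^(2k) + [(h1+l1)*(h2+l2) - h1*h2 - l1*l2]*10^k + l1*l2, so only three recursive multiplications are needed per level. A quick numeric check of that identity (the split width here is illustrative):

```python
h1, l1, h2, l2 = 12, 34, 56, 78   # "1234" and "5678" split into high/low halves, k = 2
combo = (h1 + l1) * (h2 + l2) - h1 * h2 - l1 * l2
assert h1 * h2 * 10**4 + combo * 10**2 + l1 * l2 == 1234 * 5678
```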
7b941b7e926180fee64edecc4bb32c18fe4b75b2 | 220 | py | Python | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
]
| 1 | 2016-08-17T06:36:03.000Z | 2016-08-17T06:36:03.000Z | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
]
| null | null | null | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
]
| null | null | null | """
Here for compat. with objectstore.
"""
class ObjectNotFound(Exception):
""" Accessed object was not found """
pass
class ObjectInvalid(Exception):
""" Accessed object store ID is invalid """
pass
| 15.714286 | 47 | 0.663636 | 171 | 0.777273 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.554545 |
7b95c23e30524cab22ee7e5bbccde48a49bfd895 | 9,432 | py | Python | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
]
| null | null | null | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
]
| 21 | 2021-08-13T06:11:55.000Z | 2022-03-18T06:13:05.000Z | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
]
| null | null | null | import asyncio
import inspect
import logging
import os
import random
import time
import uuid
from abc import ABC, abstractmethod
from functools import cached_property, wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional, Tuple
from aiohttp.client import ClientConnectionError, ClientConnectorError
from aiohttp.web import Application, GracefulExit
from .log import get_logger
from .utils import close_task, dot_name, underscore
class Id:
@classmethod
def name(cls) -> str:
"""My name"""
return underscore(cls.__name__)
@cached_property
def uid(self) -> str:
"""My unique ID"""
return uuid.uuid4().hex
@classmethod
def create_logger(cls, logger: Optional[logging.Logger] = None) -> logging.Logger:
return logger or get_logger(dot_name(cls.name()))
class IdLog(Id):
@cached_property
def logger(self):
return self.create_logger()
class NodeBase(ABC, Id):
exit_lag: int = 1
app: Optional[Application] = None
async def start_app(self, app: Application) -> None:
"""Start application"""
self.app = app
await self.start()
async def close_app(self, app: Application) -> None:
await self.close()
@abstractmethod
def is_running(self) -> bool:
"""True if the Node is running"""
@abstractmethod
async def start(self) -> None:
"""called when the node worker has started"""
pass
@abstractmethod
async def close(self) -> None:
"""called when the node worker closed"""
pass
async def setup(self) -> None:
"""Called by the :meth:`.start` method when the worker starts
This can be optionally implemented by derived classes
"""
pass
async def teardown(self) -> None:
"""Called my :meth:`close` when the worker is stopping.
This can be optionally implemented by derived classes
"""
pass
async def done(self) -> None:
try:
await self.teardown()
except Exception:
self.logger.exception("unhandled exception while tear down worker")
async def system_exit(self) -> None:
"""Gracefully exiting the app if possible"""
if self.is_running():
await self.done()
self.system_exit_sync()
def system_exit_sync(self) -> None:
"""Exit the app"""
self.logger.warning("bailing out!")
asyncio.get_event_loop().call_later(self.exit_lag, self._exit)
def _exit(self) -> None: # pragma: no cover
if os.getenv("PYTHON_ENV") != "test":
raise GracefulExit
class NodeWorker(NodeBase):
def __init__(self, *, logger: Optional[Logger] = None) -> None:
self.logger: Logger = self.create_logger(logger)
self._worker = None
@property
def debug(self) -> bool:
return self.logger.isEnabledFor(logging.DEBUG)
# FOR DERIVED CLASSES
async def work(self) -> None:
"""Main work coroutine, this is where you define the asynchronous loop.
Must be implemented by derived classes
"""
raise NotImplementedError
# API
def is_running(self) -> bool:
"""True if the Node is running"""
return bool(self._worker)
async def start(self) -> None:
"""Start the node"""
assert not self.is_running(), "Node already running - cannot start"
await self.setup()
self._worker = asyncio.ensure_future(self._work())
async def close(self, close_worker: bool = True) -> None:
if self._worker:
self.logger.info("closing")
worker = self._worker
self._worker = None
if close_worker:
await close_task(worker, self.done)
else:
await self.done()
self.logger.warning("closed")
# INTERNAL
async def _work(self) -> None:
self.logger.warning("started")
try:
await self.work()
except asyncio.CancelledError:
pass
except Exception:
self.logger.exception("unhandled exception in worker")
await self.system_exit()
else:
await self.close(close_worker=False)
class WorkerApplication(Dict[str, Any]):
def __init__(self):
super().__init__()
self.on_startup = []
self.on_shutdown = []
async def startup(self):
for on_startup in self.on_startup:
await on_startup(self)
async def shutdown(self):
for on_shutdown in self.on_shutdown:
await on_shutdown(self)
class NodeWorkers(NodeBase):
def __init__(self, *workers: NodeWorker, logger: Optional[Logger] = None) -> None:
self.logger: Logger = self.create_logger(logger)
self._closing: bool = False
self._workers: List[NodeBase] = list(workers)
@property
def debug(self) -> bool:
return self.logger.isEnabledFor(logging.DEBUG)
def is_running(self) -> bool:
return isinstance(self._workers, tuple)
def is_closing(self) -> bool:
return self._closing
def add_workers(self, *workers: NodeBase) -> None:
if self.is_running():
raise RuntimeError("Cannot add workers when started")
self._workers.extend(workers)
async def start(self) -> None:
await self.setup()
self.logger.warning("started")
workers = self._freeze_workers()
await asyncio.gather(*[w.start_app(self.app) for w in workers])
async def close(self) -> None:
if self.is_running():
self._closing = True
await asyncio.gather(*[w.close_app(self.app) for w in self._workers])
await self.teardown()
def _freeze_workers(self) -> Tuple[NodeBase, ...]:
if isinstance(self._workers, tuple):
raise RuntimeError("worker already started")
self._workers = tuple(self._workers)
return self._workers
class Node(NodeWorker):
"""A nodeworker with an heartbeat work loop and ability to publish
messages into a pubsub
"""
heartbeat: float = 1
ticks: int = 0
async def tick(self) -> None:
"""called at every iteration in the worker"""
pass
async def work(self) -> None:
while True:
start = time.monotonic()
self.ticks += 1
await self.tick()
dt = time.monotonic() - start
await asyncio.sleep(max(self.heartbeat - dt, 0))
class Consumer(NodeWorker):
def __init__(
self,
process_message,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.process_message = process_message
self._message_queue: Optional[asyncio.Queue] = None
def qsize(self) -> int:
return 0 if self._message_queue is None else self._message_queue.qsize()
async def setup(self) -> None:
self._message_queue = asyncio.Queue()
async def work(self):
while self.is_running():
message = await self._message_queue.get()
await self.process_message(message)
await asyncio.sleep(0)
def submit(self, message) -> None:
if self._message_queue is None:
raise RuntimeError("cannot submit to a non running consumer")
self._message_queue.put_nowait(message)
class Worker(NodeWorker):
def __init__(
self,
work: Callable[[], None],
logger: Optional[Logger] = None,
) -> None:
super().__init__(logger=logger)
self.work = work
class TickWorker(Node):
def __init__(
self,
tick: Callable[[], None],
heartbeat: float = 1,
logger: Optional[Logger] = None,
) -> None:
super().__init__(logger=logger)
self.heartbeat = heartbeat
self.tick = tick
class every:
def __init__(self, seconds: float, noise: float = 0) -> None:
self.seconds = seconds
self.noise = min(noise, seconds)
self.last = 0
self.gap = self._gap()
self.ticks = 0
def __call__(self, method):
method.every = self
@wraps(method)
async def _(node, *args) -> None:
now = time.time()
if now - self.last > self.gap:
self.last = now
self.gap = self._gap()
self.ticks += 1
try:
await method(node, *args)
except (ClientConnectionError, ClientConnectorError) as exc:
node.logger.error(str(exc))
return _
def _gap(self) -> float:
return self.seconds + self.noise * (random.random() - 0.5)
def on_error_exit(
method: Callable[[NodeBase, Any], None]
) -> Callable[[NodeBase, Any], None]:
@wraps(method)
def sync_wrap(node: NodeBase, *args) -> None:
try:
method(node, *args)
except Exception:
node.logger.exception("unhandled exception, bailing out!")
node.system_exit_sync()
@wraps(method)
async def async_wrap(node: NodeBase, *args) -> None:
try:
await method(node, *args)
except Exception:
node.logger.exception("unhandled exception, bailing out!")
await node.system_exit()
return async_wrap if inspect.iscoroutinefunction(method) else sync_wrap
| 28.155224 | 86 | 0.603584 | 8,244 | 0.874046 | 0 | 0 | 1,860 | 0.197201 | 4,160 | 0.441052 | 1,231 | 0.130513 |
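A minimal sketch of how these primitives compose; the class name, interval and log message are made up, and it assumes this module is importable so Node, every and asyncio are in scope.

```python
class Heartbeat(Node):
    heartbeat = 5  # seconds between tick() calls

    @every(seconds=60, noise=5)
    async def refresh_cache(self) -> None:
        self.logger.info("refreshing cache")  # throttled to roughly once a minute

    async def tick(self) -> None:
        await self.refresh_cache()


async def main() -> None:
    worker = Heartbeat()
    await worker.start()        # spawns the work() loop as a background task
    await asyncio.sleep(30)
    await worker.close()
```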
7b967b35c19a8e35142f9fb160d57122b85d9056 | 860 | py | Python | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
]
| null | null | null | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
]
| null | null | null | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
]
| null | null | null | import syaml
testcase = """---
key:
key: f
key2: l
nest:
inner: g
nest2:
nestted: 3
inner2: s
outnest: 3
ha: g
je: r
---
key: value
a_list:
- itema
- listlist:
- itemitem
- itemb
- key1: bweh
key2: bweh
key3: bweh
key4: bweh
- innerList:
- innerItem
- indict: reh
rar: dd
sublist:
- iteml
- itemc
-
- itm
- [44,55,66,"7t","8t","eeee"]
- ohno
- "test"
- "ending": obj
key: last of inner
- aa: aaa
- lastitem
anotherkey: value
...
"""
a = syaml.load(testcase)
print(a)
depth = 12
def recurse(dict):
global depth
depth -= 1 #will this be a problem?
bleh = {}
if depth == 0:
return
dict.update({"keys":recurse(bleh)})
return dict
#a[0] = recurse(a[0])
b = syaml.dump(2,a)
print(b)
print(syaml.load(b)) | 14.576271 | 37 | 0.522093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.711628 |
7b98ab75092f0df028f96b2d93df9ca2c2ab75d6 | 478 | py | Python | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
]
| null | null | null | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
]
| null | null | null | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
]
| 1 | 2021-12-20T16:55:50.000Z | 2021-12-20T16:55:50.000Z | #!/usr/bin/env python
import os
import sys
def dict_to_csv(comps, filename):
## print column headings then all attributes for each company
f = open(filename, 'wb')
columns = [x for x in comps[comps.keys()[0]].keys() if x != 'name']
columns = ['name'] + columns
f.write(','.join(columns) + '\n')
for k,v in comps.items():
for column in columns:
f.write('"' + v[column] + '"' + ',')
f.write('\n')
f.close()
| 22.761905 | 71 | 0.546025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.246862 |
7b9a53fe727088aa66ec964d3fe2b9eeb158dba7 | 543 | py | Python | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.5 on 2020-02-22 22:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0015_auto_20200222_2200'),
]
operations = [
migrations.RemoveField(
model_name='subject',
name='handedness',
),
migrations.RemoveField(
model_name='subject',
name='height',
),
migrations.RemoveField(
model_name='subject',
name='weight',
),
]
| 20.884615 | 49 | 0.546961 | 458 | 0.843462 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.254144 |
7b9bed79cdfa84b20637330716a10344fca07de2 | 799 | py | Python | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
]
| 1,224 | 2015-01-01T22:09:23.000Z | 2022-03-29T19:43:56.000Z | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
]
| 253 | 2015-01-14T03:45:51.000Z | 2022-02-08T01:18:19.000Z | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
]
| 225 | 2015-01-13T18:38:33.000Z | 2022-03-30T20:27:39.000Z | """
Load and Display a Shape.
Illustration by George Brower.
(Rewritten in Python by Jonathan Feinberg.)
The loadShape() command is used to read simple SVG (Scalable Vector Graphics)
files into a Processing sketch. This library was specifically tested under
SVG files created from Adobe Illustrator. For now, we can't guarantee that
it'll work for SVGs created with anything else.
"""
# The file "bot1.svg" must be in the data folder
# of the current sketch to load successfully
bot = loadShape("bot1.svg")
def setup():
size(640, 360)
smooth()
noLoop() # Only run draw() once
def draw():
background(102)
shape(bot, 110, 90, 100, 100) # Draw at coordinate (10, 10) at size 100 x 100
shape(bot, 280, 40) # Draw at coordinate (70, 60) at the default size
| 33.291667 | 84 | 0.700876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.767209 |
7b9c4e6a952c20a965aae8106ca3b0f977bd503c | 4,015 | py | Python | deidentify/tokenizer/tokenizer_ons.py | bbieniek/deidentify | 7021bf0540e0a7f931e65544d12a2909c79a14eb | [
"MIT"
]
| 64 | 2020-01-16T16:20:47.000Z | 2022-03-31T12:59:19.000Z | deidentify/tokenizer/tokenizer_ons.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
]
| 14 | 2020-01-28T08:47:06.000Z | 2022-02-12T08:32:12.000Z | deidentify/tokenizer/tokenizer_ons.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
]
| 12 | 2020-01-21T07:54:04.000Z | 2022-02-19T06:42:53.000Z | """
Custom tokenization routines for the 'ons' corpus. Special care is taken to metadata tokens such as
=== Report: 12345 === that were inserted to distinguish between multiple documents of a client.
They will be properly handled during the tokenization and sentence segmentation stage.
"""
import re
import spacy
from spacy.matcher import Matcher
from spacy.symbols import ORTH
from deidentify.tokenizer import Tokenizer
META_REGEX = re.compile(r'=== (?:Report|Answer): [0-9]+ ===\n')
TOKENIZER_SPECIAL_CASES = [
'B.Sc.',
'Co.',
'Dhr.',
'Dr.',
'M.Sc.',
'Mevr.',
'Mgr.',
'Mr.',
'Mw.',
'O.K.',
'a.u.b.',
'ca.',
'e.g.',
'etc.',
'v.d.'
]
def _metadata_complete(doc, i):
return doc[i].text[0] == '\n' \
and doc[i - 1].text == '=' \
and META_REGEX.match(doc[i - 9: i + 1].text)
def _metadata_sentence_segmentation(doc):
"""Custom sentence segmentation rule of the Ons corpus. It segments metadata text into separate
sentences.
Metadata consists of 10 tokens:
['=', '=', '=', 'Report|Answer', ':', 'DDDDDD', '=', '=', '=', '\n']
During sentence segmentation, we want that the metadata is always a sentence in itself.
Therefore, the first token (i.e., '=') is marked as sentence start. All other tokens
are explicitly marked as non-sentence boundaries.
    To ensure that anything immediately following the metadata starts a new sentence, the next token
is marked as sentence start.
"""
for i in range(len(doc)):
if not _metadata_complete(doc, i):
continue
# All metadata tokens excluding the leading '='.
meta_span = doc[i - 8: i + 1]
for meta_token in meta_span:
meta_token.is_sent_start = False
# The leading '=' is a sentence boundary
doc[i - 9].is_sent_start = True
# Any token following the metadata is also a new sentence.
doc[i + 1].is_sent_start = True
return doc
NLP = spacy.load('nl_core_news_sm')
try:
NLP.add_pipe(_metadata_sentence_segmentation, before="parser") # Insert before the parser
except ValueError:
# spacy>=3
from spacy.language import Language
Language.component('meta-sentence-segmentation')(_metadata_sentence_segmentation) # pylint: disable=E1101
NLP.add_pipe('meta-sentence-segmentation', before="parser") # Insert before the parser
for case in TOKENIZER_SPECIAL_CASES:
NLP.tokenizer.add_special_case(case, [{ORTH: case}])
NLP.tokenizer.add_special_case(case.lower(), [{ORTH: case.lower()}])
infixes = NLP.Defaults.infixes + [r'\(', r'\)', r'(?<=[\D])\/(?=[\D])']
infix_regex = spacy.util.compile_infix_regex(infixes)
NLP.tokenizer.infix_finditer = infix_regex.finditer
class TokenizerOns(Tokenizer):
def parse_text(self, text: str) -> spacy.tokens.doc.Doc:
"""Custom spacy tokenizer for the 'ons' corpus that takes care of special metadata tokens.
Example:
['=', '=', '=', 'Report', ':', '1234', '=', '=', '=', '\n'] is converted to
['=== Report: 1234 ===\n']
Furthermore, common Dutch abbreviations are handled.
Parameters
----------
text : str
The text to tokenize.
Returns
-------
doc : spacy.tokens.doc.Doc
Parsed spacy document.
"""
matcher = Matcher(NLP.vocab)
pattern = [
{"ORTH": "="}, {"ORTH": "="}, {"ORTH": "="},
{"ORTH": {"IN": ['Answer', 'Report']}}, {'ORTH': ':'},
{'IS_DIGIT': True, 'OP': '+'},
{"ORTH": "="}, {"ORTH": "="}, {"ORTH": "="},
{"ORTH": "\n"}
]
matcher.add("METADATA", [pattern])
doc = NLP(text, disable=self.disable)
matches = matcher(doc)
with doc.retokenize() as retokenizer:
for _, start, end in matches:
attrs = {"LEMMA": str(doc[start:end])}
retokenizer.merge(doc[start:end], attrs=attrs)
return doc
| 31.124031 | 109 | 0.596762 | 1,263 | 0.31457 | 0 | 0 | 0 | 0 | 0 | 0 | 2,003 | 0.498879 |
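A short usage sketch, assuming the deidentify package and spaCy's nl_core_news_sm model are installed; the clinical text is invented.

```python
tokenizer = TokenizerOns()
doc = tokenizer.parse_text(
    "=== Report: 1234 ===\n"
    "Dhr. Jansen kwam vandaag op controle.\n"
    "=== Answer: 5678 ===\n"
    "Geen bijzonderheden."
)

# Each metadata header is merged back into a single token and segmented as its
# own sentence; "Dhr." stays one token thanks to the special cases above.
for sent in doc.sents:
    print([token.text for token in sent])
```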
7b9c889768e3496393e2ee54739cb4b6ccbaab96 | 1,219 | py | Python | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
]
| 1 | 2022-03-09T18:07:11.000Z | 2022-03-09T18:07:11.000Z | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
]
| null | null | null | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
]
| null | null | null |
# Django
from django.conf import Settings, settings
# APPs
from systemtest.quality import forms as quality_forms, models as quality_models
from systemtest.utils.db2 import Database
def get_quality_status(status_name: str) -> quality_models.QualityStatus:
"""
Gets a specific QualityStatus by exact name
Args:
status_name:
Name of status to fetch
Raises:
DoesNotExist:
QualityStatus matching query does not exist
Returns:
QualityStatus object
"""
return quality_models.QualityStatus.objects.get(name=status_name)
def fetch_database() -> dict:
database = Database(**settings.DATABASES.get("db2"))
sql = database.get_sql(settings.QUALITY_SQL_PATH)
required_columns = {
"SYSTEM_NUMBER",
"WORKUNIT",
"OPERATION_STATUS"
}
optional_columns = {
"WORKUNIT_QTY",
"PRODUCT_LINE",
"OPERATION_NUMBER"
}
for row in database.fetch(sql):
columns = set(row.keys())
if (required_columns - columns):
continue
data = {column.lower(): row.get(column)
for column in (required_columns | optional_columns)}
yield data
| 23.442308 | 79 | 0.646432 | 0 | 0 | 620 | 0.508614 | 0 | 0 | 0 | 0 | 369 | 0.302707 |
7b9c9c8690ed96b25a9028c69ebb2b7c65845147 | 1,849 | py | Python | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
]
| 11 | 2016-02-08T11:45:26.000Z | 2017-05-19T16:07:31.000Z | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
]
| 5 | 2016-02-11T22:11:54.000Z | 2016-06-09T20:54:07.000Z | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
import asyncio
import logging
import aiohttp
from cibopath import readme_parser, github_api
from cibopath.templates import Template
logger = logging.getLogger('cibopath')
class CibopathError(Exception):
"""Custom error class for the app."""
class CookiecutterReadmeError(CibopathError):
"""Unable to retrieve readme from github.com/audreyr/cookiecutter."""
class UnableToFindTemplateLinks(CibopathError):
"""Cannot find links to templates in README."""
def fetch_template_data(username, token):
semaphore = asyncio.Semaphore(10)
loop = asyncio.get_event_loop()
auth = aiohttp.BasicAuth(username, token)
with aiohttp.ClientSession(loop=loop, auth=auth) as client:
logger.debug('Load Cookiecutter readme')
cookiecutter_readme = loop.run_until_complete(
github_api.get_readme(semaphore, client, 'audreyr', 'cookiecutter')
)
if not cookiecutter_readme:
raise CookiecutterReadmeError
logger.debug('Find GitHub links in Cookiecutter readme')
github_links, _ = readme_parser.read(cookiecutter_readme)
if not github_links:
raise UnableToFindTemplateLinks
tasks = [
github_api.get_template(semaphore, client, link)
for link in github_links
]
logger.debug('Fetch template data from links')
results = loop.run_until_complete(asyncio.gather(*tasks))
yield from filter(None, results) # Ignore all invalid templates
def load_templates(username, token):
templates = []
template_data = fetch_template_data(username, token)
for name, author, repo, context, readme in template_data:
_, tags = readme_parser.read(readme)
templates.append(Template(name, author, repo, context, sorted(tags)))
return templates
| 29.822581 | 79 | 0.70146 | 291 | 0.157382 | 1,025 | 0.554354 | 0 | 0 | 0 | 0 | 339 | 0.183342 |
7b9ce56039cc41fcf712d566d9141353c7327dc4 | 5,400 | py | Python | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
]
| 4 | 2020-06-11T00:11:17.000Z | 2022-03-17T22:58:13.000Z | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
]
| null | null | null | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
]
| 2 | 2021-12-20T20:03:05.000Z | 2022-01-12T18:51:54.000Z | # Goal:
# Demonstrate the A/B switch functionality of the SpikeSafe PSMU while operating in DC mode
#
# Expectation:
# Channel 1 will run in DC mode with the switch set to Primary.
# Afterward the Switch be set to Auxiliary mode, in which another source may operate connected to the SpikeSafe
# After the Auxiliary source has completed operation, the switch will be set to Primary to operate the SpikeSafe in DC mode again
import sys
import time
import logging
from spikesafe_python.MemoryTableReadData import log_memory_table_read
from spikesafe_python.ReadAllEvents import log_all_events
from spikesafe_python.TcpSocket import TcpSocket
from spikesafe_python.Threading import wait
from spikesafe_python.SpikeSafeError import SpikeSafeError
from tkinter import messagebox
### set these before starting application
# SpikeSafe IP address and port number
ip_address = '10.0.0.220'
port_number = 8282
### setting up sequence log
log = logging.getLogger(__name__)
logging.basicConfig(filename='SpikeSafePythonSamples.log',format='%(asctime)s, %(levelname)s, %(message)s',datefmt='%m/%d/%Y %I:%M:%S',level=logging.INFO)
### start of main program
try:
log.info("ForceSenseSwitchSample.py started.")
# instantiate new TcpSocket to connect to SpikeSafe
tcp_socket = TcpSocket()
tcp_socket.open_socket(ip_address, port_number)
# reset to default state
tcp_socket.send_scpi_command('*RST')
log_all_events(tcp_socket)
# check that the Force Sense Selector Switch is available for this SpikeSafe. We need the switch to run this sequence
# If switch related SCPI is sent and there is no switch configured, it will result in error "386, Output Switch is not installed"
tcp_socket.send_scpi_command('OUTP1:CONN:AVAIL?')
isSwitchAvailable = tcp_socket.read_data()
if isSwitchAvailable != 'Ch:1':
raise Exception('Force Sense Selector Switch is not available, and is necessary to run this sequence.')
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
# the default switch state can be manually adjusted using SCPI, so it is best to send this command even after sending a *RST
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# set Channel 1 settings to operate in DC mode
tcp_socket.send_scpi_command('SOUR1:FUNC:SHAP DC')
tcp_socket.send_scpi_command('SOUR1:CURR:PROT 50')
tcp_socket.send_scpi_command('SOUR1:CURR 0.1')
tcp_socket.send_scpi_command('SOUR1:VOLT 20')
# log all SpikeSafe event after settings are adjusted
log_all_events(tcp_socket)
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
# When operating in DC mode, the channel must be turned off before adjusting the switch state
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# set the Force Sense Selector Switch state to Auxiliary (B) so that the Auxiliary Source will be routed to the DUT and the SpikeSafe will be disconnected
tcp_socket.send_scpi_command('OUTP1:CONN AUX')
# Show a message box so any tasks using the Auxiliary source may be performed before adjusting the switch back to Primary
# The SpikeSafe is not electrically connected to the DUT at this time
messagebox.showinfo("Auxiliary Source Active", "Force Sense Selector Switch is in Auxiliary (B) mode. Perform any tests using the auxiliary source, then close this window to adjust the switch back to Primary (A) mode.")
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# disconnect from SpikeSafe
tcp_socket.close_socket()
log.info("ForceSenseSwitchSample.py completed.\n")
except SpikeSafeError as ssErr:
# print any SpikeSafe-specific error to both the terminal and the log file, then exit the application
error_message = 'SpikeSafe error: {}\n'.format(ssErr)
log.error(error_message)
print(error_message)
sys.exit(1)
except Exception as err:
# print any general exception to both the terminal and the log file, then exit the application
error_message = 'Program error: {}\n'.format(err)
log.error(error_message)
print(error_message)
sys.exit(1) | 46.551724 | 223 | 0.696111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,972 | 0.55037 |
7b9d392017b7f0eb08e175d175e38cee08ff4854 | 1,886 | py | Python | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import py_utils
import time
from telemetry.page import legacy_page_test
from telemetry.util import image_util
class Screenshot(legacy_page_test.LegacyPageTest):
"""Takes a PNG screenshot of the page."""
def __init__(self, png_outdir, wait_time=0):
super(Screenshot, self).__init__()
self._png_outdir = png_outdir
self._wait_time = wait_time
def ValidateAndMeasurePage(self, page, tab, results):
if not tab.screenshot_supported:
raise legacy_page_test.MeasurementFailure(
'Screenshotting not supported on this platform')
try:
tab.WaitForDocumentReadyStateToBeComplete()
except py_utils.TimeoutException:
logging.warning("WaitForDocumentReadyStateToBeComplete() timeout, " +
"page: %s", page.name)
return
time.sleep(self._wait_time)
if not os.path.exists(self._png_outdir):
logging.info("Creating directory %s", self._png_outdir)
try:
os.makedirs(self._png_outdir)
except OSError:
logging.warning("Directory %s could not be created", self._png_outdir)
raise
outpath = os.path.abspath(
os.path.join(self._png_outdir, page.file_safe_name)) + '.png'
# Replace win32 path separator char '\' with '\\'.
outpath = outpath.replace('\\', '\\\\')
screenshot = tab.Screenshot()
# TODO(lchoi): Add logging to image_util.py and/or augment error handling of
# image_util.WritePngFile
logging.info("Writing PNG file to %s. This may take awhile.", outpath)
start = time.time()
image_util.WritePngFile(screenshot, outpath)
logging.info("PNG file written successfully. (Took %f seconds)",
time.time()-start)
| 33.087719 | 80 | 0.698303 | 1,585 | 0.840403 | 0 | 0 | 0 | 0 | 0 | 0 | 630 | 0.33404 |
7b9dada36fd7bad56b1a0092534a61252ce1c05e | 2,474 | py | Python | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
]
| null | null | null | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
]
| null | null | null | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from tripleoclient.tests.v1.overcloud_deploy import fakes
from tripleoclient.v1 import overcloud_delete
class TestDeleteOvercloud(fakes.TestDeployOvercloud):
def setUp(self):
super(TestDeleteOvercloud, self).setUp()
self.cmd = overcloud_delete.DeleteOvercloud(self.app, None)
self.app.client_manager.workflow_engine = mock.Mock()
self.workflow = self.app.client_manager.workflow_engine
@mock.patch(
'tripleoclient.workflows.stack_management.delete_stack', autospec=True)
def test_stack_delete(self, mock_delete_stack):
clients = self.app.client_manager
orchestration_client = clients.orchestration
stack = mock.Mock()
stack.id = 12345
orchestration_client.stacks.get.return_value = stack
self.cmd._stack_delete(clients, 'overcloud')
orchestration_client.stacks.get.assert_called_once_with('overcloud')
mock_delete_stack.assert_called_once_with(
clients, stack=12345)
def test_stack_delete_no_stack(self):
clients = self.app.client_manager
orchestration_client = clients.orchestration
type(orchestration_client.stacks.get).return_value = None
self.cmd.log.warning = mock.MagicMock()
self.cmd._stack_delete(clients, 'overcloud')
orchestration_client.stacks.get.assert_called_once_with('overcloud')
self.cmd.log.warning.assert_called_once_with(
"No stack found ('overcloud'), skipping delete")
@mock.patch(
'tripleoclient.workflows.plan_management.delete_deployment_plan',
autospec=True)
def test_plan_delete(self, delete_deployment_plan_mock):
self.cmd._plan_delete(self.workflow, 'overcloud')
delete_deployment_plan_mock.assert_called_once_with(
self.workflow,
container='overcloud')
| 36.382353 | 79 | 0.719887 | 1,753 | 0.708569 | 0 | 0 | 923 | 0.37308 | 0 | 0 | 818 | 0.330639 |
7b9f976e658245e57765789e6e80ca7112711034 | 8,621 | py | Python | bird_view/models/agent_IAs_RL.py | magh24/carla_RL_IAs | a38fb353bd84330c6c20b9cc8e824d7bbb02cfe5 | [
"MIT"
]
| 39 | 2020-03-17T10:12:49.000Z | 2022-03-12T14:18:45.000Z | bird_view/models/agent_IAs_RL.py | marintoro/LearningByCheating | a13b331ee8d69071570c97b35f1348758d658ee5 | [
"MIT"
]
| null | null | null | bird_view/models/agent_IAs_RL.py | marintoro/LearningByCheating | a13b331ee8d69071570c97b35f1348758d658ee5 | [
"MIT"
]
| 16 | 2020-06-11T20:15:57.000Z | 2022-03-13T01:55:16.000Z | import numpy as np
import torch
from collections import deque, namedtuple
import cv2
import os
import carla
from .model_supervised import Model_Segmentation_Traffic_Light_Supervised
from .model_RL import DQN, Orders
class AgentIAsRL:
def __init__(self, args=None, **kwargs):
super().__init__(**kwargs)
self.args = args
path_to_folder_with_model = args.path_folder_model
path_to_model_supervised = os.path.join(path_to_folder_with_model, "model_supervised/")
path_model_supervised = None
for file in os.listdir(path_to_model_supervised):
if ".pth" in file:
if path_model_supervised is not None:
raise ValueError(
"There is multiple model supervised in folder " +
path_to_model_supervised +
" you must keep only one!",
)
path_model_supervised = os.path.join(path_to_model_supervised, file)
if path_model_supervised is None:
raise ValueError("We didn't find any model supervised in folder " +
path_to_model_supervised)
# All this magic number should match the one used when training supervised...
model_supervised = Model_Segmentation_Traffic_Light_Supervised(
len(args.steps_image), len(args.steps_image), 1024, 6, 4, args.crop_sky
)
model_supervised.load_state_dict(
torch.load(path_model_supervised, map_location=args.device)
)
model_supervised.to(device=args.device)
self.encoder = model_supervised.encoder
self.last_conv_downsample = model_supervised.last_conv_downsample
self.action_space = (args.nb_action_throttle + 1) * args.nb_action_steering
path_to_model_RL = os.path.join(path_to_folder_with_model, "model_RL")
os.chdir(path_to_model_RL)
tab_model = []
for file in os.listdir(path_to_model_RL):
if ".pth" in file:
tab_model.append(os.path.join(path_to_model_RL, file))
if len(tab_model) == 0:
raise ValueError("We didn't find any RL model in folder "+ path_to_model_RL)
self.tab_RL_model = []
for current_model in tab_model:
current_RL_model = DQN(args, self.action_space).to(device=args.device)
current_RL_model_dict = current_RL_model.state_dict()
print("we load RL model ", current_model)
checkpoint = torch.load(current_model)
# 1. filter out unnecessary keys
pretrained_dict = {
k: v
for k, v in checkpoint["model_state_dict"].items()
if k in current_RL_model_dict
}
# 2. overwrite entries in the existing state dict
current_RL_model_dict.update(pretrained_dict)
# 3. load the new state dict
current_RL_model.load_state_dict(current_RL_model_dict)
self.tab_RL_model.append(current_RL_model)
self.window = (
max([abs(number) for number in args.steps_image]) + 1
) # Number of frames to concatenate
self.RGB_image_buffer = deque([], maxlen=self.window)
self.device = args.device
self.state_buffer = deque([], maxlen=self.window)
self.State = namedtuple("State", ("image", "speed", "order", "steering"))
if args.crop_sky:
blank_state = self.State(
np.zeros(6144, dtype=np.float32), -1, -1, 0
) # RGB Image, color channet first for torch
else:
blank_state = self.State(np.zeros(8192, dtype=np.float32), -1, -1, 0)
for _ in range(self.window):
self.state_buffer.append(blank_state)
if args.crop_sky:
self.RGB_image_buffer.append(
np.zeros((3, args.front_camera_height - 120, args.front_camera_width))
)
else:
self.RGB_image_buffer.append(
np.zeros((3, args.front_camera_height, args.front_camera_width))
)
self.last_steering = 0
self.last_order = 0
self.current_timestep = 0
def act(self, state_buffer, RL_model):
speeds = []
order = state_buffer[-1].order
steerings = []
for step_image in self.args.steps_image:
state = state_buffer[step_image + self.window - 1]
speeds.append(state.speed)
steerings.append(state.steering)
images = torch.from_numpy(state_buffer[-1].image).to(self.device, dtype=torch.float32)
speeds = torch.from_numpy(np.stack(speeds).astype(np.float32)).to(
self.device, dtype=torch.float32
)
steerings = torch.from_numpy(np.stack(steerings).astype(np.float32)).to(
self.device, dtype=torch.float32
)
with torch.no_grad():
quantile_values, _ = RL_model(
images.unsqueeze(0),
speeds.unsqueeze(0),
order,
steerings.unsqueeze(0),
self.args.num_quantile_samples,
)
return quantile_values.mean(0).argmax(0).item()
# We had different mapping int/order in our training than in the CARLA benchmark,
# so we need to remap orders
def adapt_order(self, incoming_obs_command):
if incoming_obs_command == 1: # LEFT
return Orders.Left.value
if incoming_obs_command == 2: # RIGHT
return Orders.Right.value
if incoming_obs_command == 3: # STRAIGHT
return Orders.Straight.value
if incoming_obs_command == 4: # FOLLOW_LANE
return Orders.Follow_Lane.value
def run_step(self, observations):
self.current_timestep += 1
rgb = observations["rgb"].copy()
if self.args.crop_sky:
rgb = np.array(rgb)[120:, :, :]
else:
rgb = np.array(rgb)
if self.args.render:
bgr = rgb[:, :, ::-1]
cv2.imshow("network input", bgr)
cv2.waitKey(1)
rgb = np.rollaxis(rgb, 2, 0)
self.RGB_image_buffer.append(rgb)
speed = np.linalg.norm(observations["velocity"])
order = self.adapt_order(int(observations["command"]))
if self.last_order != order:
print("order = ", Orders(order).name)
self.last_order = order
np_array_RGB_input = np.concatenate(
[
self.RGB_image_buffer[indice_image + self.window - 1]
for indice_image in self.args.steps_image
]
)
torch_tensor_input = (
torch.from_numpy(np_array_RGB_input)
.to(dtype=torch.float32, device=self.device)
.div_(255)
.unsqueeze(0)
)
with torch.no_grad():
current_encoding = self.encoder(torch_tensor_input)
current_encoding = self.last_conv_downsample(current_encoding)
current_encoding_np = current_encoding.cpu().numpy().flatten()
current_state = self.State(current_encoding_np, speed, order, self.last_steering)
self.state_buffer.append(current_state)
tab_action = []
for RL_model in self.tab_RL_model:
current_action = self.act(self.state_buffer, RL_model)
tab_action.append(current_action)
steer = 0
throttle = 0
brake = 0
for action in tab_action:
steer += (
(action % self.args.nb_action_steering) - int(self.args.nb_action_steering / 2)
) * (self.args.max_steering / int(self.args.nb_action_steering / 2))
if action < int(self.args.nb_action_steering * self.args.nb_action_throttle):
throttle += (int(action / self.args.nb_action_steering)) * (
self.args.max_throttle / (self.args.nb_action_throttle - 1)
)
brake += 0
else:
throttle += 0
brake += 1.0
steer = steer / len(tab_action)
throttle = throttle / len(tab_action)
if brake < len(tab_action) / 2:
brake = 0
else:
brake = brake / len(tab_action)
control = carla.VehicleControl()
control.steer = np.clip(steer, -1.0, 1.0)
control.throttle = np.clip(throttle, 0.0, 1.0)
control.brake = np.clip(brake, 0.0, 1.0)
control.manual_gear_shift = False
self.last_steering = steer
return control
| 37.482609 | 95 | 0.594363 | 8,401 | 0.974481 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.084909 |
7b9fd2f8f9e605ca6783d2a3d4f02dcb90eb1482 | 162 | py | Python | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
]
| 5 | 2020-10-12T16:21:31.000Z | 2021-12-15T20:27:22.000Z | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
]
| null | null | null | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
]
| 5 | 2019-06-21T04:26:14.000Z | 2021-05-01T14:15:44.000Z | n = int(input())
for i in range(1 , n + 1 ):
x = input().split()
a,b,c = x
print('{:.1f}'.format((float(a) * 2 + float(b) * 3 + float(c) * 5) / 10))
| 23.142857 | 77 | 0.45679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.049383 |
7b9fd98a85b6fed6891c0ba799c31065628711f4 | 10,547 | py | Python | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
]
| null | null | null | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
]
| null | null | null | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
]
| null | null | null | import base64
import binascii
import datetime
import os
import subprocess
import random
import sys
BECH_SYMBOLS = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
OUR_BINARY = None
LIBBECH32ENC_BINARY = None
LIBBECH32DEC_BINARY = None
NODE_REF = "node . "
# region Encoding
def node_encode(hrp, data_hex):
str_in = NODE_REF + f"encode {hrp} {data_hex}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("*******")
print("Node error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {data_hex}\n")
f.write("Node error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip(), err != ''
def external_encode(hrp, bech):
indexes = get_indexes(bech)
indexes_str = indexes_to_string(indexes)
str_in = f"{LIBBECH32ENC_BINARY} {hrp} {indexes_str}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE)
proc.wait()
return proc.stdout.read().decode(encoding='ASCII').strip()
def hex_encode(hrp, data_hex):
proc = subprocess.Popen(f"{OUR_BINARY} --input-text {data_hex} --input-format hex --hrp {hrp}".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("HEX: " + data_hex)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {data_hex}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
def base64_encode(hrp, data_base64, do_trim=True):
proc = subprocess.Popen(
f"{OUR_BINARY} --input-text {data_base64} --input-format base64 --hrp {hrp}{' --trim' if do_trim else ''}"
.split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("B64: " + data_base64)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"B64: {data_base64}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
def bin_encode(hrp, data_hex):
try:
with open('b.bin', 'wb') as f:
f.write(binascii.unhexlify(data_hex))
proc = subprocess.Popen(f"{OUR_BINARY} --input-file b.bin --input-format bin --hrp {hrp}".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("B64: " + data_hex + "(as binary)")
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"BIN: {data_hex} (as binary)\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
finally:
os.remove('b.bin')
# endregion
# region Decoding
def node_decode(code):
str_in = NODE_REF + f"decode {code}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("*******")
print("Node error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"CODE: {code}\n")
f.write("Node error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip(), err != ''
def external_decode(code):
str_in = f"{LIBBECH32DEC_BINARY} {code}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE)
proc.wait()
return proc.stdout.read().decode(encoding='ASCII').strip()
def hex_decode(code):
proc = subprocess.Popen(
f"{OUR_BINARY} --decode --input-text {code} --output-format hex --allow-empty-hrp --trim".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* DECODE ERROR *******")
print("CODE: " + code)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"CODE: {code}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
# endregion
def generate_hrp():
chars = [chr(x) for x in range(33, 127)]
length = random.randint(1, 81)
ret = "".join(random.choice(chars) for _ in range(length)).lower()
while ret[0] == '-' and len(ret) > 1:
ret = ret[1:]
if ret[0] == '-':
ret = 'a'
return ret.replace("'", "").replace('"', '')
def generate_hex(hrp):
max_len = 83 - len(hrp)
chars = "0123456789abcdef"
length = random.randint(2, max_len)
if length % 2 == 1:
length -= 1
return "".join(random.choice(chars) for _ in range(length))
def to_base64(hex_str):
return base64.b64encode(base64.b16decode(hex_str)).decode(encoding='utf-8')
# Adapted from
# https://stackoverflow.com/questions/1425493/convert-hex-to-binary
def to_bin(hex_code):
return bin(int(hex_code, 16))[2:]
def extract_bech(code):
return code[code.rfind('1') + 1:-6]
def get_indexes(s):
return [BECH_SYMBOLS.index(c) for c in s]
def indexes_to_string(indexes):
return " ".join(str(i) for i in indexes)
def process(hrp, hex_str, base64_str):
success = True
try:
our_res = hex_encode(hrp, hex_str)
our_res_64 = base64_encode(hrp, base64_str, do_trim=False)
our_res_64_trim = base64_encode(hrp, base64_str)
our_res_bin = bin_encode(hrp, hex_str)
node_res, node_enc_err = node_encode(hrp, hex_str)
extract_our = extract_bech(our_res)
external_res = external_encode(hrp, extract_our)
dec_our = hex_decode(our_res)
dec_ext = external_decode(our_res)
_node_dec = node_decode(our_res)
hrp, dec_node = _node_dec[0].split(' ')
node_dec_err = _node_dec[1]
at_least_one_equal = our_res_64 == our_res or our_res_64_trim == our_res
if our_res_bin != our_res or \
not at_least_one_equal or \
our_res != external_res or \
(our_res != node_res and not node_enc_err):
success = False
print("ERROR: Our ENCODED result does not match reference result:")
print(f"HRP: {hrp}")
print(f"HEX: {hex_str}")
print(f"B64: {base64_str}")
print(f"BIN: {to_bin(hex_str)}")
print(f" Our result:\t\t{our_res}")
print(f" Our result B64:\t{our_res_64}")
print(f" Our result B64 T:\t{our_res_64_trim}")
print(f" Our result BIN:\t{our_res_bin}")
print(f" External result:\t{external_res}")
print(f" Node result:\t\t{node_res}")
with open("fuzzing_results.txt", "a") as f:
f.write("ERROR: Our ENCODED result does not match reference result:\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {hex_str}\n")
f.write(f"B64: {base64_str}\n")
f.write(f"BIN: {to_bin(hex_str)}\n")
f.write(f" Our result:\t\t{our_res}\n")
f.write(f" Our result B64:\t{our_res_64}\n")
f.write(f" Our result B64 T:\t{our_res_64}\n")
f.write(f" Our result BIN:\t{our_res_bin}\n")
f.write(f" External result:\t{external_res}\n")
f.write(f" Node result:\t\t{node_res}\n")
f.write("\n")
if dec_ext not in extract_our or (dec_our != dec_node and not node_dec_err):
success = False
print("ERROR: Our DECODED result does not match node result:")
print(f" Our result:\t\t{dec_our}")
if dec_ext not in extract_our:
print(f" External result:\t{dec_ext} not in {extract_our}")
print(f" Node result:\t\t{dec_node}")
with open("fuzzing_results.txt", "a") as f:
f.write("ERROR: Our DECODED result does not match node result:\n")
f.write(f" Our result:\t\t{dec_our}\n")
if dec_ext not in extract_our:
f.write(f" External result: {dec_ext} not in {extract_our}\n")
f.write(f" Node result:\t\t{dec_node}\n")
f.write("\n")
except Exception as e:
success = False
print(e)
with open("fuzzing_results.txt", "a") as f:
f.write(f"{hrp}\n")
f.write(f"{e}\n")
f.write("\n")
return success
if __name__ == '__main__':
OUR_BINARY = sys.argv[1]
LIBBECH32ENC_BINARY = sys.argv[2]
LIBBECH32DEC_BINARY = sys.argv[3]
FUZZ_ITERATIONS = int(sys.argv[4])
FUZZ_SECONDS = int(sys.argv[5])
_hrp = 'v)zeod9[qg.ns)+}r}'
_hex_str = '857e'
_b64_str = to_base64(_hex_str.upper())
process('a', 'ff', to_base64('FF'))
fail_count = 0
start_time = datetime.datetime.now()
for _ in range(0, FUZZ_ITERATIONS):
if not process(_hrp, _hex_str, _b64_str): fail_count += 1
_hrp = generate_hrp()
_hex_str = generate_hex(_hrp)
_b64_str = to_base64(_hex_str.upper())
end_time = datetime.datetime.now()
if (end_time - start_time).seconds >= FUZZ_SECONDS:
print(f'Fuzzing stopped after {FUZZ_SECONDS} seconds')
break
print("DONE")
sys.exit(fail_count)
| 35.156667 | 114 | 0.561297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,934 | 0.278183 |
7ba085171ad82d0c573dcc7bfc7f5421e63a5a9f | 3,166 | py | Python | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
]
| 67 | 2018-11-13T21:40:54.000Z | 2022-02-23T08:11:56.000Z | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
]
| 679 | 2018-11-13T20:10:29.000Z | 2022-03-30T19:55:25.000Z | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
]
| 119 | 2018-11-08T15:53:35.000Z | 2022-03-28T10:16:01.000Z | #!/usr/bin/env python3
"""
#------------------------------------------------------------------------------
#
# SCRIPT: forecast_task_07.py
#
# PURPOSE: Combine all non-precip 6-hourly files into one file and copy BCSD
# precip files in to the same directory Based on FORECAST_TASK_07.sh.
#
# REVISION HISTORY:
# 24 Oct 2021: Ryan Zamora, first version
#
#------------------------------------------------------------------------------
"""
#
# Standard modules
#
import configparser
import os
import subprocess
import sys
#
# Local methods
#
def _usage():
"""Print command line usage."""
txt = f"[INFO] Usage: {(sys.argv[0])} current_year month_abbr config_file"
print(txt)
print("[INFO] where")
print("[INFO] current_year: Current year")
print("[INFO] month_abbr: Current month")
print("[INFO] config_file: Config file that sets up environment")
def _read_cmd_args():
"""Read command line arguments."""
if len(sys.argv) != 4:
print("[ERR] Invalid number of command line arguments!")
_usage()
sys.exit(1)
# current_year
try:
current_year = int(sys.argv[1])
except ValueError:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
if current_year < 0:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
# month_abbr
month_abbr = str(sys.argv[2])
# config_file
config_file = sys.argv[3]
if not os.path.exists(config_file):
print(f"[ERR] {config_file} does not exist!")
sys.exit(1)
return current_year, month_abbr, config_file
def read_config(config_file):
"""Read from bcsd_preproc config file."""
config = configparser.ConfigParser()
config.read(config_file)
return config
def _driver():
"""Main driver."""
current_year, month_abbr, config_file = _read_cmd_args()
# Setup local directories
config = read_config(config_file)
# Path of the main project directory
projdir = config["bcsd_preproc"]["projdir"]
# Number of precip ensembles needed
range_ens_fcst=list(range(1, 13)) + list(range(1,13)) + list(range(1,7))
range_ens_nmme=range(1,31)
fcst_date = f"{month_abbr}01"
# Path for where forecast files are located:
indir=f"{projdir}/data/forecast/CFSv2_25km/raw/6-Hourly/{fcst_date}/{current_year}"
# Path for where the linked precip files should be placed:
outdir=f"{projdir}/data/forecast/NMME/linked_cfsv2_precip_files/{fcst_date}/{current_year}"
if not os.path.exists(outdir):
os.makedirs(outdir)
for iens, ens_value in enumerate(range_ens_fcst):
src_file=f"{indir}/ens{ens_value}"
dst_file=f"{outdir}/ens{range_ens_nmme[iens]}"
cmd = f"ln -sfn {src_file} {dst_file}"
print(cmd)
returncode = subprocess.call(cmd, shell=True)
if returncode != 0:
print("[ERR] Problem calling creating symbolic links!")
sys.exit(1)
print("[INFO] Done creating symbolic links")
#
# Main Method
#
if __name__ == "__main__":
_driver()
| 26.830508 | 95 | 0.622236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,673 | 0.528427 |
7ba2716e5d28bfa4af27d8788deae9d221d9561f | 271 | py | Python | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
]
| null | null | null | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
]
| null | null | null | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
]
| null | null | null | """
10. Faça um Programa que peça a temperatura em graus Celsius, transforme e mostre em graus Farenheit.
"""
celsius = float(input('Informe o valor em Celsius (ºC): '))
fahrenheit = (celsius * (9/5)) + 32
print('{} ºC é igual a {:.1f} ºF'.format(celsius, fahrenheit))
| 30.111111 | 101 | 0.682657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.638989 |
7ba27d2ca0843358d969fed10afe5cbbd1851036 | 12,178 | py | Python | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
]
| 10 | 2018-09-17T02:14:34.000Z | 2021-06-17T12:16:35.000Z | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
]
| null | null | null | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
]
| 2 | 2019-08-06T20:40:02.000Z | 2020-01-02T08:24:39.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
def squash(s, dim=-1, eps=1e-8):
"""
"Squashing" non-linearity that shrunks short vectors to almost zero
length and long vectors to a length slightly below 1
v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||
Args:
s: Vector before activation
dim: Dimension along which to calculate the norm
Returns:
v: Squashed vector
"""
squared_norm = torch.sum(s**2, dim=dim, keepdim=True)
v = squared_norm / (1 + squared_norm) * \
s / (torch.sqrt(squared_norm) + eps)
return v
class PrimaryCapsules(nn.Module):
def __init__(self, in_channels, out_channels,
dim_caps, kernel_size=9, stride=2):
"""
Primary Capsules Layer
NIPS 2017
Args:
in_channels: Number of input channels
out_channels: Number of output channels
dim_caps: length of the output capsule vector
"""
super(PrimaryCapsules, self).__init__()
self.dim_caps = dim_caps
self._caps_channel = int(out_channels / dim_caps)
assert self._caps_channel * dim_caps == out_channels #
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), self._caps_channel,
out.size(2), out.size(3), self.dim_caps) #
out = out.view(out.size(0), -1, self.dim_caps) #
return squash(out)
class RoutingCapsules(nn.Module):
def __init__(self, in_dim, in_caps, num_caps, dim_caps,
num_routing=3, use_cuda=True):
"""
Routing Capsules Layer
NIPS 2017
Args:
in_dim: length of input capsule vector
in_caps: Number of input capsules if digits layer
num_caps: Number of capsules in the capsule layer
dim_caps: length of the output capsule vector
num_routing: Number of iterations during routing algorithm
"""
super(RoutingCapsules, self).__init__()
self.use_cuda = use_cuda
self.in_dim = in_dim
self.in_caps = in_caps
self.num_caps = num_caps
self.dim_caps = dim_caps
self.num_routing = num_routing
self.W = nn.Parameter(0.01 * torch.randn(1, num_caps, in_caps,
dim_caps, in_dim ))
def __repr__(self):
"""
"""
tab = ' '
line = '\n'
next = ' -> '
res = self.__class__.__name__ + '('
res = res + line + tab + '(' + str(0) + '): ' + 'CapsuleLinear('
res = res + str(self.in_dim) + ', ' + str(self.dim_caps) + ')'
res = res + line + tab + '(' + str(1) + '): ' + 'Routing('
res = res + 'num_routing=' + str(self.num_routing) + ')'
res = res + line + ')'
return res
def forward(self, x):
batch_size = x.size(0)
# (batch_size, in_caps, in_dim) -> (batch_size, 1, in_caps, in_dim, 1)
x = x.unsqueeze(3).unsqueeze(1)
#
# W @ x =
# (1, num_caps, in_caps, dim_caps, in_dim)
# @
# (batch_size, 1, in_caps, in_dim, 1)
# =
# (batch_size, num_caps, in_caps, dim_caps, 1)
u_hat = torch.matmul(self.W, x)
# (batch_size, num_caps, in_caps, dim_caps)
u_hat = u_hat.squeeze(-1)
'''
detach u_hat during routing iterations
to prevent gradients from flowing, i.e.,
- In forward pass, u_hat_detached = u_hat;
- In backward, no gradient can flow from u_hat_detached back to x_hat.
'''
u_hat_detached = u_hat.detach()
# Routing algorithm
b = Variable(torch.zeros(batch_size, self.num_caps, self.in_caps, 1))
if self.use_cuda:
b = b.cuda()
for route_iter in range(self.num_routing-1):
# (batch_size, num_caps, in_caps, 1) -> Softmax along num_caps
c = F.softmax(b, dim=1)
# element-wise multiplication
# (batch_size, num_caps, in_caps, 1)
# *
# (batch_size, in_caps, num_caps, dim_caps)
# -> (batch_size, num_caps, in_caps, dim_caps)
# sum across in_caps ->
# (batch_size, num_caps, dim_caps)
s = (c * u_hat_detached).sum(dim=2)
# apply "squashing" non-linearity along dim_caps
v = squash(s)
# dot product agreement
# between the current output vj and the prediction uj|i
# (batch_size, num_caps, in_caps, dim_caps)
# @
# (batch_size, num_caps, dim_caps, 1)
# -> (batch_size, num_caps, in_caps, 1)
uv = torch.matmul(u_hat_detached, v.unsqueeze(-1))
b += uv # Note: it seems more appropriate here to use b = uv
'''
last iteration is done on the original u_hat, without the routing
weights update
use u_hat to compute v in order to backpropagate gradient
'''
c = F.softmax(b, dim=1)
s = (c * u_hat).sum(dim=2)
v = squash(s)
return v
class PrimaryCaps(nn.Module):
def __init__(self, A=32, B=32):
"""
Primary Capsule Layer
ICLR 2018
Args:
A: input channel
B: number of types of capsules.
"""
super(PrimaryCaps, self).__init__()
self.B = B
self.capsules_pose = nn.ModuleList([nn.Conv2d(in_channels=A,
out_channels=4 * 4,
kernel_size=1, stride=1)
for _ in range(self.B)])
self.capsules_activation = nn.ModuleList([nn.Conv2d(in_channels=A,
out_channels=1,
kernel_size=1, stride=1)
for _ in range(self.B)])
def forward(self, x):
poses = [self.capsules_pose[i](x) for i in range(self.B)]
poses = torch.cat(poses, dim=1)
activations = [self.capsules_activation[i](x) for i in range(self.B)]
activations = F.sigmoid(torch.cat(activations, dim=1))
return poses, activations
class ConvCaps(nn.Module):
def __init__(self, B=32, C=32, K=3, stride=2, iteration=3,
coordinate_add=False, transform_share=False,
routing='EM_routing', use_cuda=True):
"""
Convolutional Capsule Layer
ICLR 2018
Args:
B: input number of types of capsules.
C: output number of types of capsules.
K: kernel size of convolution. K = 0 means the capsules in layer L+1's receptive field contain all capsules in layer L, which is used in the final ClassCaps layer.
stride: stride of convolution
iteration: number of EM iterations
coordinate_add: whether to use Coordinate Addition
transform_share: whether to share transformation matrix.
routing: 'EM_routing' or 'angle_routing'
"""
super(ConvCaps, self).__init__()
self.routing = routing
self.use_cuda = use_cuda
self.B = B
self.C = C
self.K = K # K = 0 means full receptive field like class capsules
self.Bkk = None
self.Cww = None
self.b = None # batch_size, get it in forword process
self.stride = stride
self.coordinate_add = coordinate_add
# transform_share is also set to True if K = 0
self.transform_share = transform_share or K == 0
self.beta_v = None
self.beta_a = None
if not transform_share:
self.W = nn.Parameter(torch.randn(B, K, K, C, 4, 4))
else:
self.W = nn.Parameter(torch.randn(B, C, 4, 4))
self.iteration = iteration
def coordinate_addition(self, width_in, votes):
add = [[i / width_in, j / width_in] for i in range(width_in) for j in range(width_in)] # K,K,w,w
add = Variable(torch.Tensor(add))
if self.use_cuda:
add = add.cuda()
add = add.view(1, 1, self.K, self.K, 1, 1, 1, 2)
add = add.expand(self.b, self.B, self.K, self.K, self.C, 1, 1, 2).contiguous()
votes[:, :, :, :, :, :, :, :2, -1] = votes[:, :, :, :, :, :, :, :2, -1] + add
return votes
def down_w(self, w):
return range(w * self.stride, w * self.stride + self.K)
def EM_routing(self, lambda_, a_, V):
# routing coefficient
R = Variable(torch.ones([self.b, self.Bkk, self.Cww]), requires_grad=False)
if self.use_cuda:
R = R.cuda()
R /= self.Cww
for i in range(self.iteration):
# M-step
R = (R * a_)[..., None]
sum_R = R.sum(1)
mu = ((R * V).sum(1) / sum_R)[:, None, :, :]
sigma_square = (R * (V - mu) ** 2).sum(1) / sum_R
# E-step
if i != self.iteration - 1:
mu, sigma_square, V_, a__ = mu.data, sigma_square.data, V.data, a_.data
normal = Normal(mu, sigma_square[:, None, :, :] ** (1 / 2))
p = torch.exp(normal.log_prob(V_))
ap = a__ * p.sum(-1)
R = Variable(ap / torch.sum(ap, -1)[..., None], requires_grad=False)
else:
const = (self.beta_v.expand_as(sigma_square) + torch.log(sigma_square)) * sum_R
a = torch.sigmoid(lambda_ * (self.beta_a.repeat(self.b, 1) - const.sum(2)))
return a, mu
def angle_routing(self, lambda_, a_, V):
# routing coefficient
R = Variable(torch.zeros([self.b, self.Bkk, self.Cww]), requires_grad=False)
if self.use_cuda:
R = R.cuda()
for i in range(self.iteration):
R = F.softmax(R, dim=1)
R = (R * a_)[..., None]
sum_R = R.sum(1)
mu = ((R * V).sum(1) / sum_R)[:, None, :, :]
if i != self.iteration - 1:
u_v = mu.permute(0, 2, 1, 3) @ V.permute(0, 2, 3, 1)
u_v = u_v.squeeze().permute(0, 2, 1) / V.norm(2, -1) / mu.norm(2, -1)
R = R.squeeze() + u_v
else:
sigma_square = (R * (V - mu) ** 2).sum(1) / sum_R
const = (self.beta_v.expand_as(sigma_square) + torch.log(sigma_square)) * sum_R
a = torch.sigmoid(lambda_ * (self.beta_a.repeat(self.b, 1) - const.sum(2)))
return a, mu
def forward(self, x, lambda_):
poses, activations = x
width_in = poses.size(2)
w = int((width_in - self.K) / self.stride + 1) if self.K else 1 # 5
self.Cww = w * w * self.C
self.b = poses.size(0) #
if self.beta_v is None:
if self.use_cuda:
self.beta_v = nn.Parameter(torch.randn(1, self.Cww, 1)).cuda()
self.beta_a = nn.Parameter(torch.randn(1, self.Cww)).cuda()
else:
self.beta_v = nn.Parameter(torch.randn(1, self.Cww, 1))
self.beta_a = nn.Parameter(torch.randn(1, self.Cww))
if self.transform_share:
if self.K == 0:
self.K = width_in # class Capsules' kernel = width_in
W = self.W.view(self.B, 1, 1, self.C, 4, 4).expand(self.B, self.K, self.K, self.C, 4, 4).contiguous()
else:
W = self.W # B,K,K,C,4,4
self.Bkk = self.K * self.K * self.B
# used to store every capsule i's poses in each capsule c's receptive field
pose = poses.contiguous() # b,16*32,12,12
pose = pose.view(self.b, 16, self.B, width_in, width_in).permute(0, 2, 3, 4, 1).contiguous() # b,B,12,12,16
poses = torch.stack([pose[:, :, self.stride * i:self.stride * i + self.K,
self.stride * j:self.stride * j + self.K, :] for i in range(w) for j in range(w)],
dim=-1) # b,B,K,K,w*w,16
poses = poses.view(self.b, self.B, self.K, self.K, 1, w, w, 4, 4) # b,B,K,K,1,w,w,4,4
W_hat = W[None, :, :, :, :, None, None, :, :] # 1,B,K,K,C,1,1,4,4
votes = W_hat @ poses # b,B,K,K,C,w,w,4,4
if self.coordinate_add:
votes = self.coordinate_addition(width_in, votes)
activation = activations.view(self.b, -1)[..., None].repeat(1, 1, self.Cww)
else:
activations_ = [activations[:, :, self.down_w(x), :][:, :, :, self.down_w(y)]
for x in range(w) for y in range(w)]
activation = torch.stack(
activations_, dim=4).view(self.b, self.Bkk, 1, -1) \
.repeat(1, 1, self.C, 1).view(self.b, self.Bkk, self.Cww)
votes = votes.view(self.b, self.Bkk, self.Cww, 16)
activations, poses = getattr(self, self.routing)(lambda_, activation, votes)
return poses.view(self.b, self.C, w, w, -1), activations.view(self.b, self.C, w, w)
| 36.029586 | 172 | 0.579323 | 11,529 | 0.946707 | 0 | 0 | 0 | 0 | 0 | 0 | 3,341 | 0.274347 |
7ba31e643aa2124a524e4368c26dcf7ed0147d91 | 16,807 | py | Python | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
]
| 511 | 2015-10-17T09:28:28.000Z | 2022-02-20T21:58:56.000Z | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
]
| 575 | 2015-10-09T11:54:09.000Z | 2021-11-22T20:50:19.000Z | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
]
| 411 | 2015-10-29T13:41:45.000Z | 2022-02-11T09:27:50.000Z | #!python3
import contextlib
import json
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from dcos_e2e import cluster
from dcos_e2e import node
from dcos_test_utils import helpers as dcos_helpers
from dcos_test_utils import iam as dcos_iam
from dcos_test_utils import enterprise as dcos_ee_api
from dcos_test_utils import dcos_api
from dcos_test_utils import package
import dcos_installer_tools
import pytest
import test_marathon_lb
DCOS_E2E_BACKEND = 'DCOS_E2E_BACKEND'
DCOS_E2E_CLUSTER_ID = 'DCOS_E2E_CLUSTER_ID'
DCOS_E2E_NODE_TRANSPORT = 'DCOS_E2E_NODE_TRANSPORT'
DCOS_LOGIN_UNAME = 'DCOS_LOGIN_UNAME'
DCOS_LOGIN_PW = 'DCOS_LOGIN_PW'
BACKEND_AWS = 'aws'
BACKEND_DOCKER = 'docker'
BACKEND_VAGRANT = 'vagrant'
MARATHON_LB_IMAGE = os.environ.get('MARATHON_LB_IMAGE',
'marathon-lb:latest')
MARATHON_LB_VERSION = os.environ.get('MARATHON_LB_VERSION',
'dev')
OSS = 'oss'
ENTERPRISE = 'enterprise'
VARIANTS = {OSS: dcos_installer_tools.DCOSVariant.OSS,
ENTERPRISE: dcos_installer_tools.DCOSVariant.ENTERPRISE}
VARIANT_VALUES = dict((value.value, value) for value in VARIANTS.values())
logging.captureWarnings(True)
# NOTE(jkoelker) Define some helpers that should eventually be upstreamed
class Package(package.Cosmos):
def render(self, name, options=None, version=None):
params = {'packageName': name}
if version:
params['packageVersion'] = version
if options:
params['options'] = options
self._update_headers('render',
request_version=1,
response_version=1)
return self._post('/render', params).json().get('marathonJson')
class Secrets(dcos_helpers.ApiClientSession):
def __init__(self, default_url: dcos_helpers.Url, session=None):
super().__init__(default_url)
if session:
self.session = session
def list_stores(self):
r = self.get('/store')
r.raise_for_status()
return r.json()['array']
def list_secrets(self, store, path='/'):
params = {'list': True}
r = self.get(self.secret_uri(store, path), params=params)
r.raise_for_status()
return r.json()['array']
def create_secret(self, path, value, store='default'):
headers = None
data = None
if not isinstance(value, (str, bytes)):
value = json.dumps(value,
sort_keys=True,
indent=None,
ensure_ascii=False,
separators=(',', ':'))
json_value = {'value': value}
if isinstance(value, bytes):
headers = {'Content-Type': 'application/octet-stream'}
data = value
json_value = None
return self.put(self.secret_uri(store, path),
json=json_value,
data=data,
headers=headers)
def delete_secret(self, path, store='default'):
return self.delete(self.secret_uri(store, path))
@staticmethod
def secret_uri(store, path):
if not path.startswith('/'):
path = '/' + path
return '/secret/{}{}'.format(store, path)
def add_user_to_group(self, user, group):
return self.put('/groups/{}/users/{}'.format(group, user))
def delete_user_from_group(self, user, group):
if not self.user_in_group(user, group):
return
return self.delete('/groups/{}/users/{}'.format(group, user))
def list_group_users(self, group):
r = self.get('/groups/{}/users'.format(group))
r.raise_for_status()
return r.json()['array']
def user_in_group(self, user, group):
return user in [a['user']['uid']
for a in self.list_group_users(group)]
# NOTE(jkoelker) Monkey patch in our helpers
dcos_api.DcosApiSession.package = property(
lambda s: Package(default_url=s.default_url.copy(path='package'),
session=s.copy().session))
dcos_api.DcosApiSession.secrets = property(
lambda s: Secrets(
default_url=s.default_url.copy(path='secrets/v1'),
session=s.copy().session))
dcos_ee_api.EnterpriseApiSession.secrets = property(
lambda s: Secrets(
default_url=s.default_url.copy(path='secrets/v1'),
session=s.copy().session))
dcos_iam.Iam.add_user_to_group = add_user_to_group
dcos_iam.Iam.delete_user_from_group = delete_user_from_group
dcos_iam.Iam.list_group_users = list_group_users
dcos_iam.Iam.user_in_group = user_in_group
class Cluster(cluster.Cluster):
_USER_ZKCLI_CMD = (
'.',
'/opt/mesosphere/environment.export',
'&&',
'zkCli.sh',
'-server',
'"zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,'
'zk-5.zk:2181"'
)
_USER_OSS_EMAIL = '[email protected]'
_USER_OSS_ZK_PATH = '/dcos/users/{}'.format(_USER_OSS_EMAIL)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._variant = dcos_installer_tools.DCOSVariant.OSS
@property
def _any_master(self):
return next(iter(self.masters))
def _any_master_run(self, cmd, *args, **kwargs):
return self._any_master.run(list(cmd), *args, **kwargs)
@property
def _oss_user_exists(self):
cmd = self._USER_ZKCLI_CMD + ('get',
self._USER_OSS_ZK_PATH)
output = self._any_master_run(cmd, shell=True)
stdout = output.stdout.decode()
if stdout.strip().split('\n')[-1] == self._USER_OSS_EMAIL:
return True
return False
def _create_oss_user(self):
if self._oss_user_exists:
return
cmd = self._USER_ZKCLI_CMD + ('create',
self._USER_OSS_ZK_PATH,
self._USER_OSS_EMAIL)
self._any_master_run(cmd, shell=True)
def _delete_oss_user(self):
cmd = self._USER_ZKCLI_CMD + ('delete', self._USER_OSS_ZK_PATH)
self._any_master_run(cmd, shell=True)
def _enterprise_session(self):
cmd = ('cat', '/opt/mesosphere/etc/bootstrap-config.json')
config_result = self._any_master_run(cmd)
config = json.loads(config_result.stdout.decode())
ssl_enabled = config['ssl_enabled']
scheme = 'https://' if ssl_enabled else 'http://'
dcos_url = scheme + str(self._any_master.public_ip_address)
api = dcos_ee_api.EnterpriseApiSession(
dcos_url=dcos_url,
masters=[str(n.public_ip_address) for n in self.masters],
slaves=[str(n.public_ip_address) for n in self.agents],
public_slaves=[
str(n.public_ip_address) for n in self.public_agents
],
auth_user=dcos_api.DcosUser(credentials=self.credentials),
)
if api.ssl_enabled:
api.set_ca_cert()
api.login_default_user()
api.set_initial_resource_ids()
return api
def _oss_session(self):
api = dcos_api.DcosApiSession(
dcos_url='http://{}'.format(self._any_master.public_ip_address),
masters=[str(n.public_ip_address) for n in self.masters],
slaves=[str(n.public_ip_address) for n in self.agents],
public_slaves=[
str(n.public_ip_address) for n in self.public_agents
],
auth_user=dcos_api.DcosUser(credentials=self.credentials),
)
api.login_default_user()
return api
def _session(self):
if self.enterprise:
return self._enterprise_session()
return self._oss_session()
@property
def credentials(self):
if self.enterprise:
return {
'uid': os.environ.get(DCOS_LOGIN_UNAME, 'admin'),
'password': os.environ.get(DCOS_LOGIN_PW, 'admin')
}
return dcos_helpers.CI_CREDENTIALS
@property
def enterprise(self):
return self._variant == dcos_installer_tools.DCOSVariant.ENTERPRISE
@property
def oss(self):
return self._variant == dcos_installer_tools.DCOSVariant.OSS
@property
def variant(self):
return self._variant
@variant.setter
def variant(self, value):
# NOTE(jkoelker) Hack becuase enums from vendored libraries
# are technically different
if hasattr(value, 'value') and value.value in VARIANT_VALUES:
value = VARIANT_VALUES[value.value]
if value in VARIANTS:
value = VARIANTS[value]
if value not in dcos_installer_tools.DCOSVariant:
msg = 'Expected one of {} or {} got {}'
raise ValueError(msg.format(tuple(VARIANTS.keys()),
dcos_installer_tools.DCOSVariant,
value))
self._variant = value
def create_user(self):
if self.enterprise:
return
self._create_oss_user()
def delete_user(self):
if self.enterprise:
return
self._delete_oss_user()
def create_service_account(self, name, secret, description=None,
superuser=False):
if not self.enterprise:
return
if description is None:
description = '{} service account'.format(name)
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
priv = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
pub = key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
priv = priv.decode('ascii')
pub = pub.decode('ascii')
with self.session as session:
iam = session.iam
try:
iam.create_service(name, pub, description)
except AssertionError:
iam.delete_service(name)
iam.create_service(name, pub, description)
if superuser:
iam.add_user_to_group(name, 'superusers')
login_endpoint = 'https://leader.mesos/{}/auth/login'
# NOTE(jkoelker) override the login_endpoint to force it to
# use `leader.mesos` by default it is set
# to the dcos_url the sesion is created with
sa_creds = iam.make_service_account_credentials(name, priv)
sa_creds['login_endpoint'] = login_endpoint.format(
iam.default_url.path)
secret_ret = session.secrets.create_secret(secret, sa_creds)
if secret_ret.status_code != 201:
session.secrets.delete_secret(secret, store='default')
session.secrets.create_secret(secret, sa_creds)
def delete_service_account(self, name, secret):
if not self.enterprise:
return
with self.session as session:
iam = session.iam
iam.delete_user_from_group(name, 'superusers')
session.secrets.delete_secret(secret, store='default')
iam.delete_service(name)
@contextlib.contextmanager
def service_account(self, name, secret, description=None,
superuser=False):
try:
yield self.create_service_account(name,
secret,
description,
superuser)
finally:
self.delete_service_account(name, secret)
@property
@contextlib.contextmanager
def session(self):
with self.user:
yield self._session()
@property
@contextlib.contextmanager
def user(self):
try:
yield self.create_user()
finally:
self.delete_user()
def get_docker_cluster(cluster_id, transport, **kwargs):
from dcos_e2e_cli.dcos_docker.commands import _common
if cluster_id not in _common.existing_cluster_ids():
return None
cluster_containers = _common.ClusterContainers(cluster_id, transport)
cluster = Cluster.from_nodes(
masters=set(map(cluster_containers.to_node,
cluster_containers.masters)),
agents=set(map(cluster_containers.to_node,
cluster_containers.agents)),
public_agents=set(map(cluster_containers.to_node,
cluster_containers.public_agents)))
cluster.variant = cluster_containers.dcos_variant
return cluster
def get_cluster():
backend = os.environ.get(DCOS_E2E_BACKEND, BACKEND_DOCKER)
cluster_id = os.environ.get(DCOS_E2E_CLUSTER_ID, 'default')
if backend == BACKEND_AWS:
return None
if backend == BACKEND_VAGRANT:
return None
transport = os.environ.get(DCOS_E2E_NODE_TRANSPORT, 'docker-exec')
if transport == 'ssh':
transport = node.Transport.SSH
else:
transport = node.Transport.DOCKER_EXEC
return get_docker_cluster(cluster_id, transport)
@pytest.fixture(scope='session')
def dcos_marathon_lb_session():
'''Fixture to return `cluster.session` after deploying `marathon-lb`'''
cluster = get_cluster()
with cluster.session as session:
options = {
'marathon-lb': {
'sysctl-params': ' '.join(
['net.ipv4.tcp_fin_timeout=30',
'net.core.somaxconn=10000']),
}
}
if cluster.enterprise:
options['marathon-lb'].update({
'secret_name': 'mlb-secret',
'marathon-uri': 'https://master.mesos:8443',
'strict-mode': True
})
with cluster.service_account('mlb-principal',
'mlb-secret',
superuser=True):
app = session.package.render('marathon-lb', options=options)
app['container']['docker']['image'] = MARATHON_LB_IMAGE
app['labels']['DCOS_PACKAGE_VERSION'] = MARATHON_LB_VERSION
with session.marathon.deploy_and_cleanup(app):
yield session
@pytest.fixture(scope='session')
def agent_public_ip(dcos_marathon_lb_session):
'''Fixture to return the first public agents ip address'''
return dcos_marathon_lb_session.public_slaves[0]
@pytest.fixture(scope='session')
def dcos_version(dcos_marathon_lb_session):
'''Fixture to return the first dcos version'''
return dcos_marathon_lb_session.get_version()
@pytest.fixture(scope='session',
params=(['backends/' + f
for f in os.listdir('backends')] +
['backends_1.9/' + f
for f in os.listdir('backends_1.9')]))
def backend_app(request, dcos_version):
if dcos_version.startswith('1.9.'):
if not request.param.startswith('backends_1.9/'):
return pytest.skip('Not a 1.9 backend')
return test_marathon_lb.get_json(request.param)
if request.param.startswith('backends_1.9/'):
return pytest.skip('Not a 1.9 cluster')
return test_marathon_lb.get_json(request.param)
@pytest.fixture(scope='session')
def app_deployment(dcos_marathon_lb_session, backend_app):
session = dcos_marathon_lb_session
with session.marathon.deploy_and_cleanup(backend_app,
check_health=False):
app_id = backend_app['id']
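        # Marathon app ids are absolute ('/app'); strip the leading slash to get the plain app name.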
backend_app['name'] = app_id[1:] if app_id[0] == '/' else app_id
yield backend_app
@pytest.fixture(scope='session')
def app_port(app_deployment, agent_public_ip):
return test_marathon_lb.get_app_port(app_deployment['name'],
agent_public_ip)
def test_port(app_deployment, app_port):
assert app_port == app_deployment["labels"]["HAPROXY_0_PORT"]
def test_response(app_deployment, app_port, agent_public_ip):
(response,
status_code) = test_marathon_lb.get_app_content(app_port,
agent_public_ip)
assert status_code == 200
assert response == app_deployment['name']
| 32.571705 | 76 | 0.609091 | 9,652 | 0.574285 | 2,037 | 0.1212 | 5,256 | 0.312727 | 0 | 0 | 2,088 | 0.124234 |
7ba7975d420153a385e3680b17a15d19e06af3c9 | 308 | py | Python | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
]
| null | null | null | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
]
| null | null | null | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
]
| null | null | null |
def sumOf(s, offset):
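    """Sum the digits that match the digit `offset` positions ahead, wrapping around the string."""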
sum = 0
n = len(s)
for i in range(0, len(s)):
if s[i] == s[(i + offset) % n]:
sum += int(s[i])
return sum
file = open("./input/input1.txt", "r")
for s in file:
s = s.strip()
print('Part 1: ', sumOf(s, 1))
print('Part 2: ', sumOf(s, int(len(s)/2)))
file.close() | 14 | 43 | 0.519481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.13961 |
7ba983a2c839be1dfa3a88ffa4c32747f568686e | 2,123 | py | Python | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
]
| null | null | null | import numpy as np
from cops.graph import Graph
from cops.clustering import ClusterProblem, ClusterStructure, inflate_agent_clusters
def test_activationinflate1():
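    # Star-shaped connectivity graph: agent 0 at the hub node is connected to agents 1 and 2, one agent per cluster.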
G = Graph()
G.add_connectivity_path([0, 1])
G.add_connectivity_path([0, 2])
agent_positions = {0: 0, 1: 1, 2: 2}
G.init_agents(agent_positions)
cs = ClusterStructure(agent_clusters={"c0": [0], "c1": [1], "c2": [2]})
master = 0
cp = ClusterProblem()
cp.graph = G
cp.master = master
cp.prepare_problem(remove_dead=False)
cs = inflate_agent_clusters(cp, cs)
np.testing.assert_equal(cs.subgraphs["c0"], set([0]))
np.testing.assert_equal(cs.subgraphs["c1"], set([1]))
np.testing.assert_equal(cs.subgraphs["c2"], set([2]))
np.testing.assert_equal(cs.child_clusters["c0"], {("c1", 1), ("c2", 2)})
np.testing.assert_equal(cs.child_clusters["c1"], set())
np.testing.assert_equal(cs.child_clusters["c2"], set())
np.testing.assert_equal(cs.parent_clusters["c1"], ("c0", 0))
np.testing.assert_equal(cs.parent_clusters["c2"], ("c0", 0))
def test_inflate2():
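    # Path graph over nodes 0-11 with an extra 6-8 link; six agents grouped into three two-agent clusters.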
G = Graph()
G.add_transition_path(list(range(0, 12)))
G.add_connectivity_path(list(range(0, 12)))
G.add_connectivity_path([6, 8])
agent_positions = {0: 0, 1: 1, 2: 4, 3: 6, 4: 8, 5: 10}
G.init_agents(agent_positions)
cs = ClusterStructure(agent_clusters={"c0": [0, 1], "c1": [2, 3], "c2": [4, 5]})
master = 0
cp = ClusterProblem()
cp.graph = G
cp.master = master
cp.prepare_problem(remove_dead=False)
cs = inflate_agent_clusters(cp, cs)
np.testing.assert_equal(cs.subgraphs["c0"], set([0, 1, 2, 3]))
np.testing.assert_equal(cs.subgraphs["c1"], set([4, 5, 6, 7]))
np.testing.assert_equal(cs.subgraphs["c2"], set([8, 9, 10, 11]))
np.testing.assert_equal(cs.child_clusters["c0"], {("c1", 4)})
np.testing.assert_equal(cs.child_clusters["c1"], {("c2", 8)})
np.testing.assert_equal(cs.child_clusters["c2"], set())
np.testing.assert_equal(cs.parent_clusters["c1"], ("c0", 3))
np.testing.assert_equal(cs.parent_clusters["c2"], ("c1", 6))
| 31.686567 | 84 | 0.646726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.056524 |
7bab917bc22f4ebc30925ed7bbdf5a597c5e9ff4 | 3,243 | py | Python | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
]
| null | null | null | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
]
| 7 | 2019-12-27T20:59:12.000Z | 2020-01-08T22:53:42.000Z | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
]
| null | null | null | import csv
from sys import argv
from os import getcwd
def generate_memory_list_view(expected_memory) -> str:
"""
Convert expected memory's bytestring to a list of Cells (in string)
:param expected_memory: "A00023"
:return: [Cell("A0"), Cell("00"), Cell("23")]
"""
list_view = "["
for i in range(0, len(expected_memory), 2):
list_view += f"Cell(\"{expected_memory[i] + expected_memory[i + 1]}\"),"
list_view += "]"
return list_view
def generate_test_case(file_name: str, expected_memory: str, expected_output: str) -> str:
"""
Generate a test case string to test an *.asm file.
:param file_name: *.asm file to test
:param expected_memory: Expected memory as bytestring
:param expected_output: Expected output from STDOUT
:return: String
"""
with open(file_name) as file:
code = file.read()
expected_memory_list: str = generate_memory_list_view(expected_memory)
output: str = f"""# Generated with SpaceCat TestCase Generator.
import unittest
from spacecat import assembler, simulator
from spacecat.common_utils import Cell
test_code = \"\"\"{code}\"\"\"
class AlphabetBenchmark(unittest.TestCase):
def test_assembler(self):
a_ = assembler.Assembler.instantiate(test_code, mem_size={len(expected_memory)//2})
self.assertEqual({expected_memory_list}, a_.memory)
def test_simulator(self):
a_ = assembler.Assembler.instantiate(test_code, mem_size=128)
s_ = simulator.Simulator(mem_size=128, register_size=16, stdout_register_indices=[15])
s_.load_memory(a_.memory)
output = ""
i = 0
for _ in s_:
output += s_.return_stdout()
i += 1
if i == 10_000:
self.fail("Failed to resolve in given CPU Cycles.")
self.assertEqual('{expected_output}', output)
if __name__ == '__main__':
unittest.main()
"""
return output
def generate_test_file(output_directory: str, file_name: str, expected_memory: str, expected_output: str) -> None:
    # str.strip(".asm") would strip the characters ".", "a", "s", "m", not the suffix, mangling names like "mask.asm".
    file_only_name = file_name.split("/")[-1]
    if file_only_name.endswith(".asm"):
        file_only_name = file_only_name[:-len(".asm")]
output_directory += f"/test_{file_only_name}.py"
with open(output_directory, "w+") as file:
try:
file.write(generate_test_case(file_name, expected_memory, expected_output))
except (IOError, FileNotFoundError):
print(f"Couldn't generate test case for {file_name}")
def generate_from_config(input_directory: str, config_file: str, output_directory: str) -> None:
with open(config_file) as file:
reader = csv.reader(file, delimiter=",")
for i, row in enumerate(reader):
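            # Row 0 is the CSV header; data columns are: asm file name, expected memory, expected output.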
if i == 0:
continue
generate_test_file(output_directory, input_directory + "/" + row[0], row[1], row[2])
if __name__ == "__main__":
print("Generating...")
if len(argv) > 1:
relative_import_directory = argv[1]
config_file = argv[2]
output_directory = argv[3]
else:
relative_import_directory = "../../src/data/sample_scripts"
config_file="test_files.csv"
output_directory="../../src/test/integration_tests"
generate_from_config(relative_import_directory, config_file, output_directory) | 36.438202 | 114 | 0.662041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,627 | 0.501696 |
7bad82b4e2d7cbdb41d0bbaab31ed7d1164ed27e | 108 | py | Python | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
]
| null | null | null | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
]
| null | null | null | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
]
| null | null | null | car = ['volvo', 'toyota', 'BMW', 'Yes?']
message = f"I would like to own a {car[1].title()}"
print(message) | 27 | 51 | 0.601852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.62037 |
7baf6ff631178bc7ddca808d29592a1384d2ce35 | 10,677 | py | Python | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
]
| null | null | null | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
]
| null | null | null | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
]
| null | null | null | """
File: my_drawing
Author name: Alan Chen
----------------------
This program draws a recently famous picture of Gian (技安), one of the main characters in Doraemon (哆啦A夢).
In the original picture Gian is scared by something; here, the thing that scares him is replaced with the
Illuminati symbol and a string reading PYTHON.
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
w = GWindow(1000, 650)
def main():
"""
Draw a scared Gian.
"""
'''
#This is for adjusting the position
for i in range(0, 1000, 100):
li = GLine(i, 0, i, 650)
locatei = GLabel(str(i))
w.add(li)
w.add(locatei, i, 20)
for j in range(0, 700, 100):
lj = GLine(0, j, 1000, j)
locatej = GLabel(str(j))
w.add(lj)
w.add(locatej, 0, j)
'''
#background
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((0, 0))
bg.add_vertex((0, 325))
bg.filled = True
bg.fill_color = 'red'
bg.color = 'red'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((0, 325))
bg.add_vertex((0, 650))
bg.filled = True
bg.fill_color = 'orange'
bg.color = 'orange'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((333, 650))
bg.add_vertex((0, 650))
bg.filled = True
bg.fill_color = 'lightgreen'
bg.color = 'lightgreen'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((333, 650))
bg.add_vertex((666, 650))
bg.filled = True
bg.fill_color = 'slategrey'
bg.color = 'slategrey'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 650))
bg.add_vertex((666, 650))
bg.filled = True
bg.fill_color = 'darkcyan'
bg.color = 'darkcyan'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 650))
bg.add_vertex((1000, 400))
bg.filled = True
bg.fill_color = 'greenyellow'
bg.color = 'greenyellow'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 400))
bg.add_vertex((1000, 200))
bg.filled = True
bg.fill_color = 'khaki'
bg.color = 'khaki'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 0))
bg.add_vertex((1000, 200))
bg.filled = True
bg.fill_color = 'mistyrose'
bg.color = 'mistyrose'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 0))
bg.add_vertex((666, 0))
bg.filled = True
bg.fill_color = 'plum'
bg.color = 'plum'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((350, 0))
bg.add_vertex((666, 0))
bg.filled = True
bg.fill_color = 'magenta'
bg.color = 'magenta'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((350, 0))
bg.add_vertex((0, 0))
bg.filled = True
bg.fill_color = 'tomato'
bg.color = 'tomato'
w.add(bg)
#body
body = GOval(900, 200)
body.filled = True
body.fill_color = 'Steelblue'
body.color = 'blue'
w.add(body, 220, 570)
#face
lower_face = GOval(530, 380)
lower_face.filled = True
lower_face.fill_color = 'Steelblue'
lower_face.color = 'navy'
w.add(lower_face, 405, 260)
upper_face = GOval(485, 575)
upper_face.filled = True
upper_face.fill_color = 'Steelblue'
upper_face.color = 'Steelblue'
w.add(upper_face, 423, 40)
shadow_on_face = GOval(420, 330)
shadow_on_face.filled = True
shadow_on_face.fill_color = 'Cadetblue'
shadow_on_face.color = 'Cadetblue'
w.add(shadow_on_face, 455, 230)
shadow_on_face2 = GOval(390, 370)
shadow_on_face2.filled = True
shadow_on_face2.fill_color = 'Cadetblue'
shadow_on_face2.color = 'Cadetblue'
w.add(shadow_on_face2, 480, 170)
# right_eye
right_eye1 = GOval(90, 90)
right_eye1.filled = True
right_eye1.fill_color = 'powderblue'
right_eye1.color = 'black'
w.add(right_eye1, 525, 225)
right_eye2 = GOval(45, 80)
right_eye2.color = 'black'
w.add(right_eye2, 546, 231)
right_eye3 = GOval(30, 45)
right_eye3.color = 'black'
w.add(right_eye3, 552, 253)
right_eye4 = GOval(5, 10)
right_eye4.filled = True
right_eye4.fill_color = 'black'
right_eye4.color = 'black'
w.add(right_eye4, 565, 271)
# left_eye
left_eye1 = GOval(90, 90)
left_eye1.filled = True
left_eye1.fill_color = 'powderblue'
left_eye1.color = 'black'
w.add(left_eye1, 710, 230)
left_eye2 = GOval(60, 80)
left_eye2.color = 'black'
w.add(left_eye2, 725, 235)
left_eye3 = GOval(25, 50)
left_eye3.color = 'black'
w.add(left_eye3, 740, 250)
left_eye4 = GOval(5, 10)
left_eye4.filled = True
left_eye4.fill_color = 'black'
left_eye4.color = 'black'
w.add(left_eye4, 750, 270)
# nose
nose = GOval(80, 52) # 610 351
nose.filled = True
nose.fill_color = 'DarkSeaGreen'
nose.color = 'black'
w.add(nose, 610, 347)
# mouse
for i in range(10):
mouse = GOval(50, 80)
mouse.filled = True
mouse.fill_color = 'navy'
mouse.color = 'navy'
w.add(mouse, 560 + 4 * i, 430 - i)
for i in range(100):
mouse = GOval(50, 80)
mouse.filled = True
mouse.fill_color = 'navy'
mouse.color = 'navy'
w.add(mouse, 600 + i, 420)
# tongue
for i in range(15):
tongue = GOval(50, 40)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 570 + 2 * i, 470 - i)
for i in range(10):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 455)
for i in range(25):
tongue = GOval(50, 30)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 475)
for i in range(50):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 650 + i, 455)
# hair
top_hair = GOval(330, 95)
top_hair.filled = True
top_hair.fill_color = 'navy'
top_hair.color = 'navy'
w.add(top_hair, 505, 25)
bangs = GPolygon()
bangs.add_vertex((510, 82))
bangs.add_vertex((620, 82))
bangs.add_vertex((560, 147))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((580, 98))
bangs.add_vertex((690, 98))
bangs.add_vertex((635, 155))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((650, 96))
bangs.add_vertex((770, 96))
bangs.add_vertex((710, 150))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((740, 85))
bangs.add_vertex((825, 85))
bangs.add_vertex((780, 148))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
for i in range(80): # rightside
side = GOval(40, 90)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 800 + i, 55 + i ** 1.2)
for i in range(100): # leftside
side = GOval(40, 40)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 500 - i, 60 + i ** 1.2)
# right_ear
right_ear = GOval(70, 130)
right_ear.filled = True
right_ear.fill_color = 'Steelblue'
right_ear.color = 'blue'
w.add(right_ear, 395, 250)
right_inear = GOval(50, 80)
right_inear.filled = True
right_inear.fill_color = 'royalblue'
right_inear.color = 'blue'
w.add(right_inear, 410, 290)
# left_ear
left_ear = GOval(70, 130)
left_ear.filled = True
left_ear.fill_color = 'Steelblue'
left_ear.color = 'blue'
w.add(left_ear, 880, 260)
left_inear = GOval(50, 80)
left_inear.filled = True
left_inear.fill_color = 'royalblue'
left_inear.color = 'blue'
w.add(left_inear, 890, 290)
# tears
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 525, 300)
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 750, 300)
#left tears
for i in range(0, 10, 2):
tear = GOval(15, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525 - 2* i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(21, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 515 + i, 400 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(18, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525, 500 + 10 * i)
#right tears
for i in range(0, 10, 2):
tear = GOval(5, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 790 + 2 * i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(11, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 808 - i, 410 + 10 * i)
#lines
line1 = GLine(525, 175, 575, 185)
w.add(line1)
line2 = GLine(575,185, 625, 270)
w.add(line2)
line3 = GLine(710, 255, 760, 170)
w.add(line3)
line4 = GLine(651, 400, 651, 420)
w.add(line4)
line5 = GLine(630, 520, 660, 520)
w.add(line5)
# Illuminati
tri = GPolygon()
tri.add_vertex((150, 20))
tri.add_vertex((-20, 280))
tri.add_vertex((320, 280))
tri.filled = True
tri.fill_color = 'green'
w.add(tri)
up_eye = GArc(200, 120, 0, 180)
up_eye.filled = True
up_eye.fill_color = 'darkgreen'
w.add(up_eye, 50, 150)
low_eye = GArc(200, 120, -12, -167)
low_eye.filled = True
low_eye.fill_color = 'darkgreen'
low_eye.color = 'darkgreen'
w.add(low_eye, 50, 145)
eye_ball = GOval(55, 55)
eye_ball.filled = True
eye_ball.fill_color = 'black'
w.add(eye_ball, 125, 150)
py = GLabel('PYTHON')
py.font = '-50'
w.add(py, 20, 280)
if __name__ == '__main__':
main()
| 24.156109 | 114 | 0.579189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,726 | 0.161505 |
7bb1307068ac9567a6b6b9039165f859f8bd8998 | 127 | py | Python | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
]
| 5 | 2021-01-12T02:53:07.000Z | 2022-03-02T17:58:18.000Z | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
]
| null | null | null | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
]
| null | null | null | """ This module doesn't provide a register method and should be skipped. This ensures that the error handling logic works. """
| 63.5 | 126 | 0.76378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.992126 |
7bb15b935b3d0af4caae284ba8b64031d24bf414 | 3,196 | py | Python | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
]
| null | null | null | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
]
| null | null | null | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
]
| 2 | 2022-01-01T06:58:10.000Z | 2022-01-12T15:59:38.000Z | import json
import os
import subprocess
import requests
from bs4 import BeautifulSoup
from ciri import HelpStr
from ciri.utils import ciri_cmd, eor
@ciri_cmd(pattern="red(?:dit)? (.*)")
async def reddit(e):
url = e.pattern_match.group(1)
if not url:
return await e.edit("`No url provided?`")
if not "reddit.com" in url:
return await e.edit("`Invalid reddit url.`")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
}
r = requests.get(url, headers=headers)
if not r.status_code == 200:
return await e.edit("`Invalid reddit url, returned 404.`")
post_id = get_post_id(url)
vid, aud, title = get_download_url(post_id, r)
msg = await eor(e, f"`Downloading...`")
file = download_files(aud, vid, title)
await msg.delete()
await e.client.send_file(e.chat_id, file, caption=f"`{title}`")
def get_post_id(url: str) -> str:
post_id = url[url.find("comments/") + 9 :]
post_id = f"t3_{post_id[:post_id.find('/')]}"
return post_id
def get_download_url(post_id: str, data: bytes):
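    """Parse the post page's embedded JSON state and return (video_url, audio_url, title) for the DASH streams."""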
soup = BeautifulSoup(data.content, "html.parser")
required_js = soup.find("script", id="data")
json_data = json.loads(required_js.text.replace("window.___r = ", "")[:-1])
title = json_data["posts"]["models"][post_id]["title"]
title = title.replace(" ", "_")
dash_url = json_data["posts"]["models"][post_id]["media"]["dashUrl"]
height = json_data["posts"]["models"][post_id]["media"]["height"]
if height == "1080":
height = "480"
dash_url = dash_url[: int(dash_url.find("DASH")) + 4]
return f"{dash_url}_{height}.mp4", f"{dash_url}_audio.mp3", title
def download_files(a, v, title="reddit"):
with requests.get(a) as r:
if r.status_code == 200:
with open(f"{title}_aud.mp3", "wb") as f:
f.write(r.content)
else:
with requests.get(a.split("DASH_audio.mp3")[0] + "audio") as r:
if r.status_code == 200:
with open(f"{title}_aud.mp3", "wb") as f:
f.write(r.content)
with requests.get(v) as r:
if r.status_code == 200:
with open(f"{title}_vid.mp4", "wb") as f:
f.write(r.content)
else:
with requests.get(v.split(".mp4")[0]) as r:
if r.status_code == 200:
with open(f"{title}_vid.mp4", "wb") as f:
f.write(r.content)
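    # Mux the separately downloaded video and audio streams into one mp4 with ffmpeg.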
subprocess.call(
[
"ffmpeg",
"-i",
f"{title}_vid.mp4",
"-i",
f"{title}_aud.mp3",
"-map",
"0:v",
"-map",
"1:a",
"-c:v",
"copy",
f"{title}.mp4",
]
)
os.remove(f"{title}_vid.mp4")
os.remove(f"{title}_aud.mp3")
return f"{title}.mp4"
HelpStr.update(
{
"reddit": {
"red(ddit)": {
"Description": "Downloads the audio and video from a reddit post.",
"Usage": "red(ddit <url>)",
},
}
}
)
| 30.730769 | 129 | 0.54005 | 0 | 0 | 0 | 0 | 799 | 0.25 | 761 | 0.23811 | 888 | 0.277847 |
7bb4c8f4351e98128f6ae2e0b66016892643437c | 2,343 | py | Python | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
]
| null | null | null | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
]
| null | null | null | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
]
| null | null | null | import requests, json
import numpy as np
import importlib
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic; commented out so this module stays valid importable Python)
import pdb
import warnings
warnings.filterwarnings('ignore')
def plot_barh(s_to_plot, title = '', xlabel = '', ylabel = '',
color_palette = 'YlGnBu', pre_unit = '', suff_unit = '', round_place = '',
xllim_set = 0, xulim_set = 1):
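    """Plot a horizontal bar chart of a pandas Series and label each bar with its value."""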
y_labels = s_to_plot.index
# Plot the figure.
plt.figure(figsize=(12, 8))
ax = s_to_plot.plot(kind='barh')
plt.barh(s_to_plot.index, s_to_plot,
color = sns.color_palette(color_palette, len(s_to_plot.index)))
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_yticklabels(y_labels)
#x_llim, x_ulim = ax.get_xlim()
#x_ulim = xulim_set
ax.set_xlim(xllim_set, xulim_set) # expand xlim to make labels easier to read
plt.tight_layout()
rects = ax.patches
# For each bar: Place a label
for rect in rects:
# Get X and Y placement of label from rect.
x_value = rect.get_width()
y_value = rect.get_y() + rect.get_height() / 2
# Number of points between bar and label. Change to your liking.
space = 5
# Vertical alignment for positive values
ha = 'left'
# If value of bar is negative: Place label left of bar
if x_value < 0:
# Invert space to place label to the left
space *= -1
# Horizontally align label at right
ha = 'right'
label_string = "{:."+str(round_place)+"f}"
# Use X value as label and format number with one decimal place
#label = "{:.1f}".format(x_value)
label = label_string.format(x_value)
# Create annotation
plt.annotate(
pre_unit+label+suff_unit, # Use `label` as label
(x_value, y_value), # Place label at end of the bar
xytext=(space, 0), # Horizontally shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
va='center', # Vertically center label
ha=ha) # Horizontally align label differently for
# positive and negative values. | 34.455882 | 82 | 0.600512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.358088 |
7bb571ec75fa6c41fe74464726a90fe46a7374f0 | 4,373 | py | Python | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
]
| null | null | null | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
]
| null | null | null | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
]
| null | null | null | from re import I
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import CONF_ID, STATE_CLASS_MEASUREMENT, UNIT_EMPTY, UNIT_METER
# DEPENDENCIES = ["i2c"]
AUTO_LOAD = ["sensor", "binary_sensor", "text_sensor"]
MULTI_CONF = True
CONF_ROODE_ID = "roode_id"
roode_ns = cg.esphome_ns.namespace("roode")
Roode = roode_ns.class_("Roode", cg.PollingComponent)
CONF_ROI_HEIGHT = 'roi_height'
CONF_ROI_WIDTH = 'roi_width'
CONF_ADVISED_SENSOR_ORIENTATION = 'advised_sensor_orientation'
CONF_CALIBRATION = "calibration"
CONF_ROI_CALIBRATION = "roi_calibration"
CONF_INVERT_DIRECTION = "invert_direction"
CONF_MAX_THRESHOLD_PERCENTAGE = "max_threshold_percentage"
CONF_MIN_THRESHOLD_PERCENTAGE = "min_threshold_percentage"
CONF_MANUAL_THRESHOLD = "manual_threshold"
CONF_THRESHOLD_PERCENTAGE = "threshold_percentage"
CONF_RESTORE_VALUES = "restore_values"
CONF_I2C_ADDRESS = "i2c_address"
CONF_SENSOR_MODE = "sensor_mode"
CONF_MANUAL = "manual"
CONF_MANUAL_ACTIVE = "manual_active"
CONF_CALIBRATION_ACTIVE = "calibration_active"
CONF_TIMING_BUDGET = "timing_budget"
TYPES = [
CONF_RESTORE_VALUES, CONF_INVERT_DIRECTION,
CONF_ADVISED_SENSOR_ORIENTATION, CONF_I2C_ADDRESS
]
CONFIG_SCHEMA = (cv.Schema({
cv.GenerateID():
cv.declare_id(Roode),
cv.Optional(CONF_INVERT_DIRECTION, default='false'):
cv.boolean,
cv.Optional(CONF_RESTORE_VALUES, default='false'):
cv.boolean,
cv.Optional(CONF_ADVISED_SENSOR_ORIENTATION, default='true'):
cv.boolean,
cv.Optional(CONF_I2C_ADDRESS, default=0x29):
cv.uint8_t,
cv.Exclusive(
CONF_CALIBRATION, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
cv.Schema({
cv.Optional(CONF_CALIBRATION_ACTIVE, default='true'):
cv.boolean,
cv.Optional(CONF_MAX_THRESHOLD_PERCENTAGE, default=85):
cv.int_range(min=50, max=100),
cv.Optional(CONF_MIN_THRESHOLD_PERCENTAGE, default=0):
cv.int_range(min=0, max=100),
cv.Optional(CONF_ROI_CALIBRATION, default='false'):
cv.boolean,
}),
cv.Exclusive(
CONF_MANUAL, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
cv.Schema({
cv.Optional(CONF_MANUAL_ACTIVE, default='true'):
cv.boolean,
cv.Optional(CONF_TIMING_BUDGET, default=10):
cv.int_range(min=10, max=1000),
cv.Inclusive(
CONF_SENSOR_MODE,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=-1, max=2),
cv.Inclusive(
CONF_ROI_HEIGHT,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=4, max=16),
cv.Inclusive(
CONF_ROI_WIDTH,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=4, max=16),
cv.Inclusive(
CONF_MANUAL_THRESHOLD,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=40, max=4000),
}),
}).extend(cv.polling_component_schema("100ms")))
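# Forward each configured option to the generated C++ hub through its set_<key>() setter; only keys present in the YAML are applied.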
async def setup_conf(config, key, hub):
if key in config:
cg.add(getattr(hub, f"set_{key}")(config[key]))
def setup_manual_mode(config, hub):
manual = config[CONF_MANUAL]
for key in manual:
cg.add(getattr(hub, f"set_{key}")(manual[key]))
def setup_calibration_mode(config, hub):
calibration = config[CONF_CALIBRATION]
for key in calibration:
cg.add(getattr(hub, f"set_{key}")(calibration[key]))
async def to_code(config):
hub = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(hub, config)
cg.add_library("EEPROM", None)
cg.add_library("Wire", None)
cg.add_library("pololu", "1.3.0", "VL53L1X")
for key in TYPES:
await setup_conf(config, key, hub)
if CONF_MANUAL in config:
setup_manual_mode(config, hub)
if CONF_CALIBRATION in config:
setup_calibration_mode(config, hub)
| 35.266129 | 121 | 0.69426 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.128516 | 1,117 | 0.255431 |
7bb63dc88642b89c018c0dffd26e55f7b04e4fc6 | 7,767 | py | Python | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
]
| 2 | 2019-04-18T14:37:05.000Z | 2020-10-25T02:47:26.000Z | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
]
| null | null | null | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Projects\Python\Folder Maker\GUI\main.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(729, 384)
MainWindow.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.folderPathLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.folderPathLineEdit.setGeometry(QtCore.QRect(110, 20, 581, 22))
self.folderPathLineEdit.setObjectName("folderPathLineEdit")
self.openFolderPath = QtWidgets.QPushButton(self.centralwidget)
self.openFolderPath.setGeometry(QtCore.QRect(10, 17, 93, 28))
self.openFolderPath.setObjectName("openFolderPath")
self.toolBox = QtWidgets.QToolBox(self.centralwidget)
self.toolBox.setGeometry(QtCore.QRect(20, 70, 671, 271))
self.toolBox.setObjectName("toolBox")
self.movies = QtWidgets.QWidget()
self.movies.setGeometry(QtCore.QRect(0, 0, 671, 209))
self.movies.setObjectName("movies")
self.addButton = QtWidgets.QPushButton(self.movies)
self.addButton.setGeometry(QtCore.QRect(550, 30, 93, 28))
self.addButton.setObjectName("addButton")
self.createMoviesButton = QtWidgets.QPushButton(self.movies)
self.createMoviesButton.setGeometry(QtCore.QRect(550, 150, 93, 28))
self.createMoviesButton.setObjectName("createMoviesButton")
self.removeButton = QtWidgets.QPushButton(self.movies)
self.removeButton.setGeometry(QtCore.QRect(550, 110, 93, 28))
self.removeButton.setObjectName("removeButton")
self.editButton = QtWidgets.QPushButton(self.movies)
self.editButton.setGeometry(QtCore.QRect(550, 70, 93, 28))
self.editButton.setObjectName("editButton")
self.listWidget = QtWidgets.QListWidget(self.movies)
self.listWidget.setGeometry(QtCore.QRect(57, 5, 451, 191))
font = QtGui.QFont()
font.setPointSize(12)
self.listWidget.setFont(font)
self.listWidget.setObjectName("listWidget")
self.subsChecBox = QtWidgets.QCheckBox(self.movies)
self.subsChecBox.setGeometry(QtCore.QRect(550, 0, 81, 20))
self.subsChecBox.setObjectName("subsChecBox")
self.toolBox.addItem(self.movies, "")
self.tvShow = QtWidgets.QWidget()
self.tvShow.setGeometry(QtCore.QRect(0, 0, 671, 209))
self.tvShow.setObjectName("tvShow")
self.seriesName = QtWidgets.QLineEdit(self.tvShow)
self.seriesName.setGeometry(QtCore.QRect(230, 20, 241, 22))
self.seriesName.setObjectName("seriesName")
self.label = QtWidgets.QLabel(self.tvShow)
self.label.setGeometry(QtCore.QRect(140, 20, 81, 16))
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.tvShow)
self.label_2.setGeometry(QtCore.QRect(100, 60, 121, 20))
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.numberOfSeasons = QtWidgets.QLineEdit(self.tvShow)
self.numberOfSeasons.setGeometry(QtCore.QRect(230, 60, 241, 22))
self.numberOfSeasons.setObjectName("numberOfSeasons")
self.numberOfEpisodes = QtWidgets.QLineEdit(self.tvShow)
self.numberOfEpisodes.setGeometry(QtCore.QRect(230, 100, 241, 22))
self.numberOfEpisodes.setObjectName("numberOfEpisodes")
self.label_3 = QtWidgets.QLabel(self.tvShow)
self.label_3.setGeometry(QtCore.QRect(100, 100, 121, 20))
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.tvShow)
self.label_4.setGeometry(QtCore.QRect(100, 140, 121, 20))
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.createSeries = QtWidgets.QPushButton(self.tvShow)
self.createSeries.setGeometry(QtCore.QRect(530, 70, 93, 28))
self.createSeries.setObjectName("createSeries")
self.groupBox = QtWidgets.QGroupBox(self.tvShow)
self.groupBox.setGeometry(QtCore.QRect(230, 129, 241, 71))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.perEpisodeRb = QtWidgets.QRadioButton(self.groupBox)
self.perEpisodeRb.setGeometry(QtCore.QRect(10, 10, 221, 20))
self.perEpisodeRb.setChecked(True)
self.perEpisodeRb.setObjectName("perEpisodeRb")
self.perSeasonRb = QtWidgets.QRadioButton(self.groupBox)
self.perSeasonRb.setGeometry(QtCore.QRect(10, 40, 221, 20))
self.perSeasonRb.setObjectName("perSeasonRb")
self.toolBox.addItem(self.tvShow, "")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.toolBox.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Folder Maker"))
self.openFolderPath.setText(_translate("MainWindow", "Folder Path"))
self.addButton.setText(_translate("MainWindow", "Add "))
self.addButton.setShortcut(_translate("MainWindow", "A"))
self.createMoviesButton.setText(_translate("MainWindow", "Create "))
self.createMoviesButton.setShortcut(_translate("MainWindow", "C"))
self.removeButton.setText(_translate("MainWindow", "Remove"))
self.removeButton.setShortcut(_translate("MainWindow", "R"))
self.editButton.setText(_translate("MainWindow", "Edit"))
self.editButton.setShortcut(_translate("MainWindow", "E"))
self.subsChecBox.setText(_translate("MainWindow", "Subtitles"))
self.toolBox.setItemText(self.toolBox.indexOf(self.movies), _translate("MainWindow", "Movies"))
self.label.setText(_translate("MainWindow", "Series Name"))
self.label_2.setText(_translate("MainWindow", "Number of Seasons"))
self.label_3.setText(_translate("MainWindow", "Number of Episodes"))
self.label_4.setText(_translate("MainWindow", "Mode"))
self.createSeries.setText(_translate("MainWindow", "Create Series"))
self.perEpisodeRb.setToolTip(_translate("MainWindow", "Creates sub directories for episodes inside a Season directory"))
self.perEpisodeRb.setText(_translate("MainWindow", "Sub Directory Per Episode"))
self.perSeasonRb.setToolTip(_translate("MainWindow", "Creates only Seasons directories and Subtitles directories inside them"))
self.perSeasonRb.setText(_translate("MainWindow", "Directory Per Season Only"))
self.toolBox.setItemText(self.toolBox.indexOf(self.tvShow), _translate("MainWindow", "TV Series"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 55.085106 | 135 | 0.709927 | 7,262 | 0.934981 | 0 | 0 | 0 | 0 | 0 | 0 | 1,175 | 0.151281 |
7bb80c3ecc1f81bebc7a34d9d8f2cc068b53480f | 1,632 | py | Python | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
]
| 1 | 2019-05-01T18:24:49.000Z | 2019-05-01T18:24:49.000Z | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
]
| null | null | null | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
]
| null | null | null | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop(columns='Loan_ID')
print(banks.isnull().sum())
bank_mode = banks.mode()
#print(bank_mode)
banks = banks.fillna(0)
print(banks.isna().sum())
#code ends here
# --------------
# Code starts here
avg_loan_amount = pd.pivot_table(data=banks, index=['Gender', 'Married', 'Self_Employed'],values='LoanAmount', aggfunc=np.mean)
# code ends here
# --------------
# code starts here
loan_approved_se = banks[(banks['Self_Employed']=='Yes')&(banks['Loan_Status']=='Y')].shape[0]
loan_approved_nse=banks[(banks['Self_Employed']=='No')&(banks['Loan_Status']=='Y')].shape[0]
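# Total number of loan records in the dataset (hard-coded as 614).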
Loan_Status = 614
percentage_se = (loan_approved_se/Loan_Status)*100
percentage_nse = (loan_approved_nse/Loan_Status)*100
# code ends here
# --------------
# code starts here
banks.Loan_Amount_Term.iloc[0]
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12)
banks['Loan_Amount_Term']=banks['Loan_Amount_Term'].apply(lambda x: int(x)/12)
big_loan_term= banks[banks['Loan_Amount_Term']>=25].shape[0]
# code ends here
# --------------
# code starts here
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby = banks.groupby(by='Loan_Status')
loan_groupby = loan_groupby[columns_to_show]
mean_values = loan_groupby.agg([np.mean])
# code ends here
| 22.666667 | 127 | 0.700368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 591 | 0.362132 |
7bb9a05e4b4df3445a16a9d49bf23b734a000bdc | 1,718 | py | Python | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
]
| 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
]
| 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
]
| 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | import pytest
import torch
from espnet2.tts.feats_extract.energy import Energy
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_forward(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=128,
hop_length=64,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
xs = torch.randn(2, 384)
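    # Batch of two random waveforms; the second is treated as only 128 samples long (the rest is padding).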
if not use_token_averaged_energy:
es, elens = layer(xs, torch.LongTensor([384, 128]))
assert es.shape[1] == max(elens)
else:
ds = torch.LongTensor([[3, 3, 1], [3, 0, 0]]) // reduction_factor
dlens = torch.LongTensor([3, 1])
es, _ = layer(
xs, torch.LongTensor([384, 128]), durations=ds, durations_lengths=dlens
)
assert torch.isnan(es).sum() == 0
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_output_size(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=4,
hop_length=1,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
print(layer.output_size())
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_get_parameters(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=4,
hop_length=1,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
print(layer.get_parameters())
| 30.140351 | 88 | 0.661816 | 0 | 0 | 0 | 0 | 1,629 | 0.948196 | 0 | 0 | 150 | 0.087311 |
7bba3197cf6ebc84a1f3034725dd0f1b29fd1b82 | 4,699 | py | Python | squad/merge.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
]
| 89 | 2018-08-25T07:59:07.000Z | 2021-05-04T06:37:27.000Z | squad/merge.py | seominjoon/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
]
| 11 | 2018-09-28T17:33:27.000Z | 2019-11-27T23:34:45.000Z | squad/merge.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
]
| 10 | 2018-09-19T06:48:06.000Z | 2020-04-14T20:42:06.000Z | """Official merge script for PI-SQuAD v0.1"""
from __future__ import print_function
import os
import argparse
import json
import sys
import shutil
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
import numpy.linalg
def get_q2c(dataset):
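    """Map each question id to the id of its context paragraph ("<article title>_<paragraph index>")."""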
q2c = {}
for article in dataset:
for para_idx, paragraph in enumerate(article['paragraphs']):
cid = '%s_%d' % (article['title'], para_idx)
for qa in paragraph['qas']:
q2c[qa['id']] = cid
return q2c
def get_predictions(context_emb_path, question_emb_path, q2c, sparse=False, metric='ip', progress=False):
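    """Score every candidate phrase embedding against its question embedding and keep the best phrase per question."""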
context_emb_dir, context_emb_ext = os.path.splitext(context_emb_path)
question_emb_dir, question_emb_ext = os.path.splitext(question_emb_path)
if context_emb_ext == '.zip':
print('Extracting %s to %s' % (context_emb_path, context_emb_dir))
shutil.unpack_archive(context_emb_path, context_emb_dir)
if question_emb_ext == '.zip':
print('Extracting %s to %s' % (question_emb_path, question_emb_dir))
shutil.unpack_archive(question_emb_path, question_emb_dir)
if progress:
from tqdm import tqdm
else:
tqdm = lambda x: x
predictions = {}
for id_, cid in tqdm(q2c.items()):
q_emb_path = os.path.join(question_emb_dir, '%s.npz' % id_)
c_emb_path = os.path.join(context_emb_dir, '%s.npz' % cid)
c_json_path = os.path.join(context_emb_dir, '%s.json' % cid)
if not os.path.exists(q_emb_path):
print('Missing %s' % q_emb_path)
continue
if not os.path.exists(c_emb_path):
print('Missing %s' % c_emb_path)
continue
if not os.path.exists(c_json_path):
print('Missing %s' % c_json_path)
continue
load = scipy.sparse.load_npz if sparse else np.load
q_emb = load(q_emb_path) # shape = [M, d], d is the embedding size.
c_emb = load(c_emb_path) # shape = [N, d], d is the embedding size.
with open(c_json_path, 'r') as fp:
phrases = json.load(fp)
if sparse:
if metric == 'ip':
sim = c_emb * q_emb.T
m = sim.max(1)
m = np.squeeze(np.array(m.todense()), 1)
elif metric == 'l1':
m = scipy.sparse.linalg.norm(c_emb - q_emb, ord=1, axis=1)
elif metric == 'l2':
m = scipy.sparse.linalg.norm(c_emb - q_emb, ord=2, axis=1)
else:
q_emb = q_emb['arr_0']
c_emb = c_emb['arr_0']
if metric == 'ip':
sim = np.matmul(c_emb, q_emb.T)
m = sim.max(1)
elif metric == 'l1':
m = numpy.linalg.norm(c_emb - q_emb, ord=1, axis=1)
elif metric == 'l2':
m = numpy.linalg.norm(c_emb - q_emb, ord=2, axis=1)
argmax = m.argmax(0)
predictions[id_] = phrases[argmax]
if context_emb_ext == '.zip':
shutil.rmtree(context_emb_dir)
if question_emb_ext == '.zip':
shutil.rmtree(question_emb_dir)
return predictions
if __name__ == '__main__':
squad_expected_version = '1.1'
parser = argparse.ArgumentParser(description='Official merge script for PI-SQuAD v0.1')
parser.add_argument('data_path', help='Dataset file path')
parser.add_argument('context_emb_dir', help='Context embedding directory')
parser.add_argument('question_emb_dir', help='Question embedding directory')
parser.add_argument('pred_path', help='Prediction json file path')
parser.add_argument('--sparse', default=False, action='store_true',
help='Whether the embeddings are scipy.sparse or pure numpy.')
parser.add_argument('--metric', type=str, default='ip',
help='ip|l1|l2 (inner product or L1 or L2 distance)')
parser.add_argument('--progress', default=False, action='store_true', help='Show progress bar. Requires `tqdm`.')
args = parser.parse_args()
with open(args.data_path) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json['version'] != squad_expected_version:
print('Evaluation expects v-' + squad_expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
q2c = get_q2c(dataset)
predictions = get_predictions(args.context_emb_dir, args.question_emb_dir, q2c, sparse=args.sparse,
metric=args.metric, progress=args.progress)
with open(args.pred_path, 'w') as fp:
json.dump(predictions, fp)
| 38.516393 | 117 | 0.613109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.176846 |
7bba6288445870de13beac5ccea088e511b9306b | 3,918 | py | Python | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
]
| null | null | null | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
]
| null | null | null | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
]
| null | null | null | from functools import cached_property
from datetime import datetime
from math import degrees, radians, sin, cos
import numpy as np
from orbit_predictor import coordinate_systems
from .utils import get_timezone_from_latlon
from .time import make_utc
from ._time import datetime2mjd
from .solar import sun_pos_mjd
from ._rotations import elevation_at_rad
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
class Location:
def __init__(self, name, latitude_deg, longitude_deg, elevation_m):
"""Location.
Parameters
        ----------
        name : str
            Name of the location.
latitude_deg : float
Latitude in degrees.
longitude_deg : float
Longitude in degrees.
elevation_m : float
Elevation in meters.
"""
self.name = name
self.latitude_deg = latitude_deg
self.longitude_deg = longitude_deg
self.latitude_rad = radians(latitude_deg)
self.longitude_rad = radians(longitude_deg)
self.elevation_m = elevation_m
position_ecef = coordinate_systems.geodetic_to_ecef(
self.latitude_rad,
self.longitude_rad,
elevation_m / 1000.)
self.recef = np.array(position_ecef)
def dict(self) -> dict:
d = {
'name': self.name,
'lat': self.lat,
'lon': self.lon,
'h': self.h
}
return d
@property
def lat(self) -> float:
return self.latitude_deg
@property
def lon(self) -> float:
return self.longitude_deg
@property
def h(self) -> float:
return self.elevation_m
@cached_property
def timezone(self) -> ZoneInfo:
""" Find timezone """
return get_timezone_from_latlon(self.latitude_deg, self.longitude_deg)
@property
def tz(self) -> ZoneInfo:
return self.timezone
@cached_property
def offset(self) -> float:
""" Compute timezone offset in hours from UTC """
now = datetime.now(self.timezone)
delta = now.utcoffset().total_seconds() / 3600
return delta
@cached_property
def _cached_elevation_calculation_data(self):
"""
Cache trig values used for rotating ECEF to SEZ topocentric coordinates
"""
sin_lat, sin_long = sin(self.latitude_rad), sin(self.longitude_rad)
cos_lat, cos_long = cos(self.latitude_rad), cos(self.longitude_rad)
return (cos_lat * cos_long, cos_lat * sin_long, sin_lat)
def _sun_elevation_mjd(self, mjd: float) -> float:
"""
Computes elevation angle of sun relative to location. Returns degrees.
"""
sun_recef = sun_pos_mjd(mjd)
coslatcoslon, coslatsinlon, sinlat = self._cached_elevation_calculation_data
el = elevation_at_rad(coslatcoslon, coslatsinlon, sinlat, self.recef, sun_recef)
return degrees(el)
def sun_elevation(self, d: datetime) -> float:
"""
Computes elevation angle of sun relative to location. Returns degrees.
"""
d2 = make_utc(d)
mjd = datetime2mjd(d2)
return self._sun_elevation_mjd(mjd)
def is_sunlit(self, dt: datetime) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self.sun_elevation(dt)
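        # -6 degrees corresponds to civil twilight, so the location still counts as sunlit slightly after sunset.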
return el > -6
def _is_sunlit_mjd(self, mjd: float) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self._sun_elevation_mjd(mjd)
return el > -6
def __repr__(self):
deg = u'\N{DEGREE SIGN}'
s = '<Location '
if self.name:
s += self.name + ' '
s += f'({self.latitude_deg}{deg} , {self.longitude_deg}{deg})'
s += '>'
return s
| 29.238806 | 88 | 0.619704 | 3,454 | 0.881572 | 0 | 0 | 1,049 | 0.267739 | 0 | 0 | 962 | 0.245533 |
7bbaeab63e6d9b82f2fcd904c0c52ba80c699e2f | 4,559 | py | Python | rl_baselines/evaluation/eval_post.py | anonymous-authors-2018/robotics-repo | 385d1f3b49f8d414ab90f53c6f06b56614ae83ba | [
"MIT"
]
| 5 | 2019-08-21T22:57:21.000Z | 2021-01-01T21:15:26.000Z | rl_baselines/evaluation/eval_post.py | BillChan226/POAR-SRL-4-Robot | a6a8052e105369656d34fffc4f7ca4475dcc38df | [
"MIT"
]
| null | null | null | rl_baselines/evaluation/eval_post.py | BillChan226/POAR-SRL-4-Robot | a6a8052e105369656d34fffc4f7ca4475dcc38df | [
"MIT"
]
| 2 | 2019-11-26T11:41:12.000Z | 2021-08-30T16:00:27.000Z |
import subprocess
import numpy as np
import pickle
import argparse
import os
from rl_baselines.student_eval import allPolicy
from srl_zoo.utils import printRed, printGreen
from rl_baselines.evaluation.cross_eval_utils import EnvsKwargs, loadConfigAndSetup, policyEval,createEnv
def dict2array(tasks,data):
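    """Normalise each task's reward columns by that task's maximum reward and stack the per-task arrays."""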
res=[]
for t in tasks:
if(t=='sc'):
max_reward=250
else:
max_reward=1850
data[t][:,1:]=data[t][:,1:]/max_reward
res.append(data[t])
res=np.array(res)
return res
def episodeEval(log_dir, tasks,num_timesteps=1000):
for t in tasks:
eval_args=['--log-dir', log_dir, '--num-timesteps', str(num_timesteps), '--num-cpu',str(5)]
task_args=['--task',t]
subprocess.call(['python', '-m', 'rl_baselines.cross_eval_utils']+eval_args+task_args)
file_name=log_dir+'episode_eval.pkl'
with open(file_name, 'rb') as f:
eval_reward = pickle.load(f)
    # Transfer the data from dict into a numpy array and save
eval_reward=dict2array(tasks,eval_reward)
file_name=log_dir+'episode_eval.npy'
np.save(file_name, eval_reward)
def policyCrossEval(log_dir,task,episode,model_path, num_timesteps=2000,num_cpu=1,seed=0):
train_args, algo_name, algo_class, srl_model_path, env_kwargs = loadConfigAndSetup(log_dir)
env_kwargs = EnvsKwargs(task, env_kwargs)
OK = True
if (not OK):
# no latest model saved yet
return None, False
else:
pass
printGreen(
"Evaluation from the model saved at: {}, with evaluation time steps: {}".format(model_path, num_timesteps))
log_dir, environment, algo_args = createEnv(log_dir, train_args, algo_name, algo_class, env_kwargs, num_cpu=num_cpu,seed=seed)
reward = policyEval(environment, model_path, log_dir, algo_class, algo_args, num_timesteps, num_cpu)
    # Prepend the episode number to the reward array (costs a little extra storage but keeps them together).
reward = np.append(episode, reward)
return reward, True
def saveReward(log_dir,reward, task,save_name='episode_eval.pkl'):
reward = reward.astype(float)
file_name=log_dir+save_name
#can be changed accordingly
if(os.path.isfile(file_name)):
with open(file_name, 'rb') as f:
eval_reward= pickle.load(f)
if (task in eval_reward.keys()):
episodes = eval_reward[task][0]
#The fisrt dimension of reward is the episode
current_episode =reward[0]
#Check if the latest episodes policy is already saved
if (current_episode not in episodes):
# # eval_reward[task]=np.append(eval_reward[task],[reward],axis=0)
eval_reward[task][0].append(reward[0])
eval_reward[task][1].append(reward.tolist())
else:
index = episodes.index(current_episode)
eval_reward[task][1][index].extend(reward[1:])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
else:# The task is not in the file yet
eval_reward[task]=([reward[0]],[reward.tolist()])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
else: #There is still not a episodes rewards evaluation registered
eval_reward = {}
eval_reward[task]=([reward[0]],[reward.tolist()])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Evaluation after training")
parser.add_argument('--log-dir',type=str, default=''
,help='RL algo to use')
parser.add_argument('--task-label', type=str, default='',
help='task to evaluate')
parser.add_argument('--episode', type=str, default='',
help='evaluation for the policy saved at this episode')
parser.add_argument('--policy-path', type=str, default='',
help='policy path')
parser.add_argument('--seed', type=int, default=0,
help='policy path')
args, unknown = parser.parse_known_args()
reward, _ = policyCrossEval(args.log_dir, args.task_label, episode=args.episode, model_path=args.policy_path,
num_timesteps=251,seed=args.seed)
saveReward(args.log_dir, reward, args.task_label, save_name='episode_eval.pkl')
| 33.77037 | 130 | 0.645317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.205747 |
7bbb8601ea2e62414cb9ab4019393f8898c93e86 | 6,304 | py | Python | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #! /usr/bin/env python
import os
os.system("make clean; make; \\rm *.log log.list")
############################################
#dir1='TriggerValidation_223_HLT'
#dir2='TriggerValidation_224_HLT'
#out='223_vs_224'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11"
#sufix2 = "_IDEALV11_v1"
#label1 = "LM1_223"
#label2 = "LM1_224"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre2_HLT'
#out='224_vs_300pre2'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEALV9"
#label1 = "LM1_223"
#label2 = "LM1_300pre2"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre6_HLT'
#out='224_vs_300pre6'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEAL_30x_v1"
#label1 = "LM1_223"
#label2 = "LM1_300pre6"
############################################
dir1='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
dir2='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
out='mc1_vs_mc2'
samples=['_HLT']
prefix1 = "DQM_V0001"
prefix2 = "DQM_V0001"
sufix1 = "_R000000001"
sufix2 = "_R000000001_2"
label1 = "HLT"
label2 = "HLT"
############################################
os.system('mkdir html/'+out)
#create html index page
os.system('cp html/template/index.html html/'+out+'/index.html')
#create the cover page
inputhtml = open('html/template/beginning.html')
outputhtml = open('html/'+out+'/cover.html','w')
for line in inputhtml:
    # substitute the release-name placeholders with the two release directories
    if line.find('<!-- Here python will write the name of first release -->') != -1: outputhtml.write(dir1)
    elif line.find('<!-- Here python will write the name of second release -->') != -1: outputhtml.write(dir2)
else: outputhtml.write(line)
continue
inputhtml.close()
outputhtml.close()
#create the menu page
os.system('cp html/template/menu_beginning.html html/'+out+'/menu.html')
for sample in samples:
tmp1 = open('tmp.html','w')
tmp2 = open('html/template/menu_body.html')
for line in tmp2:
if line.find('thissample') != -1:
newline = line.replace('thissample',sample)
tmp1.write(newline)
else: tmp1.write(line)
continue
tmp1.close()
tmp2.close()
os.system('more tmp.html >> html/'+out+'/menu.html')
os.system('rm tmp.html')
continue
os.system('more html/template/menu_end.html >> html/'+out+'/menu.html')
#run the code for each sample
for sample in samples:
file1 = dir1+'/'+prefix1+sample+sufix1+'.root'
file2 = dir2+'/'+prefix2+sample+sufix2+'.root'
outputfile = 'outputfile.root'
#create html page for this sample
inputhtml = open('html/template/comp_beginning.html')
os.system('mkdir html/'+out+'/'+sample)
outputhtml = open('html/'+out+'/'+sample+'/comparison.html','w')
# add right version names in the html
for line in inputhtml:
if line.find('<!-- Here python will write the name of first release -->') != -1: outputhtml.write(dir1)
elif line.find('<!-- Here python will write the name of second release -->') != -1: outputhtml.write(dir2)
elif line.find('<!-- Here python will write the name of the model -->') != -1: outputhtml.write(sample)
elif line.find('thissample') != -1:
newline = line.replace('thissample',sample)
outputhtml.write(newline)
else: outputhtml.write(line)
continue
inputhtml.close()
outputhtml.close()
# run the comparison
os.system('./triggerComparison.x -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
# for old names
# os.system('./triggerComparison.x --oldL1names -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
os.system('mv HLTcomparison.log html/'+out+'/'+sample)
os.system('mv L1comparison.log html/'+out+'/'+sample)
# mv root file to the html directory
os.system('mv '+outputfile+' html/'+out+'/'+sample)
# add eff and residual pulls to the html
os.system('more html/template/comp.html >> html/'+out+'/'+sample+'/comparison.html')
# link the compatibility maps
os.system('more compatibility.html >> html/'+out+'/'+sample+'/comparison.html')
# create jpg files
os.system("ls *eps > listeps.log")
listeps = open("listeps.log")
for epsfile in listeps: os.system("convert \""+epsfile[:-1]+"\" \""+epsfile[:-4]+"jpg\"")
thefile = open('html/'+out+'/'+sample+'/comparison.html',"r+")
# link HLT files
#thefile.seek(0,2)
#thefile.write('<tr><td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('HLT') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td>\n')
# link L1 files
#thefile.write('<td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('L1') != -1): #this is a plot of a trigger path
# if(epsfile.find('A_') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td></tr>\n')
#thefile.close()
# write end of the comparison web page
os.system('more html/template/end.html >> html/'+out+'/'+sample+'/comparison.html')
# move all eps and jpg files in the proper directory
os.system('mv *jpg html/'+out+'/'+sample+'/')
os.system('mv *eps html/'+out+'/'+sample+'/')
continue
os.system('\\rm listeps.log')
| 34.075676 | 153 | 0.615641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,176 | 0.662437 |
7bbbafe111995f8ba65d3c92e9ed6a6bf9e416f8 | 2,275 | py | Python | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
]
| null | null | null | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
]
| null | null | null | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
]
| null | null | null | from curses import echo
from importlib.metadata import metadata
import sqlite3
import sys
import sqlalchemy
import os
from sqlalchemy import Column, Integer, String, ForeignKey, Table, MetaData, create_engine, engine_from_config
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from database import DataBase
Base = declarative_base()
# def db_connect(db_name):
# """
# Performs database connection using database settings from settings.py.
# Returns sqlalchemy engine instance
# """
def create_table(db_name):
engine = create_engine('sqlite:///' + str(db_name) + '.sqlite')
An.metadata.create_all(engine)
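# Minimal ORM model: one row per user with username, password and page number.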
class An(Base):
__tablename__ = 'test'
id = Column(sqlalchemy.Integer, primary_key=True)
username = Column('username', sqlalchemy.Text())
password = Column('password', sqlalchemy.Text())
page = Column('page', sqlalchemy.Integer)
class Authentification(object):
__engine = None
def __init__(self, db, name, passphrase):
engine = create_engine('sqlite:///' + str(name) + '.sqlite')
if not sqlalchemy.inspect(engine).has_table(name):
create_table('test')
# self.__engine = db_connect(name)
# self.__db.set_name(name)
# self.__db.username = name
# self.__db.password = passphrase
# engine = db_connect(name)
# create_table(engine)
# self.Session = sessionmaker(bind=engine)
# session = self.Session()
# try:
# session.add(self.__db)
# session.commit()
# except:
# session.rollback()
# raise
# finally:
# session.close()
def new_session(self):
        # db_connect() is commented out above; build the engine directly and
        # make sure the table exists before opening a session.
        engine = create_engine('sqlite:///' + str(name) + '.sqlite')
        create_table(name)
self.Session = sessionmaker(bind=engine)
session = self.Session()
try:
result = session.query(An)
for item in result:
print(item)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
name = "test"
db = DataBase(name)
passphrase = "test"
AuthentificationPros = Authentification(db, name, passphrase)
| 28.4375 | 110 | 0.633407 | 1,449 | 0.636923 | 0 | 0 | 0 | 0 | 0 | 0 | 620 | 0.272527 |
7bbbb84b2ea6ce8e2867ca8c352a6bb6c21ce89f | 1,602 | py | Python | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
]
| null | null | null | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
]
| 3 | 2021-03-19T10:36:10.000Z | 2021-09-08T01:37:47.000Z | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
from django_cas.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from mecc.apps.years.models import UniversityYear
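# Landing view: stores the current university year in the session, then
# redirects the user to the dashboard or listing that matches their group
# (VP, DES1) or MECC profile code for that year.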
@login_required
def home(request):
"""
root of all evil:
dispatch according to user profile
"""
try:
target_year = UniversityYear.objects.get(is_target_year=True)
request.session['current_year'] = target_year.label_year
request.session['current_code_year'] = target_year.code_year
except ObjectDoesNotExist:
pass
for e in request.user.groups.all():
if e.name == "VP":
return redirect('dashboards:general')
if e.name == "DES1":
return redirect('training:list_all')
for e in request.user.meccuser.profile.all():
if e.code == "RESPFORM" and e.year == request.session['current_code_year']:
return redirect('training:list_resp')
if e.code == 'REFAPP':
return redirect('training:list', cmp=e.cmp)
if e.code == 'DIRETU':
return redirect('training:list', cmp=e.cmp)
if e.code == "GESCOL":
return redirect('training:list', cmp=e.cmp)
if e.code in ['DIRCOMP', 'RAC']:
return redirect('dashboards:institute', code=e.cmp)
if e.code == "ECI":
return redirect('training:list_all_meccs')
if e.code == "RESPENS" and e.year == request.session['current_code_year']:
return redirect('training:my_teachings')
return render(request, 'base.html')
| 36.409091 | 83 | 0.637953 | 0 | 0 | 0 | 0 | 1,374 | 0.857678 | 0 | 0 | 423 | 0.264045 |
7bbbe09edfcf3321edef1198ab48f96f54dee63c | 2,695 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
]
| 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
]
| null | null | null | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
]
| null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlNetwerklinkMediumtype(KeuzelijstField):
"""Mogelijke waarden voor het type drager waarlangs data door de link getransporteerd wordt."""
naam = 'KlNetwerklinkMediumtype'
label = 'Netwerklink mediumtype'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlNetwerklinkMediumtype'
definition = 'Mogelijke waarden voor het type drager waarlangs data door de link getransporteerd wordt.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlNetwerklinkMediumtype'
options = {
'andere': KeuzelijstWaarde(invulwaarde='andere',
label='andere',
definitie='De link tussen de netwerkpoorten wordt gerealiseerd via een andere dan een optische, UTP, DSL of transportnetwerk verbinding.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlNetwerklinkMediumtype/andere'),
'dsl': KeuzelijstWaarde(invulwaarde='dsl',
label='DSL',
definitie='De link tussen de netwerkpoorten wordt gerealiseerd via een DSL verbinding.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlNetwerklinkMediumtype/dsl'),
'optisch': KeuzelijstWaarde(invulwaarde='optisch',
label='optisch',
definitie='De link tussen de netwerkpoorten wordt gerealiseerd via een glasvezelkabel.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlNetwerklinkMediumtype/optisch'),
'transportnetwerk': KeuzelijstWaarde(invulwaarde='transportnetwerk',
label='transportnetwerk',
definitie='De link tussen de netwerkpoorten wordt gerealiseerd via het optisch transportnetwerk.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlNetwerklinkMediumtype/transportnetwerk'),
'utp': KeuzelijstWaarde(invulwaarde='utp',
label='UTP',
definitie='De link tussen de netwerkpoorten wordt gerealiseerd via een UTP kabel.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlNetwerklinkMediumtype/utp')
}
| 72.837838 | 173 | 0.634879 | 2,463 | 0.913915 | 0 | 0 | 0 | 0 | 0 | 0 | 1,488 | 0.552134 |
7bbde3e95bb2349d1613a331043db076b94f2cfe | 1,617 | py | Python | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
]
| null | null | null | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
]
| null | null | null | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
]
| null | null | null | import logging
import threading
import time
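# Drains status messages from the consumer, hands them to the message
# processor, and reports the file writer as "writing" while the newest
# processed timestamp is within LIVENESS_TIMEOUT_S of the current time.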
class StatusProcessor:
MAX_NUM_MESSAGES_PER_UPDATE = 10
GET_MESSAGES_TIMEOUT_S = 0.5
LIVENESS_TIMEOUT_S = 5
def __init__(
self,
status_consumer,
msg_processor,
logger=logging.getLogger(__name__),
time_function=time.time,
):
self._consumer = status_consumer
self._msg_processor = msg_processor
self._logger = logger
self._time_function = time_function
self._is_writing = False
self._is_writing_lock = threading.Lock()
def start(self):
self._consumer.start()
def update_status(self):
self._get_and_process_messages()
self._update_running_status()
def _get_and_process_messages(self):
msgs = self._consumer.get_messages(
self.MAX_NUM_MESSAGES_PER_UPDATE, self.GET_MESSAGES_TIMEOUT_S
)
for msg in msgs:
self._msg_processor.process_msg(msg)
def _update_running_status(self):
ts = self._msg_processor.get_latest_timestamp()
if ts is None:
return
ct = self._time_function()
with self._is_writing_lock:
self._is_writing = (ct - ts) <= self.LIVENESS_TIMEOUT_S
def is_writing(self):
with self._is_writing_lock:
status = self._is_writing
return status
def stop(self):
self._consumer.stop()
def get_metrics(self):
if self.is_writing():
raise RuntimeError("File writer is still running")
else:
return self._msg_processor.get_metrics()
| 26.508197 | 73 | 0.641311 | 1,570 | 0.970934 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.018553 |
7bbdf574388c84658ffc5b1e989b4bad6ddb075e | 9,045 | py | Python | befh/exchanges/okex_spot.py | philsong/BitcoinExchangeFH | 3c45d4be2ea2a258f132d982f62f69d649e0b083 | [
"Apache-2.0"
]
| 32 | 2017-12-15T07:30:11.000Z | 2020-07-16T10:15:18.000Z | befh/exchanges/okex_spot.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
]
| null | null | null | befh/exchanges/okex_spot.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
]
| 20 | 2017-11-09T15:28:39.000Z | 2019-12-10T01:02:57.000Z | from befh.ws_api_socket import WebSocketApiClient
from befh.market_data import L2Depth, Trade
from befh.exchanges.gateway import ExchangeGateway
from befh.instrument import Instrument
from befh.util import Logger
from befh.clients.sql_template import SqlClientTemplate
import time
import threading
import json
from functools import partial
from datetime import datetime
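# Websocket gateway for OKEx spot: subscribes to the depth_5 and deals
# channels of each instrument, parses the snapshots/trades and stores them
# through the ExchangeGateway base class.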
class ExchGwApiOkexSpotWs(WebSocketApiClient):
"""
Exchange socket
"""
def __init__(self):
"""
Constructor
"""
WebSocketApiClient.__init__(self, 'ExchGwOkexSpot')
@classmethod
def get_timestamp_offset(cls):
return 1000
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'timestamp'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_link(cls):
return 'wss://real.okex.com:10441/websocket'
@classmethod
def get_order_book_subscription_string(cls, instmt):
return json.dumps({"event":"addChannel", "channel": instmt.get_order_book_channel_id()})
@classmethod
def get_trades_subscription_string(cls, instmt):
return json.dumps({"event":"addChannel", "channel": instmt.get_trades_channel_id()})
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
# l2_depth = instmt.get_l2_depth()
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_order_book_timestamp_field_name() in keys and \
cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
timestamp = float(raw[cls.get_order_book_timestamp_field_name()])/cls.get_timestamp_offset()
l2_depth.date_time = datetime.utcfromtimestamp(timestamp).strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
max_bid_len = min(len(bids), 5)
for i in range(0, max_bid_len):
l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
max_ask_len = min(len(asks), 5)
for i in range(0, max_ask_len):
l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
trade_id = raw[0]
trade_price = float(raw[1])
trade_volume = float(raw[2])
date_time = raw[3]
trade_side = raw[4]
# trade.date_time = date_time
trade.trade_id = str(trade_id)
trade.trade_price = trade_price
trade.trade_volume = trade_volume
trade.trade_side = Trade.parse_side(trade_side)
return trade
class ExchGwOkexSpot(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiOkexSpotWs(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Okex'
def on_open_handler(self, instmt, ws):
"""
Socket on open handler
:param instmt: Instrument
:param ws: Web socket
"""
Logger.info(self.__class__.__name__, "Instrument %s is subscribed in channel %s" % \
(instmt.get_instmt_code(), instmt.get_exchange_name()))
if not instmt.get_subscribed():
instmt_code_split = instmt.get_instmt_code().split('_')
if len(instmt_code_split) == 2:
                # Instrument codes of the form BASE_QUOTE (e.g. BCH_BTC)
instmt.set_order_book_channel_id("ok_sub_spot_%s_%s_depth_5" % \
(instmt_code_split[0].lower(),
instmt_code_split[1].lower()))
instmt.set_trades_channel_id("ok_sub_spot_%s_%s_deals" % \
(instmt_code_split[0].lower(),
instmt_code_split[1].lower()))
else:
# Spot instruments
instmt.set_order_book_channel_id("ok_sub_spot_%s_depth_5" % instmt.get_instmt_code().lower())
instmt.set_trades_channel_id("ok_sub_spot_%s_deals" % instmt.get_instmt_code().lower())
ws.send(self.api_socket.get_order_book_subscription_string(instmt))
# ws.send(self.api_socket.get_trades_subscription_string(instmt))
instmt.set_subscribed(True)
def on_close_handler(self, instmt, ws):
"""
Socket on close handler
:param instmt: Instrument
:param ws: Web socket
"""
Logger.info(self.__class__.__name__, "Instrument %s is unsubscribed in channel %s" % \
(instmt.get_instmt_code(), instmt.get_exchange_name()))
instmt.set_subscribed(False)
def on_message_handler(self, instmt, messages):
"""
Incoming message handler
:param instmt: Instrument
:param message: Message
"""
for message in messages:
keys = message.keys()
# print(keys)
if 'channel' in keys:
if 'data' in keys:
if message['channel'] == instmt.get_order_book_channel_id():
data = message['data']
l2_depth = self.api_socket.parse_l2_depth(instmt, data)
if l2_depth is not None:
# Insert only if the first 5 levels are different
# if l2_depth is not None and instmt.get_l2_depth().is_diff(instmt.get_prev_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
elif message['channel'] == instmt.get_trades_channel_id():
for trade_raw in message['data']:
trade = self.api_socket.parse_trade(instmt, trade_raw)
if trade.trade_id != instmt.get_exch_trade_id():
instmt.incr_trade_id()
instmt.set_exch_trade_id(trade.trade_id)
self.insert_trade(instmt, trade)
elif 'success' in keys:
Logger.info(self.__class__.__name__, "Subscription to channel %s is %s" \
% (message['channel'], message['success']))
else:
Logger.info(self.__class__.__name__, ' - ' + json.dumps(message))
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_prev_l2_depth(L2Depth(20))
instmt.set_l2_depth(L2Depth(20))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
return [self.api_socket.connect(self.api_socket.get_link(),
on_message_handler=partial(self.on_message_handler, instmt),
on_open_handler=partial(self.on_open_handler, instmt),
on_close_handler=partial(self.on_close_handler, instmt))]
if __name__ == '__main__':
exchange_name = 'Okex'
instmt_name = 'BCHBTC'
instmt_code = 'BCH_BTC'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
Logger.init_log()
exch = ExchGwOkexSpot([db_client])
td = exch.start(instmt)
| 38.326271 | 116 | 0.570591 | 8,364 | 0.92471 | 0 | 0 | 3,269 | 0.361415 | 0 | 0 | 1,825 | 0.201769 |
7bbe575d89df5cb9077767131f1bcff71b4ea2bc | 191 | py | Python | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
]
| 1 | 2019-05-16T15:25:22.000Z | 2019-05-16T15:25:22.000Z | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
]
| null | null | null | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
]
| 1 | 2019-06-17T15:22:29.000Z | 2019-06-17T15:22:29.000Z | from cloudflare_ddns.configuration import Configuration, SiteConfiguration
from cloudflare_ddns.ddns import CloudflareDDNS
__all__ = ["CloudflareDDNS", "Configuration", "SiteConfiguration"]
| 38.2 | 74 | 0.848168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.26178 |
7bbe5cef3d1aeca66fb6ca826edab503eb8c860b | 587 | py | Python | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
]
| null | null | null | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
]
| null | null | null | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
]
| null | null | null | from .base import PipBaseRecipe
class TwistedRecipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(TwistedRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'a4cc164a781859c74de47f17f0e85f4b' \
'ce8a3321a9d0892c015c8f80c4158ad9'
self.pythons = ['python3']
self.pydepends = ['Automat',
'constantly',
'hyperlink',
'incremental',
'zope.interface']
self.name = 'twisted'
self.version = '18.4.0'
| 32.611111 | 60 | 0.524702 | 552 | 0.940375 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.264055 |
7bbf00877f721b0c24c4e63d13a17b9fddb98274 | 250 | py | Python | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
]
| null | null | null | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
]
| 1 | 2022-02-23T07:34:53.000Z | 2022-02-23T07:34:53.000Z | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
]
| null | null | null | #!/usr/bin/python
# combiner.py
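# Streaming-style combiner: every stdin line carries "<word_count>\t<line_count>";
# both columns are summed and a single combined pair is printed, e.g.
#   printf '3\t1\n4\t2\n' | ./combiner.py   ->   "7\t3"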
import sys
word_count = 0
line_count = 0
for line in sys.stdin:
words, lines = line.strip().split('\t')
word_count += int(words)
line_count += int(lines)
print("{0}\t{1}".format(word_count, line_count)) | 17.857143 | 48 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.176 |
7bbf1685508e5466a589c9ca9ef370e0a3b9611c | 1,376 | py | Python | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
]
| null | null | null | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
]
| null | null | null | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
]
| null | null | null | import re
from radish.stepregistry import step
from radish import when, then
from radish.terrain import world
@step(re.compile("I have the number in user data as (.+)"))
def have_number(step, input_variable):
if world.config.user_data:
if input_variable in world.config.user_data:
step.context.numbers.append(int(world.config.user_data[input_variable]))
else:
msg = "Variable [{0}] is not in the user data (-u/--user-data) specified on the command-line."
assert False, msg.format(input_variable)
else:
assert (
False
), "There is no user data (-u/--user-data) specified on the command-line."
@when("I sum them")
def sum_numbers(step):
step.context.result = sum(step.context.numbers)
@then(re.compile("I expect the result to be the value in user data as (.+)"))
def expect_result(step, result_variable):
if world.config.user_data:
if result_variable in world.config.user_data:
assert step.context.result == int(world.config.user_data[result_variable])
else:
msg = "Variable [{0}] is not in the user data (-u/--user-data) specified on the command-line."
            assert False, msg.format(result_variable)
else:
assert (
False
), "There is no user data (-u/--user-data) specified on the command-line."
| 35.282051 | 106 | 0.652616 | 0 | 0 | 0 | 0 | 1,256 | 0.912791 | 0 | 0 | 428 | 0.311047 |
7bbf1d84d1d1e722a857754d78ceb86118a7eadb | 3,462 | py | Python | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
]
| 2 | 2018-10-29T09:37:47.000Z | 2019-11-28T14:11:12.000Z | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
]
| null | null | null | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
]
| 2 | 2018-09-18T14:09:46.000Z | 2019-11-28T14:11:14.000Z | from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.urls import reverse_lazy
from django.urls.base import reverse
from django.views.generic import CreateView, DeleteView, DetailView, View
from .forms import AnswerForm, QuestionForm
from .models import Answer, Like, Question
User = get_user_model()
class AnswerDetailView(DetailView):
queryset = Answer.objects.all_with_question()
class AnswerCreateView(LoginRequiredMixin, CreateView):
model = Answer
form_class = AnswerForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.question = self.get_question()
if self.object.question.asked_to != self.request.user:
return HttpResponseBadRequest()
self.object.save()
return super().form_valid(form)
def get_question(self):
return Question.objects.get(id=self.kwargs['pk'])
class AnswerDeleteView(LoginRequiredMixin, DeleteView):
model = Answer
success_url = reverse_lazy('user:my-profile')
def dispatch(self, *args, **kwargs):
answer = self.get_object()
if answer.question.asked_to != self.request.user:
raise PermissionDenied
return super().dispatch(*args, **kwargs)
class PrivateQuestionDetailView(DetailView):
model = Question
def dispatch(self, *args, **kwargs):
question = self.get_object()
if question.asked_to != self.request.user:
raise PermissionDenied
try:
            # The question already has an answer: redirect straight to it.
            return HttpResponseRedirect(question.answer.get_absolute_url())
except ObjectDoesNotExist:
pass
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
answer_form = AnswerForm()
ctx.update({'answer_form': answer_form})
return ctx
class QuestionCreateView(LoginRequiredMixin, CreateView):
model = Question
form_class = QuestionForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.asked_by = self.request.user
self.object.asked_to = self.get_user()
self.object.save()
return super().form_valid(form)
def get_success_url(self):
return reverse('user:profile', kwargs={
'pk': self.get_user().id
})
def get_user(self):
return User.objects.get(id=self.kwargs['pk'])
class QuestionDeleteView(LoginRequiredMixin, DeleteView):
model = Question
success_url = reverse_lazy('user:inbox')
def dispatch(self, *args, **kwargs):
question = self.get_object()
if question.asked_to != self.request.user:
raise PermissionDenied
return super().dispatch(*args, **kwargs)
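# Toggles a like on an answer: removes the existing Like if the user already
# liked it, otherwise creates one, then redirects back to the referring page.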
class LikeView(LoginRequiredMixin, View):
def post(self, request, pk):
answer = Answer.objects.get(id=pk)
like = Like.objects.filter(answer=answer,
liked_by=request.user)
if like:
like.delete()
else:
like = Like.objects.create(answer=answer,
liked_by=request.user)
like.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| 27.696 | 77 | 0.662334 | 2,925 | 0.844887 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.023686 |
7bc0b829008737def4ec98f701aadecbf19f6fdd | 336 | py | Python | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
]
| 1 | 2019-04-14T19:58:43.000Z | 2019-04-14T19:58:43.000Z | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
]
| null | null | null | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
from distutils.core import setup
setup(
name = 'pydouban',
version = '1.0.0',
description = 'Lightweight Python Douban API Library',
author = 'Marvour',
author_email = '[email protected]',
license = 'BSD License',
url = 'http://i.shiao.org/a/pydouban',
packages = ['pydouban'],
)
| 22.4 | 58 | 0.630952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.473214 |
7bc111fc110f0ab3862581da0b6b979e7a706d1e | 3,234 | py | Python | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
]
| 1 | 2020-05-12T12:31:51.000Z | 2020-05-12T12:31:51.000Z | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
]
| null | null | null | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
]
| null | null | null | from imutils import face_utils
from scipy.spatial import distance
import cv2
import dlib
import imutils
import pygame
import time
# Initializing the alert sound
pygame.mixer.init()
alert_sound = pygame.mixer.Sound("alert_sound.wav")
default_volume = 0.2
# Eye-Aspect-Ratio data
EAR_threshhold = 0.17 # One valid frame is counted when EAR is lower than this value
frame_count = 0 # Number of frames when EAR is lower than EAR_threshhold
EAR_total_frame = 25 # Having frame_count larger than this value is considered drowsiness
# Play the alarm in a given volume
def alert(volume):
alert_sound.set_volume(volume)
alert_sound.play()
# Given an eye landmark, compute its eye_aspect_ratio
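# EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|) over the six eye landmarks;
# the ratio falls toward zero as the eye closes.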
def eye_aspect_ratio(eye):
v1 = distance.euclidean(eye[1], eye[5])
v2 = distance.euclidean(eye[2], eye[4])
h1 = distance.euclidean(eye[0], eye[3])
return (v1 + v2) / (2 * h1)
# Initialize the face detector and Facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Access the camera
cap = cv2.VideoCapture(0)
# Main loop for drowsiness detection
while True:
    # Read the camera input, resize it, and convert it to a grayscale frame
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in grayscale frame
bounds = detector(raw,0)
for bound in bounds:
# Predict facial landmarks for each detected face
shape = predictor(raw,bound)
# Convert the facial lanmarks into a 1-D numpy array (x, y)
shape = face_utils.shape_to_np(shape)
# Left and right eyes' indexes for facial landmarks
left_eye = shape[42:48]
right_eye = shape[36:42]
# The main EAR is the average of left and right eye's EAR
left_EAR = eye_aspect_ratio(left_eye)
right_EAR = eye_aspect_ratio(right_eye)
EAR = (left_EAR + right_EAR) / 2
# Draw the facial landmarks for left eye
for (x, y) in left_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Draw the facial landmarks for right eye
for (x, y) in right_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Alarm when drowsiness is detected
if EAR < EAR_threshhold:
frame_count += 1
# Volume increases gradually
if frame_count >= EAR_total_frame:
alert(0.2 + (frame_count - 25) * 0.2)
time.sleep(3)
else:
frame_count = 0
# Display informations
cv2.putText(frame, "Frame: {:.0f}".format(frame_count), (30, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Eye-Aspect-Ratio: {:.2f}".format(EAR), (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Press Q to exit.", (410, 320),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Display the frame
cv2.imshow("Drowsiness_Detector", frame)
# Provide a way to exit the program -- pressing "Q"
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows() | 31.096154 | 89 | 0.649969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,135 | 0.350959 |
7bc160c90d8d420f5bacbdb3fbe421c84e36aaf4 | 11,809 | py | Python | trunk-tap.py | schreiberstein/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
]
| 15 | 2017-10-22T15:08:58.000Z | 2022-01-03T22:21:12.000Z | trunk-tap.py | ideechaniz/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
]
| 2 | 2018-04-04T18:52:54.000Z | 2019-02-20T10:16:13.000Z | trunk-tap.py | ideechaniz/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
]
| 6 | 2017-10-23T03:03:16.000Z | 2021-07-03T16:28:29.000Z | #!/usr/bin/env python3
# < trunk-tap.py >
# Version 1.0 < 20171022 >
# Copyright 2017: Alexander Schreiber < schreiberstein[at]gmail.com >
# https://github.com/schreiberstein/trunk-tap.py
# MIT License:
# ============
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# See: https://opensource.org/licenses/MIT
# Introduction:
# =============
# trunk-tap.py is a Linux command line utility that connects a set of 802.1Q VLANs to a TINC VPN/OpenVPN TAP-interface and is designed to be invoked by ifup/ifdown scripts after starting or stopping a VPN connection.
# Dependencies (on Debian): python3, iproute2, bridge-utils, vlan (including kernel module '8021q' in /etc/modules)
# It reads the filenames from the content of a folder containing files corresponding to the VLAN ID (e.g. '100', '105', ...), then creates VLAN interfaces on a local Ethernet adapter used as "trunk port" (e.g. 'eth1.100', 'eth1.105', ...).
# The script then proceeds to generate bridge interfaces for every VLAN ID. (e.g. "trunk0.100", "trunk0.105", ...) and attaches the respective Ethernet VLAN interfaces to the bridge. (e.g. 'trunk0.105 <-> eth1.105', ...)
# After that, the local infrastructure is ready to be attached to the VPN layer 2 tunnel.
# This is achieved by enabling the TAP interface ("up"), creating VLAN interfaces on the TAP adapter (e.g. 'tap0.100', 'tap0.105', ...) and attaching them to the respective bridge.
# Illustration:
# =============
# (TINC VPN / OpenVPN)
# -------- SITE 1 ------- -------- SITE 2 -------
# eth1.100 <-> trunk0.100 <--\ ################ /--> trunk0.100 <-> eth1.100
# eth1.105 <-> trunk0.105 <--->> ---TAP-TUNNEL--- <<---> trunk0.105 <-> eth1.105
# eth1.110 <-> trunk0.110 <--/ ################ \--> trunk0.110 <-> eth1.110
# Hint: Interface names (ethernet adapter, bridge name, ...) do not necessarily have to be identical among sites.
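# Example invocation (interface names below are placeholders for your setup):
#   ./trunk-tap.py -start -i eth1 -t tap0 -b trunk0 -v ./vlans/
#   ./trunk-tap.py -stop  -i eth1 -t tap0 -b trunk0 -v ./vlans/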
# --------------------------------------------------------------------------------------------------------------- #
# Code:
# =====
# Import required Python3 modules
import os, sys, subprocess
from pathlib import Path
# Create VLAN-interfaces on trunk interface (e.g. 'eth1.100', 'eth1.105', ...)
def trunk_vlan_add():
# Initialize our trunk interface, if it is not up yet
p = subprocess.Popen("ip link set dev " + trunk_interface + " up", shell=True)
p.communicate()
# Create VLAN interfaces on trunk_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add link " + trunk_interface + " name " + trunk_interface + "." + filename + " type vlan id " + filename +" ; " + "ip link set " + trunk_interface + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to remove VLAN interfaces from trunk interface
def trunk_vlan_del():
# Remove VLAN interfaces on trunk_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + trunk_interface + "." + filename + " down" + " ; " + "ip link delete " + trunk_interface + "." + filename, shell=True)
p.communicate()
continue
return
# Function to create main bridge (no VLAN ID - May be used to attach a VLAN/network to provide network to devices without VLAN support (VLAN0 - untagged))
def bridge_add():
p = subprocess.Popen("ip link add name " + bridge_name + " type bridge" + " ; " + "ip link set " + bridge_name + " up" + " ; " + "ip link set " + trunk_interface + " master " + bridge_name, shell=True)
p.communicate()
return
# Function to remove bridge
def bridge_del():
p = subprocess.Popen("ip link set " + bridge_name + " down" + " ; " + "ip link delete " + bridge_name + " type bridge", shell=True)
p.communicate()
return
# Creates bridges to be used for VLAN bridging (e.g. 'trunk0.100', 'trunk0.105', ..) - illustration: eth1.105 <-> Bridge: trunk0.105 <-> tap0.105
def bridge_vlan_add():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add name " + bridge_name + "." + filename + " type bridge" + " ; " + "ip link set " + bridge_name + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to remove VLAN interfaces from the bridge
def bridge_vlan_del():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + bridge_name + "." + filename + " down" + " ; " + "ip link delete " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
def bridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + trunk_interface + "." + filename + " master " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Create VLAN-interfaces on tap interface
def tap_vlan_add():
# Initialize the tap interface, if it is not up yet
p = subprocess.Popen("ip link set dev " + tap_interface + " up", shell=True)
p.communicate()
# Create VLAN interfaces on tap interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add link " + tap_interface + " name " + tap_interface + "." + filename + " type vlan id " + filename + " ; " + "ip link set dev " + tap_interface + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
def tap_bridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + tap_interface + "." + filename + " master " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Function to enable ("up") the tap interface
def tap_if_up():
	p = subprocess.Popen("ip link set dev " + tap_interface + " up", shell=True)
	p.communicate()
	return
# Function to disable ("down") the tap interface
def tap_if_down():
p = subprocess.Popen("ip link set dev " + tap_interface + " down", shell=True)
p.communicate();
return
# Function to remove VLAN interfaces from tap interface
def tap_vlan_del():
# Remove VLAN interfaces on tinc_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + tap_interface + "." + filename + " down" + " ; " + "ip link delete " + tap_interface + "." + filename, shell=True)
p.communicate()
continue
return
# Function to remove members attached by the tap_bridge() function
def tap_unbridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + tap_interface + "." + filename + " nomaster", shell=True)
p.communicate()
continue
return
# Function to remove members attached by the bridge() function
def unbridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + trunk_interface + "." + filename + " nomaster", shell=True)
p.communicate()
continue
return
# ------------------------
# Note: Order of execution
# ------------------------
# Start:
# ------
# trunk_vlan_add()
# bridge_add()
# bridge_vlan_add()
# bridge()
# tap_if_up()
# tap_vlan_add()
# tap_bridge()
# Stop:
# -----
# tap_unbridge()
# tap_vlan_del()
# tap_if_down()
# unbridge()
# bridge_vlan_del()
# bridge_del()
# trunk_vlan_del()
# Start function - Used to execute all other functions
def start(no_tap):
trunk_vlan_add()
bridge_add()
bridge_vlan_add()
bridge()
# Don't do anything with the TAP interface if --no_tap was specified
if not no_tap:
tap_if_up()
tap_vlan_add()
tap_bridge()
return
# Stop function - reverses the actions performed by start()
def stop(no_tap):
# Don't do anything with the TAP interface if --no_tap was specified
if not no_tap:
tap_unbridge()
tap_vlan_del()
tap_if_down()
unbridge()
bridge_vlan_del()
bridge_del()
trunk_vlan_del()
return
# # # # # # # # #
# Main function #
# # # # # # # # #
def main():
# If no arguments are specified, quit.
if len(sys.argv) == 1:
print("Error: No arguments specified. Enter ./trunktap.py --help for more information.")
quit()
# If arguments are given, parse them and run script.
import argparse
parser = argparse.ArgumentParser()
# Add arguments
parser.add_argument("-start", dest="is_start", action="store_true", help="Creates all interfaces and establishes VLAN bridges")
parser.add_argument("-stop", dest="is_stop", action="store_true", help="Reverses -start: Removes the previously created interfaces")
parser.add_argument("-i", "--interface", dest="trunk_interface", help="Specify the trunk interface on the host that will provide the VLANs to the network (e.g. eth1)")
parser.add_argument("-t", "--tap-interface", dest="tap_interface", help="Specify the TAP interface on the host that will be used by TINC/OpenVPN (e.g. $INTERFACE, tap0)")
parser.add_argument("-v", "--vlan-dir", dest="vlan_dir", help="The path to the folder that contains the files that represent the VLANs that will be created. - Default: ./vlans/ ", default="./vlans/")
parser.add_argument("-b", "--bridge", dest="bridge_name", help="Name of the bridge that will be created. (e.g. trunk0, br0)")
parser.add_argument("--no-tap", dest="no_tap", help="Only for special use: If used, the VLANs will be created locally (e.g. trunk0.105 <-> eth1.105), but the TAP interface won't be used.", default=False, action="store_true")
# Parse arguments
arguments = parser.parse_args()
# Create local variables because the functions use these
global trunk_interface, tap_interface, vlan_dir, bridge_name
trunk_interface = arguments.trunk_interface
tap_interface = arguments.tap_interface
vlan_dir = arguments.vlan_dir
bridge_name = arguments.bridge_name
# Make sure that either start or stop was specified (NOT XOR)
if not arguments.is_start ^ arguments.is_stop:
print("Error: You have to specify either -start or -stop. Only one option is valid.")
quit()
# Make sure that arguments are not empty
if not (trunk_interface and tap_interface and vlan_dir and bridge_name):
print("Error: You have to specify -i, -t, -b and -v.")
quit()
# Execute either function start() or stop() and pass the no_tap-variable
if arguments.is_start:
start(arguments.no_tap)
if arguments.is_stop:
stop(arguments.no_tap)
quit()
# Only run main if the script is explicitly executed (e.g. './trunktap.py')
if __name__ == "__main__":
main()
| 41.146341 | 260 | 0.655348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,148 | 0.605301 |
7bc353399a2502106befa0365666e5d586522d04 | 4,404 | py | Python | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
]
| null | null | null | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
]
| null | null | null | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import contextlib
import os
import re
import subprocess
from azurelinuxagent.common.utils import fileutil
from tests.tools import patch, data_dir
#
# Default values for the mocked commands.
#
# The output comes from an Ubuntu 18 system
#
_default_commands = [
(r"systemctl --version",
'''systemd 237
+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid
'''),
(r"mount -t cgroup",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
'''),
(r"mount -t cgroup2",
'''cgroup on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime)
'''),
(r"systemctl show walinuxagent\.service --property CPUAccounting",
'''CPUAccounting=no
'''),
(r"systemctl show walinuxagent\.service --property MemoryAccounting",
'''MemoryAccounting=no
'''),
(r"systemd-run --unit=([^\s]+) --scope ([^\s]+)",
'''
Running scope as unit: TEST_UNIT.scope
Thu 28 May 2020 07:25:55 AM PDT
'''),
]
_default_files = (
(r"/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'proc_self_cgroup')),
(r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'proc_pid_cgroup')),
(r"/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'sys_fs_cgroup_unified_cgroup.controllers')),
)
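# Patches subprocess.Popen, os.path.exists and fileutil.read_file as used by
# azurelinuxagent.common.cgroupapi so that the cgroup-related commands and
# files above return the canned Ubuntu 18 output instead of touching the host.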
@contextlib.contextmanager
def mock_cgroup_commands():
original_popen = subprocess.Popen
original_read_file = fileutil.read_file
original_path_exists = os.path.exists
def mock_popen(command, *args, **kwargs):
if isinstance(command, list):
command_string = " ".join(command)
else:
command_string = command
for cmd in _default_commands:
match = re.match(cmd[0], command_string)
if match is not None:
command = ["echo", cmd[1]]
return original_popen(command, *args, **kwargs)
def mock_read_file(filepath, **kwargs):
for file in _default_files:
match = re.match(file[0], filepath)
if match is not None:
filepath = file[1]
return original_read_file(filepath, **kwargs)
def mock_path_exists(path):
for file in _default_files:
match = re.match(file[0], path)
if match is not None:
return True
return original_path_exists(path)
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) as patcher:
with patch("azurelinuxagent.common.cgroupapi.os.path.exists", side_effect=mock_path_exists):
with patch("azurelinuxagent.common.cgroupapi.fileutil.read_file", side_effect=mock_read_file):
yield patcher
| 38.631579 | 178 | 0.711172 | 0 | 0 | 1,376 | 0.312443 | 1,403 | 0.318574 | 0 | 0 | 2,772 | 0.629428 |
7bc78e4dfebfc4162a535f0855d380aa68aa6df8 | 1,474 | py | Python | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
]
| 1 | 2021-11-29T03:30:49.000Z | 2021-11-29T03:30:49.000Z | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
]
| 1 | 2021-11-29T15:28:09.000Z | 2021-11-29T15:28:09.000Z | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
]
| null | null | null | from __future__ import print_function
import os
import neat
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [(0.0,),(1.0,),(1.0,),(0.0,)]
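# Fitness starts at 4.0 (one point per XOR case) and is reduced by the output
# error raised to the 4th power for each of the four input/output pairs.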
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = 4.0
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = net.activate(xi)
genome.fitness -= (output[0] - xo[0]) ** 4
def run(config_file):
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(5))
winner = p.run(eval_genomes, 500)
print('\nBest genome:\n{!s}'.format(winner))
print('\nOutput:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = winner_net.activate(xi)
print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')
p.run(eval_genomes, 10)
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path) | 33.5 | 82 | 0.651967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.107191 |
7bc7b39f24b5e8a02751dc33b721dc3411814fe9 | 16,866 | py | Python | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
]
| 5 | 2017-10-02T06:01:01.000Z | 2022-03-08T05:51:51.000Z | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
]
| null | null | null | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
]
| null | null | null | """
_ _____ _ _
|_| __ | |___ ___| |_
| | __ -| | . | _| '_|
|_|_____|_|___|___|_,_|
iBlock is a machine learning video game!
This game is played on an 8x6 board (48 spaces) and the goal is to fill up the enemy's column with your pieces! Once that happens the game will reset and log all the data for the AIs to observe! In the first few games the AI will take random moves and attempt winning. Once one of the AIs wins, the information on how it won gets processed and they try to repeat it using that information!
Rather than focusing on attacking, these AIs naturally play defensively! You will see them defend their base while at the same time trying to attack the enemy!
The AI also doesn't know which spaces it must fill to win, so as it plays it must learn them on its own (this also allows for the creation of custom maps).
iBlock has multiple different game options for how to set up the way the AI will play! New gamemodes coming soon!
Copyright (c) SavSec 2017
Copyright (c) SavSec iBlock 2017
Format:
Encoding: UTF-8
Tab: 2
System:
Python 2.7
Modules: sys, time, random
License:
MIT License
Developer:
@Russian_Otter - Instagram
"""
import sys, random, time, argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--intelligence",help="Activates dynamic machine learning mode for both players",action="store_true")
parser.add_argument("-r", "--random",help="Activates random machine learning mode for both players",action="store_true")
parser.add_argument("-p", "--pvai",help="Activates Player vs AI mode",action="store_true")
parser.add_argument("-R", "--Reset",help="Activates reset mode for both players",action="store_true")
parser.add_argument("-sm", "--show-moves",help="Shows the last move for each turn",action="store_true")
parser.add_argument("-d", "--display",help="Set to False to disable table display",default=True)
parser.add_argument("-pg", "--progress",help="Displays progress graphs", action="store_true")
parser.add_argument("-t", "--time",help="Turn rate for each player",default=0.05)
parser.add_argument("-q", "--quick",help="Plays a 1 match game", action="store_true")
parser.add_argument("-H", "--Hide",help="Hides help",action="store_true")
args = parser.parse_args()
if args.pvai:
human_mode = True
else:
human_mode = False
if args.Reset:
fresh_start1,fresh_start0 = True,True
else:
fresh_start0,fresh_start1 = False,False
if args.show_moves:
show_move = True
else:
show_move = False
if args.progress:
progress_graphing = True
else:
progress_graphing = False
# argparse hands -d over as a string, so treat "False"/"0" as disabling the display
display = args.display not in (False, "False", "false", "0")
mtime = float(args.time)
if show_move:
from time import gmtime, strftime
if progress_graphing:
"""
import matplotlib.pyplot as plt
import numpy as np
Still in progress
"""
pass
global last_move
last_move = ["41"]
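# player_0.ib / player_1.ib persist the board spaces a player held when it won;
# these "dynamics" are merged back in on later wins and bias future move choices.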
def pull_dynamics(player):
p0_info = open("player_0.ib").read().split()
p1_info = open("player_1.ib").read().split()
if player == "1":
return p1_info
if player == "0":
return p0_info
def table_ref():
print """
Table Reference
==============================
1 |2 | 3 4 5 6 |7 | 8
9 |10| 11 12 13 14 |15| 16
17 |18| 19 20 21 22 |23| 24
25 |26| 27 28 29 30 |31| 32
33 |34| 35 36 37 38 |39| 40
41 |42| 43 44 45 46 |47| 48
==============================
"""
pass
table = {
"1":".", "2":".", "3":".", "4":".", "5":".", "6":".", "7":".", "8":"0",
"9":".", "10":".", "11":".", "12":".", "13":".", "14":".", "15":".", "16":".",
"17":".", "18":".", "19":".", "20":".", "21":".", "22":".", "23":".", "24":".",
"25":".", "26":".", "27":".", "28":".", "29":".", "30":".", "31":".", "32":".",
"33":".", "34":".", "35":".", "36":".", "37":".", "38":".", "39":".", "40":".",
"41":"1", "42":".", "43":".", "44":".", "45":".", "46":".", "47":".", "48":"."
}
# up left = -9
# up down = +-8
# right left = +-1
# down right = +9
# up right = -7
# down left = +7
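# (offsets assume the 8-column layout from table_ref: +/-8 moves one row,
#  +/-1 one column, and the +/-7 / +/-9 values are the diagonals)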
def table_reset():
reset = {
"1":".", "2":".", "3":".", "4":".", "5":".", "6":".", "7":".", "8":"0",
"9":".", "10":".", "11":".", "12":".", "13":".", "14":".", "15":".", "16":".",
"17":".", "18":".", "19":".", "20":".", "21":".", "22":".", "23":".", "24":".",
"25":".", "26":".", "27":".", "28":".", "29":".", "30":".", "31":".", "32":".",
"33":".", "34":".", "35":".", "36":".", "37":".", "38":".", "39":".", "40":".",
"41":"1", "42":".", "43":".", "44":".", "45":".", "46":".", "47":".", "48":"."
}
table.update(reset)
def table_print(table):
if display:
print "\n"+"=" * 26
print """%s .%s. %s %s %s %s .%s. %s
%s .%s. %s %s %s %s .%s. %s
%s .%s. %s %s %s %s .%s. %s
%s .%s. %s %s %s %s .%s. %s
%s .%s. %s %s %s %s .%s. %s
%s .%s. %s %s %s %s .%s. %s""".replace(".","|") %(
table["1"],table["2"],table["3"],table["4"],table["5"],table["6"],table["7"],table["8"],
table["9"],table["10"],table["11"],table["12"],table["13"],table["14"],table["15"],table["16"],
table["17"],table["18"],table["19"],table["20"],table["21"],table["22"],table["23"],table["24"],
table["25"],table["26"],table["27"],table["28"],table["29"],table["30"],table["31"],table["32"],
table["33"],table["34"],table["35"],table["36"],table["37"],table["38"],table["39"],table["40"],
table["41"],table["42"],table["43"],table["44"],table["45"],table["46"],table["47"],table["48"]
)
print "=" * 26
def my_blocks(id):
block = []
id = str(id)
for _ in table:
if table[_] == id:
block.append(_)
return block
def grab_dynamics(player):
if player == "0":
dyn = open("player_0.ib").read().split(" ")
if player == "1":
dyn = open("player_1.ib").read().split(" ")
return dyn
def sort_dynamics(latest,player):
info = latest+grab_dynamics(player)
return list(set([x for x in info if info.count(x) > 1]))
def write_dynamics(dynamics,player):
if player == "0":
dyn = open("player_0.ib","w")
dyn.write(" ".join(dynamics))
dyn.close()
if player == "1":
dyn = open("player_1.ib","w")
dyn.write(" ".join(dynamics))
dyn.close()
def move(id,space,table=table):
table.update({space:id})
last_move = space
table_print(table)
if show_move:
tplay = strftime("%H:%M:%S", gmtime())
print "[%s] Player %s moved to %s" %(tplay,id,space)
time.sleep(mtime)
return last_move
def validate_move(space,id,attempt=0):
if attempt == 5:
return True
if table[space] != id:
if space in globals()["last_move"]:
return False
return True
else:
return False
def move_options(space,player="3"):
space = str(space)
tspace = int(space)
if space in [
"10","11","12","13","14","15",
"18","19","20","21","22","23",
"26","27","28","29","30","31",
"34","35","36","37","38","39",
]:
moves = tspace-9,tspace+9,tspace-7,tspace+7,tspace-8,tspace+8,tspace+1,tspace-1
if space == "1":
moves = tspace+1,tspace+8,tspace+9
if space == "8":
moves = tspace-1,tspace+8,tspace+7
if space == "41":
moves = tspace+1,tspace-8,tspace-7
if space == "48":
moves = tspace-1,tspace-8,tspace-9
if space in ["2","3","4","5","6","7"]:
moves = tspace+8,tspace-1,tspace+1,tspace+7,tspace+9
if space in ["16","24","32","40"]:
moves = tspace-1,tspace-8,tspace+8,tspace-9,tspace+7
if space in ["9","17","25","33"]:
moves = tspace+1,tspace+8,tspace-8,tspace+9,tspace-7
if space in ["42","43","44","45","46","47"]:
moves = tspace-1,tspace+1,tspace-8,tspace-9,tspace-7
area = []
for _ in moves:
if table[str(_)] != player:
area.append(str(_))
return area
def available_moves(player):
moves = []
m = 0
for _ in my_blocks(player):
for v in move_options(_):
if table[v] != player:
moves.insert(m,v)
m += 1
return moves
def dynamic_update(new,old,player):
info = new + old
for _ in new:
info.append(_)
return list(set([x for x in info if info.count(x) > 1]))
def push_dynamics(dyn,player):
if player == "1":
cv = open("player_1.ib").read().split(" ")
dyn = dynamic_update(cv,dyn,"1")
print dyn
f = open("player_1.ib","w")
f.write("")
f.write(" ".join(dyn))
f.close()
print "\nUpdated Values:\n"+open("player_1.ib").read()
time.sleep(2)
if player == "0":
cv = open("player_0.ib").read().split(" ")
dyn = dynamic_update(cv,dyn,"0")
f = open("player_0.ib","w")
f.write("")
f.write(" ".join(dyn))
f.close()
print "\nUpdated Values:\n"+open("player_0.ib").read()
time.sleep(2)
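# A player wins by occupying an entire home column: cells 2,10,18,26,34,42 for player 0 and
# cells 7,15,23,31,39,47 for player 1. Fitness is reported as 48 minus the size of the
# player's stored knowledge file.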
def check_for_win(dinfo0,dinfo1,real=True,table=table,push=False,fresh_start0=fresh_start0,fresh_start1=fresh_start1):
if table["2"] + table["10"] + table["18"] + table["26"] + table["34"] + table["42"] == "000000":
if fresh_start0:
f = open("player_0.ib","w")
f.write(" ".join(my_blocks("0")))
f.close()
fresh_start0 = False
else:
if push:
write_dynamics(sort_dynamics(my_blocks("0"),"0"),"0")
print "Player 0 Wins!"
p0_info = open("player_0.ib").read().split()
p1_info = open("player_1.ib").read().split()
pf0 = 48-len(p0_info)
pf1 = 48-len(p1_info)
if pf0 == 48:
pf0 = 0
if pf1 == 48:
pf1 = 0
print "Player 0 Fitness:",pf0
print "Player 1 Fitness:",pf1
time.sleep(2)
if real:
sys.exit()
else:
table_reset()
return True,"0"
if table["7"] + table["15"] + table["23"] + table["31"] + table["39"] + table["47"] == "111111":
if fresh_start1:
f = open("player_1.ib","w")
f.write(" ".join(my_blocks("1")))
f.close()
fresh_start1 = False
else:
if push:
write_dynamics(sort_dynamics(my_blocks("1"),"1"),"1")
print "Player 1 Wins!"
p0_info = open("player_0.ib").read().split()
p1_info = open("player_1.ib").read().split()
pf0 = 48-len(p0_info)
pf1 = 48-len(p1_info)
if pf0 == 48:
pf0 = 0
if pf1 == 48:
pf1 = 0
print "Player 0 Fitness:",pf0
print "Player 1 Fitness:",pf1
time.sleep(2)
if real:
sys.exit()
else:
table_reset()
return True,"1"
def last_go(last):
globals()["last_move"].append(last)
if len(last_move) > 2:
globals()["last_move"].reverse()
globals()["last_move"].pop(1)
globals()["last_move"].reverse()
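# iblock: fully automated self-play loop. Each turn a player prefers a move found in its
# stored knowledge and falls back to a random valid move when none applies.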
def iblock(real=False,dynamic=False):
push = True
dyn0,dyn1 = [],[]
p0w,p1w = 0,0
while 1:
for _ in "1","0":
dyn0 = my_blocks("0")
dyn1 = my_blocks("1")
if p0w == 5:
check = check_for_win(dyn0,dyn1, real=real,push=True)
p0w = 0
if p1w == 5:
check = check_for_win(dyn0,dyn1, real=real,push=True)
p1w = 0
if p0w < 5 and p1w < 5:
check = check_for_win(dyn0,dyn1, real=real,push=push)
if "None" in str(type(check)):
check = [dynamic,_]
if check[0]:
if check[1] == "1":
p1w += 1
if check[1] == "0":
p0w += 1
dyn = pull_dynamics(check[1])
if check[1] == "0":
dyn0 = dynamic_update(dyn,dyn0,"0")
if check[1] == "1":
dyn1 = dynamic_update(dyn,dyn1,"1")
go_move,block = None,None
if _ == "0":
av = available_moves(_)
try:
if type(av) == tuple:
av = list(av)
for lm in last_move:
av.pop(av.index(lm))
except:
pass
for md in dyn0:
if md in av:
go_move = md
if _ == "1":
av = available_moves(_)
try:
if type(av) == tuple:
av = list(av)
for lm in last_move:
av.pop(av.index(lm))
except:
pass
for md in dyn1:
if md in av:
go_move = md
if go_move == None:
block = True
attempt = -1
while 1:
attempt += 1
try:
if block == None:
c = go_move
else:
c = random.choice(available_moves(_))
if attempt > 2:
c = random.choice(available_moves(_))
if validate_move(c,_,attempt) == True:
break
except Exception as e:
print e
c = random.choice(available_moves(_))
last_go(move(_,c))
def iblockgo(real=False,dynamic=False):
push = True
dyn0,dyn1 = [],[]
p0w,p1w = 0,0
while 1:
for _ in "1","0":
dyn0 = my_blocks("0")
dyn1 = my_blocks("1")
if p0w == 5:
check = check_for_win(dyn0,dyn1, real=real,push=True)
p0w = 0
if p1w == 5:
check = check_for_win(dyn0,dyn1, real=real,push=True)
p1w = 0
if p0w < 5 and p1w < 5:
check = check_for_win(dyn0,dyn1, real=real,push=push)
if "None" in str(type(check)):
check = [dynamic,_]
if check[0]:
if check[1] == "1":
p1w += 1
if check[1] == "0":
p0w += 1
dyn = pull_dynamics(check[1])
if check[1] == "0":
dyn0 = dynamic_update(dyn,dyn0,"0")
if check[1] == "1":
dyn1 = dynamic_update(dyn,dyn1,"1")
go_move,block = None,None
if _ == "0":
av = available_moves(_)
try:
if type(av) == tuple:
av = list(av)
for lm in last_move:
av.pop(av.index(lm))
except:
pass
for md in dyn0:
if md in av:
go_move = md
if _ == "1":
av = available_moves(_)
try:
if type(av) == tuple:
av = list(av)
for lm in last_move:
av.pop(av.index(lm))
except:
pass
for md in dyn1:
if md in av:
go_move = md
if go_move == None:
block = True
if _ == "1":
table_ref()
print "Available Moves:"
for av in available_moves(_):
print av,
print
while 1:
go_move = raw_input("iBlock ~ ")
if go_move in available_moves(_) and validate_move(go_move,_):
last_go(move(_,go_move))
break
else:
attempt = -1
while 1:
attempt += 1
try:
if block == None:
c = go_move
else:
c = random.choice(available_moves(_))
if attempt > 2:
c = random.choice(available_moves(_))
if validate_move(c,_,attempt) == True:
break
except Exception as e:
print e
c = random.choice(available_moves(_))
last_go(move(_,c))
def reset_knowldge():
"""
	Resetting knowledge wipes all past game history and updates it with random winning moves.
"""
	print "Resetting Knowledge..."
time.sleep(1)
if not fresh_start0 or not fresh_start1:
		print "You must change values: \"fresh_start0\" and \"fresh_start1\" to True before resetting."
print "Be sure to change those values back to False while not in reset mode."
time.sleep(3)
if mtime > 0.0009 or display == True:
		print "Consider Temporarily Changing Your Game Settings For Reset:"
print "-Speed should be less than 0.0009"
print "-Display should be turned off"
time.sleep(3)
try:
iblock(False,False)
except:
pass
print "Reset Complete!"
time.sleep(1)
def random_ai_mode():
"""
	Random AI mode disables the learning ability of the program, which causes it to make random moves.
	(Personally this is more entertaining than Intelligence Mode)
"""
print "Starting Random AI Mode..."
if mtime < 0.05:
print "Consider changing the frame rate to more than 0.05 while in random mode"
time.sleep(3)
if display == False:
		print "Consider changing display to True in order to view the game in random mode"
time.sleep(3)
time.sleep(1)
try:
iblock(False,False)
except:
print "Game Paused"
def intelligence_mode():
	"""
	This mode activates the intelligence factor of the machine learning program and tells it to improve based on its last victories!
	"""
	if mtime < 0.005:
		print "Consider changing the frame rate to more than 0.005 while in intelligence mode"
		time.sleep(3)
	if display == False:
		print "Consider changing display to True in order to view the game in intelligence mode"
		time.sleep(3)
	print "Starting Intelligence Mode..."
	time.sleep(1)
try:
iblock(False,True)
except:
print "Game Paused"
def intelligent_1v1():
"""
This is a 1 match mode to quickly see who wins a fast fight
"""
print "Starting Intelligent 1v1..."
if mtime < 0.005:
print "Consider changing the frame rate to more than 0.005 while in intelligence mode"
time.sleep(3)
if display == False:
		print "Consider changing display to True in order to view the game in intelligence mode"
time.sleep(3)
time.sleep(1)
try:
iblock(True,True)
except:
print "Game Paused"
def human_vs_iblock():
"""
	You'll probably lose...
"""
# Coming Soon #
if not args.Hide:
print """
_ _____ _ _
|_| __ | |___ ___| |_
| | __ -| | . | _| '_|
|_|_____|_|___|___|_,_|
"""
parser.print_help()
print
print "Available Game Modes/Options:"
print "-Random Mode"
print "-Intelligence Mode"
print "-1 Match Intelligence Mode"
print "-Reset Mode"
print "-Human vs Player Mode"
	print "\n(Enter the function name for the game mode you want in the Python terminal, or set your arguments to choose your game mode)\n"
print "Set arguments to \"-H\" to disable this message."
time.sleep(0.5)
if len(sys.argv) > 1:
if args.intelligence:
intelligence_mode()
sys.exit()
if args.random:
random_ai_mode()
sys.exit()
if args.Reset:
		print "Stop the program once both players' fitness is at your desired stat"
reset_knowldge()
sys.exit()
if args.quick:
intelligent_1v1()
sys.exit()
if human_mode:
try:
iblockgo()
except:
print "Game Paused/Stopped"
| 27.115756 | 398 | 0.600024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,199 | 0.367544 |
7bc9519279bbaea50bce0ecf16967333a0bd62b5 | 319 | py | Python | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
]
| 5 | 2020-12-05T14:00:39.000Z | 2021-12-02T11:44:54.000Z | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
]
| 11 | 2021-03-15T17:51:43.000Z | 2021-11-24T13:24:39.000Z | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
]
| 1 | 2021-01-02T14:15:10.000Z | 2021-01-02T14:15:10.000Z | def main():
import webbrowser
recherche = 0
while True:
if recherche >= 2:
			print("You have made " + str(recherche) + " searches.")
recherche += 1
		adresse = input("Which address do you want to open? ")
webbrowser.open(adresse)
if __name__ == "__main__":
main()
| 19.9375 | 70 | 0.567398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.219436 |
7bc96e1706c4c4494a902bdb9aa51a33d9269620 | 6,502 | py | Python | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
]
| 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
]
| 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
]
| 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | """Action Module circuits component to update incidents from QRadar Ariel queries"""
import logging
from datetime import datetime
import time
import copy
import json
from string import Template
from pkg_resources import Requirement, resource_filename
import resilient_circuits.template_functions as template_functions
from query_runner.lib.query_action import QueryRunner
from query_runner.lib.qradar_rest_client import QRadarClient
from query_runner.lib.misc import SearchTimeout, SearchFailure
try:
basestring
except NameError:
basestring = str
LOG = logging.getLogger(__name__)
CONFIG_DATA_SECTION = 'ariel'
def config_section_data():
"""sample config data for use in app.config"""
section_config_fn = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/app.config.qradar")
query_dir = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/queries_ariel")
with open(section_config_fn, 'r') as section_config_file:
section_config = Template(section_config_file.read())
return section_config.safe_substitute(directory=query_dir)
class AQLIncidentUpdate(QueryRunner):
""" Acknowledges and fires off new query requests """
def __init__(self, opts):
query_options = opts.get(CONFIG_DATA_SECTION, {})
jinja_filters = template_functions.JINJA_FILTERS
jinja_filters["datetime"] = self._datetime_filter
template_functions.ENV.filters.update(jinja_filters)
super(AQLIncidentUpdate, self).__init__(opts, query_options, run_search)
def _datetime_filter(self, val):
""" JINJA filter to convert ms to YYYY-MM-DD HH:mm:ss """
dt = datetime.fromtimestamp(val/1000.0)
return dt.strftime("%Y-%m-%d %H:%M:%S")
#############################
# Functions for running Query
#############################
def _wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval):
""" Poll QRadar until search execution finishes """
start_time = time.time()
search_status = qradar_client.get_search_status(search_id)
if not search_status:
# Sometimes it takes a little while to be able to query a search id
time.sleep(4)
search_status = qradar_client.get_search_status(search_id)
while search_status.get("status", "") in ("WAIT", "EXECUTE", "SORTING"):
if timeout != 0:
if time.time() - start_time > timeout:
raise SearchTimeout(search_id, search_status.get("status", ""))
time.sleep(polling_interval)
search_status = qradar_client.get_search_status(search_id)
if search_status.get("status", "") != "COMPLETED":
LOG.error(search_status)
raise SearchFailure(search_id, search_status.get("status", ""))
# end _wait_for_query_to_complete
def _get_query_results(search_id, qradar_client, item_range):
""" Get results from a complete QRadar query """
if item_range:
headers = {"Range": item_range}
else:
headers = None
    url = "ariel/searches/{0}/results".format(search_id)
    # NOTE: the Range header built above is not forwarded by this call; qradar_client.get()
    # would need to accept and pass headers through for item_range to take effect.
    response = qradar_client.get(url)
LOG.debug(response)
# Replace "NULL" with ""
response = remove_nulls(response)
return response
# end _get_query_results
def remove_nulls(d):
""" recursively replace 'NULL' with '' in dictionary """
if isinstance(d, basestring):
if d == u'NULL':
return u''
else:
return d
new = {}
LOG.debug("d={d} ".format(d=d))
LOG.debug("type of d is {t}".format(t=type(d)))
for k, v in d.items():
if isinstance(v, dict):
v = remove_nulls(v)
elif isinstance(v, list):
v = [remove_nulls(v1) for v1 in v]
elif isinstance(v, basestring) and v == u'NULL':
v = u''
new[k] = v
LOG.info("Returning: {n}".format(n=new))
return new
def run_search(options, query_definition, event_message):
""" Run Ariel search and return result """
# Read the options and construct a QRadar client
qradar_url = options.get("qradar_url", "")
qradar_token = options.get("qradar_service_token", "")
timeout = int(options.get("query_timeout", 600))
polling_interval = int(options.get("polling_interval", 5))
if not all((qradar_url, qradar_token, timeout, polling_interval)):
LOG.error("Configuration file missing required values!")
raise Exception("Missing Configuration Values")
verify = options.get("qradar_verify", "")
if verify[:1].lower() in ("0", "f", "n"):
verify = False
else:
verify = True
qradar_client = QRadarClient(qradar_url, qradar_token, verify=verify)
error = None
response = None
try:
params = {'query_expression': query_definition.query}
url = "ariel/searches"
response = qradar_client.post(url, params=params)
LOG.debug(response)
search_id = response.get('search_id', '')
if not search_id:
error = "Query Failed: " + response.get("message", "No Error Message Found")
else:
LOG.info("Queued Search %s", search_id)
_wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval)
# Query Execution Finished, Get Results
response = _get_query_results(search_id, qradar_client, query_definition.range)
except Exception as exc:
if not query_definition.onerror:
raise
LOG.error(exc)
error = u"{}".format(exc)
if error:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
mapdata.update({"error": error})
error_template = json.dumps({"events": [query_definition.onerror]}, indent=2)
error_rendered = template_functions.render_json(error_template, mapdata)
response = error_rendered
if not response or len(response["events"]) == 0:
LOG.warn("No data returned from query")
if query_definition.default:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
default_template = json.dumps({"events": [query_definition.default]}, indent=2)
default_rendered = template_functions.render_json(default_template, mapdata)
response = default_rendered
return response
# end run_search
| 36.324022 | 113 | 0.669948 | 642 | 0.098739 | 0 | 0 | 0 | 0 | 0 | 0 | 1,457 | 0.224085 |
7bcaa605df103e994b12588df4d84741fe74b87f | 2,371 | py | Python | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
]
| null | null | null | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
]
| null | null | null | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
]
| null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from email import encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
# Format an email address for use in a header
def _format_addr(s):
    # parseaddr: parse the email address out of the string
    name, addr = parseaddr(s)
    # The display name may contain Chinese characters, so it must be encoded via a Header object
    # formataddr: the inverse of parseaddr
    return formataddr((Header(name, 'utf-8').encode(), addr))
# Login account and password
from_addr = input('From:')
password = input('Password:')
# Recipient address
to_addr = input('To:')
# Target SMTP server
smtp_server = input('SMTP server:')
# Build the mail
# Plain-text content
msg = MIMEText('Hello,send by Python...', 'plain', 'utf-8')
# HTML content
msg = MIMEText('<html><body><h1>Hello</h1>' +
               '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
               '</body></html>', 'html', 'utf-8')
# Sender
msg['From'] = _format_addr('Python爱好者<%s>' % from_addr)
# Recipient
msg['To'] = _format_addr('管理员<%s>' % to_addr)
# Subject
msg['Subject'] = Header('来自SMTP的问候...', 'utf-8').encode()
# Mail object (multipart)
msg = MIMEMultipart()
msg = MIMEMultipart('alternative')
msg['From'] = _format_addr('Python爱好者<%s>' % from_addr)
msg['To'] = _format_addr('管理员<%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候。。。', 'utf-8').encode()
# The message body is MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
msg.attach(MIMEText('<html><body><h1>Hello</h1>' +
'<p><img src="cid:0"></p>' +
'</body></html>', 'html', 'utf-8'))
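# Note: msg was rebuilt several times above; only the final MIMEMultipart('alternative')
# message, with the parts and the attachment added below, is what actually gets sent.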
with open('/Users/doc88/Desktop/banner.png', 'rb') as f:
    # Set up the attachment MIME part, reading an image from the local disk
    mime = MIMEBase('image', 'png', filename='banner.png')
    # Add the required headers:
    mime.add_header('Content-Disposition', 'attachment', filename='banner.png')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment content:
    mime.set_payload(f.read())
    # Encode with Base64:
    encoders.encode_base64(mime)
    # Attach it to the MIMEMultipart:
    msg.attach(mime)
try:
    # Send the mail
    # Connect to the server
    server = smtplib.SMTP_SSL(smtp_server, 465)
    # Print all interaction with the SMTP server
    server.set_debuglevel(1)
    # Log in to the server
    server.login(from_addr, password)
    # Send the mail
    # sender account, recipient accounts, message content
    server.sendmail(from_addr, [to_addr], msg.as_string())
    # Quit the server session
server.quit()
print('Success!')
except smtplib.SMTPException as e:
print('Fail,%s' % e) | 28.22619 | 79 | 0.634753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,380 | 0.497656 |
7bcea7388e12344b8c218c07128ff9fb1cd5ed79 | 1,519 | py | Python | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
]
| null | null | null | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
]
| null | null | null | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
]
| null | null | null | #!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
class PlainReader:
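    """A line reader with one-line lookahead over a string or an iterable of lines."""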
def __init__(self, content):
if isinstance(content, (str, )):
self.content = content.splitlines(keepends=False)
else:
self.content = content
self.content_iter = iter(self.content)
self._cache = None
def next_line(self):
if self._cache is None:
return next(self.content_iter)
else:
swap = self._cache
self._cache = None
return swap
def top_line(self):
if self._cache is None:
self._cache = next(self.content_iter)
return self._cache
def skip_line(self):
if self._cache is None:
next(self.content_iter)
else:
self._cache = None
def has_next(self):
try:
if self._cache is None:
self._cache = next(self.content_iter)
return True
except StopIteration:
return False
| 27.125 | 84 | 0.623436 | 960 | 0.631995 | 0 | 0 | 0 | 0 | 0 | 0 | 553 | 0.364055 |
7bcfdbc346740098cdd0e1ea01a84bd850dcb6f3 | 2,895 | py | Python | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
]
| null | null | null | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
]
| 2 | 2020-06-05T17:29:54.000Z | 2021-06-10T18:58:13.000Z | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
]
| null | null | null |
__author__ = 'schien'
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.admin import BooleanFieldListFilter
from api.models import Scan, Measure, InstalledMeasure, MeasureCategory, App, MessageThread, RedirectUrl, TrackableURL, Click, UserProfile, Favourite, \
LoggerMessage
from api.models import Device, House, Note, HomeOwnerProfile, Message
class TrackableUrlInline(admin.StackedInline):
model = TrackableURL
can_delete = False
# verbose_name_plural = 'House'
class RedirectUrlAdmin(admin.ModelAdmin):
#inlines = (TrackableUrlInline,)
pass
admin.site.register(RedirectUrl, RedirectUrlAdmin)
admin.site.register(TrackableURL)
admin.site.register(Click)
class InstalledMeasureInline(admin.StackedInline):
model = InstalledMeasure
can_delete = False
verbose_name_plural = 'House'
class HouseAdmin(admin.ModelAdmin):
inlines = (InstalledMeasureInline,)
admin.site.register(House, HouseAdmin)
class MessagesAdmin(admin.ModelAdmin):
readonly_fields = ('key',)
date_hierarchy = 'created'
admin.site.register(Message, MessagesAdmin)
admin.site.register(MessageThread)
class HouseInline(admin.StackedInline):
model = House
can_delete = False
verbose_name_plural = 'House'
class CreatedDateAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
list_filter = ('user', 'created')
admin.site.register(Device, CreatedDateAdmin)
admin.site.register(Scan, CreatedDateAdmin)
admin.site.register(Note, CreatedDateAdmin)
class ScanInline(admin.StackedInline):
model = Scan
extra = 0
class NoteInline(admin.StackedInline):
model = Note
extra = 0
class FavouriteInline(admin.StackedInline):
model = Favourite
extra = 0
class PhoneInline(admin.StackedInline):
model = Device
can_delete = False
verbose_name_plural = 'Phone'
extra = 0
class HomeOwnerProfileInline(admin.StackedInline):
model = HomeOwnerProfile
can_delete = False
verbose_name_plural = 'Home Owners'
extra = 0
class UserProfileAdmin(admin.ModelAdmin):
model = UserProfile
can_delete = False
# Define a new User admin
class UserAdmin(UserAdmin):
inlines = (PhoneInline, HomeOwnerProfileInline, ScanInline, NoteInline, FavouriteInline)
list_display = ('username', 'email', 'first_name', 'last_name', 'is_active', 'date_joined', 'is_staff')
list_filter = ['is_staff', 'is_superuser', 'date_joined', 'last_login', ]
# bristol
admin.site.register(Measure)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(HomeOwnerProfile)
admin.site.register(InstalledMeasure)
admin.site.register(MeasureCategory)
admin.site.register(App)
admin.site.register(LoggerMessage)
# frome
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin) | 24.327731 | 152 | 0.760622 | 1,648 | 0.569257 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.11399 |
c8730231294cec0e238e9725d099edb7ac1ec02d | 7,359 | py | Python | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
]
| null | null | null | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
]
| null | null | null | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
]
| 1 | 2021-06-01T03:47:35.000Z | 2021-06-01T03:47:35.000Z | import numpy as np
from scipy.sparse import csc_matrix, diags, tril
from .basis import Basis
__author__ = 'Randall'
# TODO: complete this class
# todo: compare performance of csr_matrix and csc_matrix to deal with sparse interpolation operators
# fixme: interpolation is 25x slower than in matlab with 2 dimensions!! 2x slower with only one
class BasisSpline(Basis):
def __init__(self, *args, k=3, **kwargs):
nargs = len(args)
if nargs == 1:
if isinstance(args[0], tuple):
breaks = [np.sort(br) for br in args[0]]
n = np.array([br.size + k - 1 for br in breaks])
a = np.array([br[0] for br in breaks])
b = np.array([br[-1] for br in breaks])
kwargs['nodetype'] = 'user'
else:
raise ValueError("If only 1 positional argument is provided, it must be a tuple of 'd' array-like, " +
"each of them containing the breaks for one dimension.")
elif nargs == 3:
n, a, b = np.broadcast_arrays(*np.atleast_1d(*args))
breaks = [np.linspace(aa, bb, nn + 1 - k) for aa, bb, nn in zip(a, b, n)]
kwargs['nodetype'] = 'canonical'
else:
txt = 'Either 1 or 3 positional arguments must be provided\n'
txt += '\t1 argument -> break points\n'
txt += '\t3 argument -> n, a, b'
raise ValueError(txt)
''' Check inputs '''
assert ((k > 0) and type(k) is int), 'k must be a positive integer'
assert np.all(n > k), 'number of nodes must exceed order of spline'
assert np.all([(br.size > 1) for br in breaks]), 'breakpoint sequence must contain at least two elements'
''' Make instance '''
kwargs['basistype'] = 'spline'
super().__init__(n, a, b, **kwargs)
self.k = k
self.breaks = breaks
self._set_nodes()
def _set_nodes(self):
"""
Sets the basis nodes
:return: None
"""
n = self.n
k = self.k
self._nodes = list()
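        # Each node is the average of k consecutive augmented knots (Greville-style abscissae),
        # with the endpoints clamped to [a, b] below.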
for i in range(self.d):
x = np.cumsum(self._augbreaks(i, k))
x = (x[k : n[i] + k] - x[:n[i]]) / k
x[0] = self.a[i]
x[-1] = self.b[i]
self._nodes.append(x)
self._expand_nodes()
def _augbreaks(self, i, m,):
aa = np.repeat(self.a[i], m)
bb = np.repeat(self.b[i], m)
return np.concatenate((aa, self.breaks[i], bb))
def _update_diff_operators(self, i, order):
"""
Updates the list _D of differentiation operators
:param order: order of required derivative
:return: None
"""
keys = set(self._diff_operators[i].keys())
if (order in keys) or (order == 0):
return # Use previously stored values if available
n = self.n[i]
a = self.a[i]
b = self.b[i]
k = self.k
assert order <= k, 'order must be less or equal to k'
kk = k - 1 - min(order, 0)
augbreaks = self._augbreaks(i, kk)
if order > 0:
def sptemp(j):
temp = np.atleast_2d((k + 1 - j) / (augbreaks[k:(n + k - j)] - augbreaks[(j - 1):(n - 1)]))
return diags((-temp, temp), [0, 1], (n - j, n + 1 - j))
missing_keys = set(range(1, order + 1)) - keys
if 1 in missing_keys:
self._diff_operators[i][1] = sptemp(1)
missing_keys -= {1}
missing_keys = list(missing_keys)
missing_keys.sort(reverse=True)
while missing_keys:
j = missing_keys.pop()
self._diff_operators[i][j] = np.dot(sptemp(j), self._diff_operators[i][j - 1])
else:
def sptemp(j):
temp = (augbreaks[(kk + 1):(kk + n - j)] -
augbreaks[(kk - k + j + 1):(kk + n - k)]) / (k - j)
return tril(np.tile(temp, (n - j, 1)), -1)
missing_keys = set(range(order, 0)) - keys
if -1 in missing_keys:
self._diff_operators[i][-1] = sptemp(-1)
missing_keys -= {-1}
missing_keys = list(missing_keys)
missing_keys.sort(reverse=False)
while missing_keys:
j = missing_keys.pop()
self._diff_operators[i][j] = sptemp(j) * self._diff_operators[i][j + 1]
"""
Interpolation methods
"""
def _phi1d(self, i, x=None, order=0):
"""
Computes interpolation matrices for given data x and order of differentiation 'order' (integration if negative)
:param x: evaluation points (defaults to nodes)
:param order: a list of orders for differentiation (+) / integration (-)
:return a: dictionary with interpolation matrices, keys given by unique elements of order.
Example: Create a basis with 5 nodes, get the interpolation matrix evaluated at 20 points::
n, a, b = 5, 0, 4
x = numpy.linspace(a,b, 20)
Phi = BasisSpline(n, a, b)
Phi.Phi(x)
Phi(x)
Calling an instance directly (as in the last line) is equivalent to calling the interpolation method.
"""
n = self.n[i]
k = self.k
if order is None:
order = 0
order = np.atleast_1d(order).flatten()
assert np.max(order) < k, 'Derivatives defined for order less than k'
nn = n + np.maximum(0, -np.min(order)) # todo review why nn is not used, weird
# Check for x argument
xIsProvided = (x is not None)
x = x.flatten() if xIsProvided else self._nodes[i]
nx = x.size
minorder = np.min(order)
kaug = k - minorder
augbreaks = self._augbreaks(i, kaug)
ind = self._lookup(augbreaks, x)
# Recursively determine the values of a k-order basis matrix.
# This is placed in an (m x k+1-order) matrix
bas = np.zeros((kaug + 1, nx))
bas[0] = 1
Phidict = dict()
for j in range(1, kaug + 1):
for jj in range(j, 0, -1):
b0 = augbreaks[ind + jj - j]
b1 = augbreaks[ind + jj]
temp = bas[jj - 1] / (b1 - b0)
bas[jj] = (x - b0) * temp + bas[jj]
bas[jj - 1] = (b1 - x) * temp
# as now contains the order j spline basis
ii = np.where((k - j) == order)[0]
if ii.size > 0:
ii = ii[0]
oi = order[ii]
# Put values in appropriate columns of a sparse matrix
r = np.tile(np.arange(nx), k - oi + 1)
c = np.atleast_2d(np.arange(oi - k, 1)).T + np.atleast_2d(ind)
c = (c - (oi - minorder)).flatten()
data = bas[:k - oi + 1].flatten()
Phidict[oi] = csc_matrix((data, (r, c)), (nx, n-oi))
if oi:
# If needed compute derivative or anti-derivative
Phidict[oi] = Phidict[oi] * self._diff(i, oi)
# todo: review, i think this will return only unique values
Phi = np.array([Phidict[k] for k in order])
return Phi
| 36.430693 | 119 | 0.512298 | 7,016 | 0.95339 | 0 | 0 | 0 | 0 | 0 | 0 | 2,217 | 0.301264 |
c873b44db1fbe52cb97100b99eb41550c409cc9f | 2,279 | py | Python | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
]
| 4 | 2019-01-11T03:41:28.000Z | 2019-09-12T06:57:17.000Z | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
]
| null | null | null | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
]
| 2 | 2019-01-10T05:00:18.000Z | 2020-02-15T16:32:56.000Z | import os
import os.path
import sys
# Modified version from Python-3.3. 'env' environ dict override has been added.
def which(cmd, mode=os.F_OK | os.X_OK, env=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `env` defaults to os.environ,
if not supplied.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# Short circuit. If we're given a full path which matches the mode
# and it exists, we're done here.
if _access_check(cmd, mode):
return cmd
if env is None:
env = os.environ
path = env.get("PATH", os.defpath).split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
default_pathext = \
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC'
pathext = env.get("PATHEXT", default_pathext).split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
# If it does match, only test that one, otherwise we have to try
# others.
files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
dir = os.path.normcase(dir)
if not dir in seen:
seen.add(dir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
| 36.174603 | 79 | 0.617376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,102 | 0.483545 |
c876f748ac3b92bbe9dd6ace6cf8630a36ac3d08 | 6,469 | py | Python | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
]
| 2 | 2019-02-13T11:30:08.000Z | 2021-02-14T04:20:44.000Z | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
]
| null | null | null | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
]
| null | null | null | """Defines the classes SymbolTable and SymbolTableNode"""
import sys
from numpy import ones
class SymbolTableNode:
"""Defines a class SymbolTableNode which stores the nodes in the SymbolTable"""
def __init__(self, name, type_name, parameters = None, size = None):
"""Initializes the Node"""
self.name = name
self.type_name = type_name
if parameters is None:
self.parameters = []
else:
self.parameters = parameters
if size is None:
self.size = []
else:
self.size = size
def print_node(self):
print "Name:", self.name, "Type:", self.type_name, "Size:", self.size
if len(self.parameters) != 0:
print "Parameters:"
for p in self.parameters:
p.print_node()
class SymbolTable:
"""Defines a class for SymbolTable"""
def __init__(self):
"""Initializes the SymbolTable"""
self.symbol_table = {
'scope_0': {
'name': 'scope_0',
'type': 'scope_0',
'parent': None,
'identifiers': [],
'functions': [],
'allvars': [],
'nextuse': dict()
}
}
self.var_list = []
self.next_use = dict()
self.current_scope = 'scope_0'
def start_scope(self, scope):
"""Starts a scope"""
self.current_scope = scope
def end_scope(self):
"""Ends a scope"""
self.current_scope = self.symbol_table[self.current_scope]['parent']
def new_scope(self, scope_name):
self.symbol_table[scope_name] = {
'name': scope_name,
'type': scope_name,
'parent': self.current_scope,
'identifiers': [],
'functions': [],
'allvars': [],
'nextuse': dict()
}
def add_scope(self, scope_name):
"""Adds a new scope to the SymbolTable"""
if scope_name not in self.symbol_table.keys():
self.new_scope(scope_name)
self.start_scope(scope_name)
def add_identifier(self, TreeNode, scope = None, size = None):
if scope is None:
scope = self.current_scope
for node in self.symbol_table[scope]['identifiers']:
if TreeNode.data == node.name:
return True
newNode = SymbolTableNode(TreeNode.data, TreeNode.input_type, size = size)
self.symbol_table[scope]['identifiers'] += [newNode]
return True
def add_function(self, name, return_type, parameters):
ret = []
for t in return_type.children:
ret += [t.data]
params = []
for p in parameters:
params += [SymbolTableNode(p.data, p.input_type)]
newNode = SymbolTableNode(name, ret, params)
self.symbol_table[self.current_scope]['functions'] += [newNode]
return True
def add_var(self, TreeNode, scope = None):
if scope is None:
scope = self.current_scope
for node in self.symbol_table[scope]['allvars']:
if TreeNode.name == node.name:
return True
# newNode = SymbolTableNode(TreeNode.name, TreeNode.type_name)
self.symbol_table[scope]['allvars'] += [TreeNode]
return True
def search_identifier(self, name):
scope = self.current_scope
while scope != None:
for node in self.symbol_table[scope]['identifiers']:
if name == node.name:
return scope + '_' + name
scope = self.symbol_table[scope]['parent']
return False
def search_function(self, name):
scope = self.current_scope
while scope != None:
for node in self.symbol_table[scope]['functions']:
if name == node.name:
return scope + '_' + name
scope = self.symbol_table[scope]['parent']
return False
def print_symbol_table(self):
"""Prints the symbol table"""
print ''
print 'SYMBOL TABLE'
for y in self.symbol_table.keys():
print y, "with parent", self.symbol_table[y]['parent']
if len(self.symbol_table[y]['identifiers']) != 0:
print "Identifiers:"
for x in self.symbol_table[y]['identifiers']:
x.print_node()
if len(self.symbol_table[y]['functions']) != 0:
print "Functions:"
for x in self.symbol_table[y]['functions']:
x.print_node()
if len(self.symbol_table[y]['allvars']) != 0:
print "Allvars:"
for x in self.symbol_table[y]['allvars']:
x.print_node()
print ""
    def make_var_list(self):
        for scope in self.symbol_table.keys():
            for x in self.symbol_table[scope]['allvars']:
                if self.var_list == []:
                    self.var_list += [x]
                else:
                    already_present = 0
                    for node in self.var_list:
                        if node.name == x.name:
                            already_present = 1
                    if already_present == 0:
                        self.var_list += [x]
        return self.var_list
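    # Backward pass over the three-address code: for every variable, record the nearest
    # following line in which it is used again (sys.maxsize when it is never used again).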
def fill_next_use(self,three_address_code):
line_count = three_address_code.length()
# Initializing symbol_table values for each variable
for var in self.var_list:
self.next_use[var.name] = [sys.maxsize * ones(line_count), 0]
# traversing the three_address_code in reverse order
for i in range(line_count):
j = line_count - i - 1
three_address_instr = three_address_code.code[j]
var1 = three_address_instr[3]
var2 = three_address_instr[4]
for line_no in range(0, j):
if var1 in self.var_list:
self.next_use[var1.name][0][line_no] = j + 1
if var2 in self.var_list:
self.next_use[var2.name][0][line_no] = j + 1
# def add_node(self, node):
# """Adds a node to the SymbolTable"""
# self.symbol_table.append(node)
# def search_node(self,name):
# """Searches for a node in the SymbolTable"""
# for i in range(len(self.symbol_table)):
# if self.symbol_table[i].name == name:
# return 1
# return 0
| 34.227513 | 83 | 0.534549 | 6,372 | 0.985005 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.190292 |
c879174dc589e41a31be3771fbf140871339c500 | 151 | py | Python | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
]
| null | null | null | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
]
| 2 | 2022-02-23T12:03:32.000Z | 2022-02-23T14:27:29.000Z | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
]
| null | null | null | from setuptools import setup, version
setup(
name="NorthNet",
version="0.0",
author="William E. Robinson",
packages = ["NorthNet"],
)
| 16.777778 | 37 | 0.635762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.304636 |
c87b5c6d8dff26ac4e6274273976c58563c8553b | 13,380 | py | Python | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
]
| null | null | null | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
]
| null | null | null | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
]
| null | null | null | """Class with high-level methods for processing NAPS and NAPS BE datasets."""
from config import DATA_NAPS_BE_ALL
from lib import partition_naps
from lib import plot
from lib import plot_clusters
from lib import plot_clusters_with_probability
from lib import plot_setup
from lib import read_naps
from lib import read_naps_be
from lib import reindex_partitions
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy
import sklearn
class Runner:
"""Provides methods for processing NAPS with the clustering algorithm."""
def __init__(self, input_data, config):
self.input_data = input_data
self.config = config
def compute_raw_partitions(self):
"""Compute the k-means and returns the cluster index for each sample."""
kmeans = partition_naps(
samples=self.input_data.samples,
n_clusters=self.config.n_clusters)
return kmeans.labels_
def compute_stable_partitions(self):
"""Same as compute_raw_partition, but with stable index coloring."""
return reindex_partitions(
samples=self.input_data.samples,
indices=self.compute_raw_partitions())
def compute_average_partitions(self):
"""
Repeats the stable colored k-means and computes the average membership
of each input sample. For each sample, return the percentage of membership
to a cluster, as an array of size n_clusters (Monte-Carlo simulation).
"""
cluster_hist = np.zeros(
(self.input_data.size, self.config.n_clusters))
for k in range(self.config.n_iterations):
indices = self.compute_stable_partitions()
for i, cluster in enumerate(indices):
cluster_hist[i][cluster] += 1
return np.divide(cluster_hist, self.config.n_iterations)
def compute_stable_argmax_partitions(self):
"""Computes the stable partitions using the Monte-Carlo simulation, and
selects the most frequent cluster based on the probability (argmax)."""
indices_prob = self.compute_average_partitions()
self._display_undecided_index_count(indices_prob)
return np.argmax(indices_prob, axis=1)
def compute_naps_results(self, num_samples=5, prefix_dir='naps-clustering'):
"""Saves the clustering results and plots the NAPS clusters."""
with self.config.fork() as config:
p = config.n_iterations
for k in range(*config.n_clusters_range):
config.n_clusters = k
# Partition with caching.
indices = np.array(self.cached(
func=self.compute_stable_argmax_partitions,
prefix_dir=prefix_dir,
name='naps-clustering-k=%d-p=%d' % (k, p)))
# Split the input data.
partitioned_data = self.partition_input_data(indices)
# Save the separated datasets.
partitions_filename = self.join_path(
prefix_dir,
'naps-clustering-partitioned-full-k=%d-p=%d.csv' % (k, p))
with open(partitions_filename, "w") as f:
for cluster, data in partitioned_data.items():
f.write(data.serialize() + "\n")
# Save the chosen samples.
samples_filename = self.join_path(
prefix_dir,
'naps-clustering-partitioned-samples-k=%d-p=%d.csv' % (k, p))
with open(samples_filename, "w") as f:
for cluster, data in partitioned_data.items():
chunk = data.reduce_to_samples(num_samples)
f.write(chunk.serialize(";", use_quotes=False) + "\n")
self.plot(
indices=indices,
filename=self.join_path(
prefix_dir,
'naps-clustering-k=%d-p=%d.png' % (k, p)),
output_action='save')
def compute_naps_be_results(
self,
x_axis,
y_axis,
num_samples=5,
prefix_dir='naps-be-clustering'):
"""Saves the clustering results and plots the NAPS BE clusters."""
p = self.config.n_iterations
k = self.config.n_clusters
# Partition with caching.
indices = np.array(self.cached(
func=self.compute_stable_argmax_partitions,
prefix_dir=prefix_dir,
name='naps-be-clustering-%s-%s-k=%d-p=%d' % (x_axis, y_axis, k, p)))
# Split the input data.
partitioned_data = self.partition_input_data(indices)
# Save the separated datasets.
partitions_filename = self.join_path(
prefix_dir,
'naps-be-clustering-partitioned-full-%s-%s-k=%d-p=%d.csv' % (
x_axis, y_axis, k, p))
with open(partitions_filename, "w") as f:
for cluster, data in partitioned_data.items():
f.write(data.serialize() + "\n")
# Save the chosen samples.
samples_filename = self.join_path(
prefix_dir,
'naps-be-clustering-partitioned-samples-%s-%s-k=%d-p=%d.csv' % (
x_axis, y_axis, k, p))
with open(samples_filename, "w") as f:
for cluster, data in partitioned_data.items():
chunk = data.reduce_to_samples(num_samples)
f.write(chunk.serialize(";", use_quotes=False) + "\n")
self.plot(
indices=indices,
filename=self.join_path(
prefix_dir,
'naps-be-clustering-%s-%s-k=%d-p=%d.png' % (x_axis, y_axis, k, p)),
output_action='save')
def compute_stability_error_of_iterations(self):
"""Computes the stability error curve as a function of number of
iterations."""
with self.config.fork() as config:
return [
self._compute_stability_error_point(config.n_iterations)
for config.n_iterations in
range(*config.n_iterations_range)
]
def compute_stability_error_of_partition_count(self):
"""Computes the stability error curve as a function of number of
clusters."""
with self.config.fork() as config:
return [
self._compute_stability_error_point(config.n_clusters)
for config.n_clusters in
range(*config.n_clusters_range)
]
def partition_input_data(self, indices):
"""Splits the input data to partitions as defined by the indices."""
return self.input_data.split_on_key(lambda i, row: indices[i])
def plot(self, indices, output_action='save', filename=None):
"""Plots the clusters."""
if filename is None:
# TODO: Add date?
filename = self.join_path('out-single-run.png')
plot_clusters(
indices=indices,
input_data=self.input_data,
n_clusters=self.config.n_clusters,
output_action=output_action,
filename=filename)
def plot_repeated(
self,
partition_factory,
n_plots=10,
name='out',
prefix_dir='.'):
"""
Runs the partition_factory requested number of times, plots and saves the
images.
"""
for i in range(n_plots):
self.plot(
indices=partition_factory(),
output_action='save',
filename=self.join_path(prefix_dir, '%s-%02d.png' % (name, i)))
def plot_fuzzy(self, prefix_dir='.', name='out-fuzzy-simple'):
"""Plots the undecidable points."""
indices_prob = np.array(self.cached(
func=self.compute_average_partitions,
name=name,
prefix_dir=prefix_dir))
plot_clusters_with_probability(
indices_prob=indices_prob,
input_data=self.input_data,
plot_fuzzy_simple=True,
output_action='save',
filename=self.join_path(prefix_dir, '%s.png' % name))
def plot_cluster_number_evaluation_curve(
self,
evaluate,
title,
name,
score_label,
prefix_dir='.'):
"""Plots the evaluation curve as a function of number of clusters K."""
samples = self.input_data.samples
k_range = range(*self.config.n_clusters_range)
score = [evaluate(samples, k) for k in k_range]
self.save_csv(
data=zip(k_range, score),
columns=['partition count', score_label],
prefix_dir=prefix_dir,
name=name)
plt.figure(num=None, figsize=(16, 9), dpi=300)
plt.title(title)
plt.xlabel('partition count')
plt.ylabel(score_label)
plt.xticks(np.arange(*self.config.n_clusters_range, 2.0))
plt.plot(k_range, score)
plt.grid()
plt.savefig(self.join_path(prefix_dir, '%s.png' % name))
return plt
def plot_stability_error_curve(
self,
results,
title,
name,
xlabel,
ylabel,
xticks=200,
yticks=5,
figsize=(16, 6),
dpi=300,
prefix_dir='.'):
plt.figure(num=None, figsize=figsize, dpi=dpi)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks(np.arange(0, 1 + max([x for x, y in results]), xticks))
plt.yticks(np.arange(0, 1 + max([y for x, y in results]), yticks))
plt.plot(*zip(*results))
plt.grid()
plt.savefig(
self.join_path(prefix_dir, '%s.png' % name),
bbox_inches='tight')
return plt
def plot_multiple_cluster_number_evaluation_curves(
self,
input_data_list,
evaluate,
n_clusters_range,
title,
name,
score_label,
prefix_dir='.'):
"""Plots the evaluation curve for a given range of K."""
fig, ax = plot_setup()
plt.title(title)
plt.xlabel('partition count')
plt.ylabel(score_label)
plt.xticks(np.arange(*n_clusters_range, 2.0))
color = plt.cm.rainbow(np.linspace(0, 1, len(input_data_list)))
k_range = range(*n_clusters_range)
score_vectors = []
for i, input_data in enumerate(input_data_list):
score = [evaluate(input_data.samples, k) for k in k_range]
ax.plot(k_range, score, color=color[i], label=input_data.label_name)
score_vectors.append(score)
score_average = np.average(score_vectors, axis=0)
ax.plot(k_range, score_average, color=(0, 0, 0, 1), label="Average")
plt.grid()
plt.legend()
plt.savefig(self.join_path(prefix_dir, '%s.png' % name))
def _compute_stability_error_point(self, variable):
"""Computes one error point though the given number of evaluation
simulations."""
cluster_hist = np.zeros(
(self.input_data.size, self.config.n_clusters))
for i in range(self.config.n_evaluations):
indices = self.compute_stable_argmax_partitions()
for j, cluster in enumerate(indices):
cluster_hist[j][cluster] += 1
total_error = self._compute_total_histogram_error(
cluster_hist, self.config.n_evaluations)
error_point = (variable, total_error)
print(error_point)
return error_point
def cached(self, func, name, prefix_dir='.'):
"""Runs the provided method using a caching mechanism."""
filename = self.join_path(prefix_dir, '%s.cached-result.json' % name)
if os.path.exists(filename):
with open(filename, 'r') as f:
results = json.load(f)
else:
results = func()
with open(filename, 'w') as f:
try:
results = results.tolist()
except:
pass
json.dump(results, f)
return results
def save_csv(
self,
data,
columns,
name,
delimiter=';',
prefix_dir='.',
extension='.csv'):
"""Saves data into a CSV file."""
filename = self.join_path(prefix_dir, name + extension);
def encode(item):
return str(item)
with open(filename, 'w') as f:
f.write(delimiter.join(['"%s"' % column for column in columns]) + '\n')
for row in data:
f.write(delimiter.join([encode(item) for item in row]) + '\n')
def join_path(self, *args):
"""Joins a path for an output file and creates directories if they don't
exist."""
filename = os.path.join(self.config.out_dir, *args)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname, 0o755)
print("I/O path:", os.path.abspath(filename))
return filename
def _compute_total_histogram_error(self, hist, n_evaluations):
"""Computes the total error from the histogram of point cluster
membership."""
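    # Points that landed in the same cluster in every evaluation are zeroed out; each remaining
    # (unstable) point contributes (number of distinct clusters it visited - 1) to the error.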
hist[hist == n_evaluations] = 0
sums_per_row = (hist != 0).sum(1)
return sums_per_row.sum() - np.count_nonzero(sums_per_row)
def _display_undecided_index_count(self, indices_prob):
"""Counts and prints out how many points have appeared at the edges of
clusters (the undecidability region)."""
print("Undecided count:", len(list(filter(
lambda row: np.max(row) == 0.5, indices_prob))))
@staticmethod
def compute_silhouette_score(samples, n_clusters):
"""Computes the silhouette score for a provided clustering result."""
kmeans = partition_naps(samples, n_clusters)
return sklearn.metrics.silhouette_score(
samples,
kmeans.labels_,
metric='euclidean')
@staticmethod
def stream_naps_be(
config,
x_dimensions, y_dimensions,
x_dimension_names, y_dimension_names):
"""Generates datasets for chosen pairs of dimensions."""
for i in range(len(x_dimensions)):
for j in range(len(y_dimensions)):
if len(x_dimensions) == len(y_dimensions) and j <= i:
continue
x_axis, y_axis = x_dimensions[i], y_dimensions[j]
x_name, y_name = x_dimension_names[i], y_dimension_names[j]
input_data = read_naps_be(
DATA_NAPS_BE_ALL,
label_field="label",
x_axis=x_axis,
y_axis=y_axis,
label_name="Label",
x_name=x_name,
y_name=y_name)
yield Runner(input_data=input_data, config=config), x_axis, y_axis
| 34.307692 | 78 | 0.65568 | 12,915 | 0.965247 | 773 | 0.057773 | 1,092 | 0.081614 | 0 | 0 | 2,870 | 0.214499 |
c87d1cba2782a99d03e9fe56c04a83d537ce2a1a | 2,936 | py | Python | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
]
| 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
]
| null | null | null | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
]
| null | null | null | """
1618. Maximum Font to Fit a Sentence in a Screen
Medium
You are given a string text. We want to display text on a screen of width w and height h. You can choose any font size from array fonts, which contains the available font sizes in ascending order.
You can use the FontInfo interface to get the width and height of any character at any available font size.
The FontInfo interface is defined as such:
interface FontInfo {
// Returns the width of character ch on the screen using font size fontSize.
// O(1) per call
public int getWidth(int fontSize, char ch);
// Returns the height of any character on the screen using font size fontSize.
// O(1) per call
public int getHeight(int fontSize);
}
The calculated width of text for some fontSize is the sum of every getWidth(fontSize, text[i]) call for each 0 <= i < text.length (0-indexed). The calculated height of text for some fontSize is getHeight(fontSize). Note that text is displayed on a single line.
It is guaranteed that FontInfo will return the same value if you call getHeight or getWidth with the same parameters.
It is also guaranteed that for any font size fontSize and any character ch:
getHeight(fontSize) <= getHeight(fontSize+1)
getWidth(fontSize, ch) <= getWidth(fontSize+1, ch)
Return the maximum font size you can use to display text on the screen. If text cannot fit on the display with any font size, return -1.
Example 1:
Input: text = "helloworld", w = 80, h = 20, fonts = [6,8,10,12,14,16,18,24,36]
Output: 6
Example 2:
Input: text = "leetcode", w = 1000, h = 50, fonts = [1,2,4]
Output: 4
Example 3:
Input: text = "easyquestion", w = 100, h = 100, fonts = [10,15,20,25]
Output: -1
Constraints:
1 <= text.length <= 50000
text contains only lowercase English letters.
1 <= w <= 10^7
1 <= h <= 10^4
1 <= fonts.length <= 10^5
1 <= fonts[i] <= 10^5
fonts is sorted in ascending order and does not contain duplicates.
"""
# """
# This is FontInfo's API interface.
# You should not implement it, or speculate about its implementation
# """
#class FontInfo(object):
# Return the width of char ch when fontSize is used.
# def getWidth(self, fontSize, ch):
# """
# :type fontSize: int
# :type ch: char
# :rtype int
# """
#
# def getHeight(self, fontSize):
# """
# :type fontSize: int
# :rtype int
# """
class Solution:
def maxFont(self, text: str, w: int, h: int, fonts: List[int], fontInfo : 'FontInfo') -> int:
def check(fs):
if fontInfo.getHeight(fs) > h:
return False
if sum(fontInfo.getWidth(fs, c) for c in text) > w:
return False
return True
l, r = -1, len(fonts) - 1
while l < r:
m = r - (r - l) // 2
if check(fonts[m]):
l = m
else:
r = m - 1
return fonts[l] if l > -1 else -1 | 32.622222 | 260 | 0.642711 | 562 | 0.191417 | 0 | 0 | 0 | 0 | 0 | 0 | 2,365 | 0.805518 |
c880853878e1cff80cb76bcab65d294bfff7d0f4 | 6,407 | py | Python | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
]
| 1 | 2022-01-14T06:37:42.000Z | 2022-01-14T06:37:42.000Z | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from climateeconomics.core.core_dice.tempchange_model import TempChange
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
class TempChangeDiscipline(SoSDiscipline):
" Temperature evolution"
# ontology information
_ontology_data = {
'label': 'Temperature Change DICE Model',
'type': 'Research',
'source': 'SoSTrades Project',
'validated': '',
'validated_by': 'SoSTrades Project',
'last_modification_date': '',
'category': '',
'definition': '',
'icon': 'fas fa-thermometer-three-quarters fa-fw',
'version': '',
}
DESC_IN = {
'year_start': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'year_end': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'time_step': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'init_temp_ocean': {'type': 'float', 'default': 0.00687},
'init_temp_atmo': {'type': 'float', 'default': 0.85},
'eq_temp_impact': {'type': 'float', 'default': 3.1},
'init_forcing_nonco': {'type': 'float', 'default': 0.5},
'hundred_forcing_nonco': {'type': 'float', 'default': 1 },
'climate_upper': {'type': 'float', 'default': 0.1005},
'transfer_upper': {'type': 'float', 'default': 0.088},
'transfer_lower': {'type': 'float', 'default': 0.025},
'forcing_eq_co2': {'type': 'float', 'default': 3.6813},
'lo_tocean': {'type': 'float', 'default': -1},
'up_tatmo': {'type': 'float', 'default': 12},
'up_tocean': {'type': 'float', 'default' : 20},
'carboncycle_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}}
DESC_OUT = {
'temperature_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}}
_maturity = 'Research'
def run(self):
''' model execution '''
# get inputs
in_dict = self.get_sosdisc_inputs()
# carboncycle_df = in_dict.pop('carboncycle_df')
# model execution
model = TempChange()
temperature_df = model.compute(in_dict)
# store output data
out_dict = {"temperature_df": temperature_df}
self.store_sos_outputs_values(out_dict)
def get_chart_filter_list(self):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
chart_filters = []
chart_list = ['temperature evolution']
# First filter to deal with the view : program or actor
chart_filters.append(ChartFilter(
'Charts', chart_list, chart_list, 'charts'))
return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
instanciated_charts = []
# Overload default value with chart filter
if chart_filters is not None:
for chart_filter in chart_filters:
if chart_filter.filter_key == 'charts':
chart_list = chart_filter.selected_values
if 'temperature evolution' in chart_list:
to_plot = ['temp_atmo', 'temp_ocean']
temperature_df = self.get_sosdisc_outputs('temperature_df')
temperature_df = resize_df(temperature_df)
legend = {'temp_atmo': 'atmosphere temperature',
'temp_ocean': 'ocean temperature'}
years = list(temperature_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
max_value = 0
min_value = 0
for key in to_plot:
max_value = max(temperature_df[key].values.max(), max_value)
min_value = min(temperature_df[key].values.min(), min_value)
chart_name = 'temperature evolution over the years'
new_chart = TwoAxesInstanciatedChart('years', 'temperature evolution (degrees Celsius above preindustrial)',
[year_start - 5, year_end + 5], [
min_value * 0.9, max_value * 1.1],
chart_name)
for key in to_plot:
visible_line = True
ordonate_data = list(temperature_df[key])
new_series = InstanciatedSeries(
years, ordonate_data, legend[key], 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
return instanciated_charts
def resize_df(df):
index = df.index
i = len(index) - 1
key = df.keys()
to_check = df.loc[index[i], key[0]]
while to_check == 0:
i = i - 1
to_check = df.loc[index[i], key[0]]
size_diff = len(index) - i
new_df = pd.DataFrame()
if size_diff == 0:
new_df = df
else:
for element in key:
new_df[element] = df[element][0:i + 1]
new_df.index = index[0: i + 1]
return new_df
def resize_array(array):
i = len(array) - 1
to_check = array[i]
while to_check == 0:
i = i - 1
        to_check = array[i]
size_diff = len(array) - i
new_array = array[0:i]
return new_array
def resize_index(index, array):
l = len(array)
new_index = index[0:l]
return new_index
| 33.025773 | 129 | 0.605119 | 4,654 | 0.726393 | 0 | 0 | 0 | 0 | 0 | 0 | 2,442 | 0.381146 |
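# A quick check of the resize_df helper defined above: it walks backwards from the end
# and drops the trailing rows whose first column is zero (padding left over from
# fixed-length model arrays), keeping everything up to the last non-zero entry.
# The values below are illustrative only.
import pandas as pd

df = pd.DataFrame({'temp_atmo': [0.85, 0.90, 0.0, 0.0],
                   'temp_ocean': [0.01, 0.02, 0.0, 0.0]})
trimmed = resize_df(df)
print(len(trimmed))  # 2 -> only the rows before the zero padding remain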
c8813251417f083ef4764a6d0d80104c34d5a26a | 56,368 | py | Python | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
]
| null | null | null | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
]
| null | null | null | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
"""
The PyMKM example app.
"""
__author__ = "Andreas Ehrlund"
__version__ = "2.0.4"
__license__ = "MIT"
import os
import csv
import json
import shelve
import logging
import logging.handlers
import pprint
import uuid
import sys
from datetime import datetime
import micromenu
import progressbar
import requests
import tabulate as tb
from pkg_resources import parse_version
from .pymkm_helper import PyMkmHelper
from .pymkmapi import PyMkmApi, CardmarketError
class PyMkmApp:
logger = None
def __init__(self, config=None):
self.logger = logging.getLogger(__name__)
# self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fh = logging.handlers.RotatingFileHandler(
f"log_pymkm.log", maxBytes=500000, backupCount=2
)
fh.setLevel(logging.WARNING)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setLevel(logging.ERROR) # This gets outputted to stdout
sh.setFormatter(formatter)
self.logger.addHandler(sh)
if config is None:
self.logger.debug(">> Loading config file")
try:
self.config = json.load(open("config.json"))
# Sync missing attributes to active config
template_config = json.load(open("config_template.json"))
template_config.update(self.config)
self.config = template_config
except FileNotFoundError:
self.logger.error(
"You must copy config_template.json to config.json and populate the fields."
)
sys.exit(0)
# if no UUID is present, generate one and add it to the file
if "uuid" not in self.config:
self.config["uuid"] = str(uuid.uuid4())
with open("config.json", "w") as json_config_file:
json.dump(self.config, json_config_file, indent=2)
else:
self.config = config
self.DEV_MODE = False
try:
self.DEV_MODE = self.config["dev_mode"]
except Exception as err:
pass
fh.setLevel(self.config["log_level"])
self.logger.setLevel(self.config["log_level"])
self.api = PyMkmApi(config=self.config)
def report(self, command):
uuid = self.config["uuid"]
# if self.config["reporting"] and not self.DEV_MODE:
# try:
# r = requests.post(
# "https://andli-stats-server.herokuapp.com/pymkm",
# json={"command": command, "uuid": uuid, "version": __version__},
# )
# except Exception as err:
# self.logger.error("Connection error to stats server.")
# pass
pass
def check_latest_version(self):
latest_version = None
try:
r = requests.get("https://api.github.com/repos/andli/pymkm/releases/latest")
latest_version = r.json()["tag_name"]
except Exception as err:
self.logger.error("Connection error with github.com")
if parse_version(__version__) < parse_version(latest_version):
return f"Go to Github and download version {latest_version}! It's better!"
else:
return None
def start(self, args=None):
if not len(sys.argv) > 1: # if args have been passed
while True:
top_message = self.check_latest_version()
if hasattr(self, "DEV_MODE") and self.DEV_MODE:
top_message = "dev mode"
menu = micromenu.Menu(
f"PyMKM {__version__}",
top_message,
f"API calls used today: {self.api.requests_count}/{self.api.requests_max}",
cycle=False,
)
menu.add_function_item(
"Update stock prices",
self.update_stock_prices_to_trend,
{"api": self.api, "cli_called": False},
)
menu.add_function_item(
"Update price for a product",
self.update_product_to_trend,
{"api": self.api},
)
menu.add_function_item(
"List competition for a product",
self.list_competition_for_product,
{"api": self.api},
)
menu.add_function_item(
"Find deals from a user",
self.find_deals_from_user,
{"api": self.api},
)
menu.add_function_item(
f"Show top {self.config['show_top_x_expensive_items']} expensive items in stock",
self.show_top_expensive_articles_in_stock,
{
"num_articles": self.config["show_top_x_expensive_items"],
"api": self.api,
},
)
menu.add_function_item(
"Wantslists cleanup suggestions",
self.clean_purchased_from_wantslists,
{"api": self.api},
)
menu.add_function_item(
"Show account info", self.show_account_info, {"api": self.api}
)
menu.add_function_item(
"Clear entire stock (WARNING)",
self.clear_entire_stock,
{"api": self.api},
)
menu.add_function_item(
f"Import stock from {self.config['csv_import_filename']}",
self.import_from_csv,
{"api": self.api},
)
menu.add_function_item(
f"Track price data to {self.config['csv_prices_filename']}",
self.track_prices_to_csv,
{"api": self.api},
)
if self.DEV_MODE:
menu.add_function_item(
f"⚠ Check product id", self.check_product_id, {"api": self.api},
)
menu.add_function_item(
f"⚠ Add fake stock", self.add_fake_stock, {"api": self.api},
)
if self.api.requests_count < self.api.requests_max:
break_signal = menu.show()
else:
menu.print_menu()
self.logger.error("Out of quota, exiting app.")
sys.exit(0)
if break_signal:
break
else:
# command line interface
if args.price_check_wantslist:
self.track_prices_to_csv(
self.api, args.price_check_wantslist, args.cached
)
if args.update_stock:
self.update_stock_prices_to_trend(
self.api, args.update_stock, args.cached, args.partial
)
def check_product_id(self, api):
""" Dev function check on a product id. """
pid = int(PyMkmHelper.prompt_string("pid"))
product_json = api.get_product(pid)
del product_json["product"]["reprint"]
del product_json["product"]["links"]
pp = pprint.PrettyPrinter()
pp.pprint(product_json)
def add_fake_stock(self, api):
""" Dev function to add fake stock. """
range_start = int(PyMkmHelper.prompt_string("Range pid start"))
range_end = int(PyMkmHelper.prompt_string("Range pid end"))
if PyMkmHelper.prompt_bool("Sure?"):
print("Adding fake stock...")
product_list = []
for product_no in range(range_start, range_end):
product_list.append(
{
"idProduct": product_no,
"idLanguage": 1,
"count": 1,
"price": 1,
"comments": "TEST ARTICLE DO NOT BUY",
"condition": "PO",
"isFoil": "false",
}
)
api.add_stock(product_list)
def clean_json_for_upload(self, not_uploadable_json):
for entry in not_uploadable_json:
del entry["price_diff"]
del entry["old_price"]
del entry["name"]
return not_uploadable_json
def update_stock_prices_to_trend(self, api, cli_called, cached=None, partial=0):
""" This function updates all prices in the user's stock to TREND. """
self.report("update stock price to trend")
stock_list = self.get_stock_as_array(self.api, cli_called, cached)
already_checked_articles = PyMkmHelper.read_from_cache(
self.config["local_cache_filename"], "partial_updated"
)
if already_checked_articles:
print(
f"{len(already_checked_articles)} articles found in previous updates, ignoring those."
)
partial_stock_update_size = 0
if partial > 0:
partial_stock_update_size = partial
elif not cli_called:
partial_status_string = ""
if already_checked_articles:
partial_status_string = (
f"({len(already_checked_articles)}/{len(stock_list)} done)"
)
partial_stock_update_size = PyMkmHelper.prompt_string(
f"Partial update? {partial_status_string} \n"
+ "If so, enter number of cards (or press Enter to update all remaining stock)"
)
if partial_stock_update_size != "":
partial_stock_update_size = int(partial_stock_update_size)
else:
partial_stock_update_size = 0
if cli_called or self.config["never_undercut_local_market"]:
undercut_local_market = False
else:
undercut_local_market = PyMkmHelper.prompt_bool(
"Try to undercut local market? (slower, more requests)"
)
uploadable_json, checked_articles = self.calculate_new_prices_for_stock(
stock_list,
undercut_local_market,
partial_stock_update_size,
already_checked_articles,
api=self.api,
)
cache_size = 0
if checked_articles:
cache_size = PyMkmHelper.append_to_cache(
self.config["local_cache_filename"],
"partial_updated",
checked_articles,
)
if cache_size == len(stock_list):
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "partial_updated"
)
print(
f"Entire stock updated in partial updates. Partial update data cleared."
)
if len(uploadable_json) > 0:
self.display_price_changes_table(uploadable_json)
if cli_called or PyMkmHelper.prompt_bool(
"Do you want to update these prices?"
):
print("Updating prices...")
api.set_stock(uploadable_json)
print("Prices updated.")
else:
print("Prices not updated.")
else:
print("No prices to update.")
self.logger.debug("-> update_stock_prices_to_trend: Done")
def __filter(self, article_list):
sticky_price_char = self.config["sticky_price_char"]
# if we find the sticky price marker, filter out articles
def filtered(stock_item):
if stock_item.get("comments"):
return stock_item.get("comments").startswith(sticky_price_char)
else:
return False
filtered_articles = [x for x in article_list if not filtered(x)]
return filtered_articles
def update_product_to_trend(self, api):
""" This function updates one product in the user's stock to TREND. """
self.report("update product price to trend")
search_string = PyMkmHelper.prompt_string("Search product name")
try:
articles = api.find_stock_article(search_string, 1)
except Exception as err:
print(err)
filtered_articles = self.__filter(articles)
### --- refactor?
if not filtered_articles:
print(f"{len(articles)} articles found, no editable prices.")
else:
if len(filtered_articles) > 1:
article = self.select_from_list_of_articles(filtered_articles)
else:
article = filtered_articles[0]
found_string = f"Found: {article['product']['enName']}"
if article["product"].get("expansion"):
found_string += f"[{article['product'].get('expansion')}] "
if article["isFoil"]:
found_string += f"[foil: {article['isFoil']}] "
if article["comments"]:
found_string += f"[comment: {article['comments']}] "
else:
found_string += "."
print(found_string)
undercut_local_market = PyMkmHelper.prompt_bool(
"Try to undercut local market? (slower, more requests)"
)
product = self.api.get_product(article["idProduct"])
r = self.update_price_for_article(
article, product, undercut_local_market, api=self.api
)
if r:
self.draw_price_changes_table([r])
print(
"\nTotal price difference: {}.".format(
str(
round(
sum(item["price_diff"] * item["count"] for item in [r]),
2,
)
)
)
)
if PyMkmHelper.prompt_bool("Do you want to update these prices?"):
# Update articles on MKM
print("Updating prices...")
api.set_stock(self.clean_json_for_upload([r]))
print("Price updated.")
else:
print("Prices not updated.")
else:
print("No prices to update.")
self.logger.debug("-> update_product_to_trend: Done")
def list_competition_for_product(self, api):
self.report("list competition for product")
print("Note: does not support playsets, booster displays etc (yet).")
search_string = PyMkmHelper.prompt_string("Search product name")
is_foil = PyMkmHelper.prompt_bool("Foil?")
try:
result = api.find_product(
search_string,
**{
# 'exact ': 'true',
"idGame": 1,
"idLanguage": 1,
# TODO: Add language support
},
)
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
else:
if result:
products = result
stock_list_products = [
x["idProduct"] for x in self.get_stock_as_array(api=self.api)
]
products = [
x for x in products if x["idProduct"] in stock_list_products
]
if len(products) == 0:
print("No matching cards in stock.")
else:
if len(products) > 1:
product = self.select_from_list_of_products(
[i for i in products if i["categoryName"] == "Magic Single"]
)
elif len(products) == 1:
product = products[0]
self.show_competition_for_product(
product["idProduct"], product["enName"], is_foil, api=self.api
)
else:
print("No results found.")
self.logger.debug("-> list_competition_for_product: Done")
def find_deals_from_user(self, api):
self.report("find deals from user")
search_string = PyMkmHelper.prompt_string("Enter username")
try:
result = api.find_user_articles(search_string)
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
else:
filtered_articles = [x for x in result if x.get("price") > 1]
# language from configured filter
language_filter_string = self.config["search_filters"]["language"]
if language_filter_string:
language_filter_code = api.get_language_code_from_string(
language_filter_string
)
if language_filter_code:
filtered_articles = [
x
for x in filtered_articles
if x.get("language").get("idLanguage") == language_filter_code
]
sorted_articles = sorted(
filtered_articles, key=lambda x: x["price"], reverse=True
)
print(
f"User '{search_string}' has {len(sorted_articles)} articles that meet the criteria."
)
num_searches = int(
PyMkmHelper.prompt_string(
f"Searching top X expensive cards for deals, choose X (1-{len(sorted_articles)})"
)
)
if 1 <= num_searches <= len(sorted_articles):
table_data = []
products_to_get = []
index = 0
bar = progressbar.ProgressBar(max_value=num_searches)
bar.update(index)
products_to_get = [
x["idProduct"] for x in sorted_articles[:num_searches]
]
products = api.get_items_async("products", products_to_get)
for article in sorted_articles[:num_searches]:
try:
p = next(
x
for x in products
if x["product"]["idProduct"] == article["idProduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
continue
name = p["product"]["enName"]
expansion = p["product"].get("expansion")
price = float(article["price"])
if expansion:
expansion_name = expansion.get("enName")
else:
expansion_name = "N/A"
if article.get("isFoil"):
market_price = p["product"]["priceGuide"]["TRENDFOIL"]
else:
market_price = p["product"]["priceGuide"]["TREND"]
if market_price > 0:
price_diff = price - market_price
percent_deal = round(-100 * (price_diff / market_price))
if price_diff < -1 or percent_deal >= 10:
table_data.append(
[
name,
expansion_name,
article.get("condition"),
article.get("language").get("languageName"),
"\u2713" if article.get("isFoil") else "",
"\u2713" if article.get("isPlayset") else "",
price,
market_price,
price_diff,
percent_deal,
]
)
index += 1
bar.update(index)
bar.finish()
if table_data:
print("Found some interesting prices:")
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[9], reverse=True),
headers=[
"Name",
"Expansion",
"Condition",
"Language",
"Foil",
"Playset",
"Price",
"Market price",
"Market diff",
"Deal %",
],
tablefmt="simple",
)
)
else:
print("Found no deals. :(")
else:
print("Invalid number.")
self.logger.debug("-> find_deals_from_user: Done")
def show_top_expensive_articles_in_stock(self, num_articles, api):
self.report("show top expensive in stock")
stock_list = self.get_stock_as_array(api=self.api)
table_data = []
total_price = 0
for article in stock_list:
name = article["product"]["enName"]
expansion = article.get("product").get("expansion")
foil = article.get("isFoil")
playset = article.get("isPlayset")
condition = article.get("condition")
language_code = article.get("language")
language_name = language_code.get("languageName")
price = article.get("price")
table_data.append(
[
name,
expansion,
"\u2713" if foil else "",
"\u2713" if playset else "",
language_name,
condition,
price,
]
)
total_price += price
if len(table_data) > 0:
print(
f"Top {str(num_articles)} most expensive articles in stock (total {len(stock_list)} items):\n"
)
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[6], reverse=True)[:num_articles],
headers=[
"Name",
"Expansion",
"Foil",
"Playset",
"Language",
"Condition",
"Price",
],
tablefmt="simple",
)
)
print("\nTotal stock value: {}".format(str(total_price)))
return None
def track_prices_to_csv(self, api, wantslist_name=None, cached=False):
self.report("track prices")
wantslists, wantslists_lists = self.get_wantslists_data(api, cached)
if wantslist_name is None:
selected_list = self.select_from_list_of_wantslists(wantslists)
selected_list_id = selected_list["idWantslist"]
else:
selected_list_id = next(
x["idWantslist"] for x in wantslists if x["name"] == wantslist_name
)
# TODO: fails for metaproduct
products_to_get = [
x["idProduct"]
for x in wantslists_lists[selected_list_id]
if x["type"] == "product"
]
for x in wantslists_lists[selected_list_id]:
if x["type"] == "metaproduct":
self.logger.warning(
f"Wantslist contains metaproduct ({x['metaproduct']['enName']}) which cannot be used to get prices."
)
updated_products = []
try:
updated_products = api.get_items_async("products", products_to_get)
except Exception as err:
pass
# Write to CSV:
if len(updated_products) > 0:
# if blank, then header: datetime, productid, priceguide labels
example_priceguide = updated_products[0]["product"]["priceGuide"]
priceguide_header_items = [k for k in example_priceguide.keys()]
header_list = [
"datetime",
"product id",
"name",
"expansion",
]
header_list.extend(priceguide_header_items)
data_array = []
for product in updated_products:
price_data_exploded = [
k for k in product["product"]["priceGuide"].values()
]
data_row = [
datetime.now().isoformat(" "),
product["product"]["idProduct"],
product["product"]["enName"],
product["product"]["expansion"]["enName"],
]
data_row.extend(price_data_exploded)
data_array.append(data_row)
self.write_to_csv(header_list, data_array)
def write_to_csv(self, header_list, data_array):
if len(data_array) > 0:
try:
with open(
self.config["csv_prices_filename"],
"a",
newline="",
encoding="utf-8",
) as csv_a, open(self.config["csv_prices_filename"], "r",) as csv_r:
csv_reader = csv.reader(csv_r)
row_count = sum(1 for row in csv_reader)
csv_writer = csv.writer(csv_a, delimiter=";")
if row_count == 0:
csv_writer.writerow(header_list)
csv_writer.writerows(data_array)
self.logger.debug(
f"write_to_csv:: {len(data_array)} lines written to {self.config['csv_prices_filename']}."
)
print(
f"Wrote {len(data_array)} price updates to {self.config['csv_prices_filename']}."
)
except Exception as err:
print(err.value)
def clean_purchased_from_wantslists(self, api):
self.report("clean wantslists")
print("This will show items in your wantslists you have already received.")
wantslists, wantslists_lists = self.get_wantslists_data(api)
try:
            print("Getting received orders from Cardmarket...")
received_orders = api.get_orders("buyer", "received", start=1)
except Exception as err:
print(err)
if wantslists_lists and received_orders:
purchased_product_ids = []
purchased_products = []
for (
order
) in received_orders: # TODO: foil in purchase removes non-foil in wants
purchased_product_ids.extend(
[i["idProduct"] for i in order.get("article")]
)
purchased_products.extend(
{
"id": i["idProduct"],
"foil": i.get("isFoil"),
"count": i["count"],
"date": order["state"]["dateReceived"],
}
for i in order.get("article")
)
purchased_products = sorted(
purchased_products, key=lambda t: t["date"], reverse=True
)
total_number_of_items = sum([len(x) for x in wantslists_lists.values()])
index = 0
print("Matching received purchases with wantslists...")
bar = progressbar.ProgressBar(max_value=total_number_of_items)
matches = []
for key, articles in wantslists_lists.items():
metaproducts_article_list = [
x for x in articles if x.get("type") == "metaproduct"
]
metaproducts_to_get = [
x["idMetaproduct"] for x in metaproducts_article_list
]
metaproduct_list = api.get_items_async(
"metaproducts", metaproducts_to_get
)
for article in articles:
a_type = article.get("type")
a_foil = article.get("isFoil") == True
product_matches = []
if a_type == "metaproduct":
try:
metaproduct = next(
x
for x in metaproduct_list
if x["metaproduct"]["idMetaproduct"]
== article["idMetaproduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
continue
metaproduct_product_ids = [
i["idProduct"] for i in metaproduct["product"]
]
product_matches = [
i
for i in purchased_products
if i["id"] in metaproduct_product_ids
and i["foil"] == a_foil
]
else:
a_product_id = article.get("idProduct")
product_matches = [
i
for i in purchased_products
if i["id"] == a_product_id and i["foil"] == a_foil
]
if product_matches:
match = {
"wantlist_id": key,
"wantlist_name": wantslists[key],
"date": product_matches[0]["date"],
"is_foil": a_foil,
"count": sum([x.get("count") for x in product_matches]),
}
if a_type == "product":
match.update(
{
"product_id": a_product_id,
"product_name": article.get("product").get(
"enName"
),
"expansion_name": article.get("product").get(
"expansionName"
),
}
)
elif a_type == "metaproduct":
match.update(
{
"metaproduct_id": article.get("idMetaproduct"),
"product_name": article.get("metaproduct").get(
"enName"
),
"expansion_name": article.get("metaproduct").get(
"expansionName"
),
}
)
matches.append(match)
index += 1
bar.update(index)
bar.finish()
if matches:
print(
tb.tabulate(
[
[
item["wantlist_name"],
item["count"],
"\u2713" if item["is_foil"] else "",
item["product_name"],
item["expansion_name"],
item["date"],
]
for item in matches
],
headers=[
"Wantlist",
"# bought",
"Foil",
"Name",
"Expansion",
"Date (last) received",
],
tablefmt="simple",
)
)
else:
print("No cleanup needed.")
else:
print("No wantslists or received orders.")
def show_account_info(self, api):
self.report("show account info")
pp = pprint.PrettyPrinter()
pp.pprint(api.get_account())
self.logger.debug("-> show_account_info: Done")
def clear_entire_stock(self, api):
self.report("clear entire stock")
stock_list = self.get_stock_as_array(api=self.api)
if PyMkmHelper.prompt_bool(
"Do you REALLY want to clear your entire stock ({} items)?".format(
len(stock_list)
)
):
# for article in stock_list:
# article['count'] = 0
delete_list = [
{"count": x["count"], "idArticle": x["idArticle"]} for x in stock_list
]
print("Clearing stock...")
api.delete_stock(delete_list)
self.logger.debug("-> clear_entire_stock: done")
print("Stock cleared.")
PyMkmHelper.clear_cache(self.config["local_cache_filename"], "stock")
else:
print("Aborted.")
def import_from_csv(self, api):
self.report("import from csv")
print(
"Note the required format: Card, Set name, Quantity, Foil, Language (with header row)."
)
problem_cards = []
with open(self.config["csv_import_filename"], newline="") as csvfile:
csv_reader = csvfile.readlines()
index = 0
card_rows = (sum(1 for row in csv_reader)) - 1
bar = progressbar.ProgressBar(max_value=card_rows)
self.logger.debug(f"-> import_from_csv: {card_rows} cards in csv file.")
csvfile.seek(0)
for row in csv_reader:
row = row.rstrip()
row_array = row.split(",")
if index > 0:
row_array = [x.strip('"') for x in row_array]
try:
(name, set_name, count, foil, language, *other) = row_array
except Exception as err:
problem_cards.append(row_array)
else:
foil = True if foil.lower() == "foil" else False
if not self.match_card_and_add_stock(
api, name, set_name, count, foil, language, *other
):
problem_cards.append(row_array)
bar.update(index)
index += 1
bar.finish()
if len(problem_cards) > 0:
try:
with open(
"failed_imports.csv", "w", newline="", encoding="utf-8"
) as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerows(problem_cards)
self.logger.debug(
f"import_from_csv:: {len(problem_cards)} failed imports."
)
print(
f"Wrote {len(problem_cards)} failed imports to failed_imports.csv"
)
print("Report failures as an issue in the pymkm GitHub repo, please!")
except Exception as err:
print(err.value)
else:
print("All cards added successfully")
# End of menu item functions ============================================
def get_wantslists_data(self, api, cached=False):
# Check for cached wantslists
        local_wantslists_cache = PyMkmHelper.read_from_cache(
            self.config["local_cache_filename"], "wantslists"
        )
        local_wantslists_lists_cache = PyMkmHelper.read_from_cache(
            self.config["local_cache_filename"], "wantslists_lists"
        )
if local_wantslists_cache:
if cached or PyMkmHelper.prompt_bool(
f"Cached wantslists ({len(local_wantslists_cache)} items) found, use it? (if not, then it will be cleared)"
):
return local_wantslists_cache, local_wantslists_lists_cache
else:
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "wantslists"
)
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "wantslists_lists"
)
                return self.get_wantslists_data(api)
else: # no local cache
wantslists = []
wantslists_lists = {}
try:
                print("Getting wantslists from Cardmarket...")
wantslists = api.get_wantslists()
wantslists_lists = {
item["idWantslist"]: api.get_wantslist_items(item["idWantslist"])[
"item"
]
for item in wantslists
}
except Exception as err:
print(err)
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"], "wantslists", wantslists
)
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"],
"wantslists_lists",
wantslists_lists,
)
return wantslists, wantslists_lists
def match_card_and_add_stock(
self, api, name, set_name, count, foil, language, *other
):
if all(v != "" for v in [name, set_name, count]):
try:
possible_products = api.find_product(name, idGame="1") # ["product"]
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
except Exception as err:
return False
else:
if len(possible_products) == 0:
# no viable match
return False
else:
product_match = [
x
for x in possible_products
if x["categoryName"] == "Magic Single"
and self.card_equals(
x["enName"], x["expansionName"], name, set_name
)
]
if len(product_match) == 1:
language_id = (
1 if language == "" else api.languages.index(language) + 1
)
product = api.get_product(product_match[0]["idProduct"])
price = self.get_price_for_product(
product,
product_match[0]["rarity"],
self.config["csv_import_condition"],
foil,
False,
language_id=language_id,
api=self.api,
)
card = {
"idProduct": product_match[0]["idProduct"],
"idLanguage": language_id,
"count": count,
"price": str(price),
"condition": self.config["csv_import_condition"],
"isFoil": ("true" if foil else "false"),
}
api.add_stock([card])
return True
else:
# no single matching card
return False
else:
# incomplete data from card scanner
return False
def card_equals(self, db_cardname, db_setname, local_cardname, local_setname):
# TODO: add some sort of string distance like Levenshtein
filtered_db_cardname = db_cardname.replace(",", "")
filtered_db_cardname = filtered_db_cardname.replace("Æ", "Ae")
if db_setname != local_setname:
return False
else:
# filter for flip card / split card names
if filtered_db_cardname == local_cardname or (
"/" in filtered_db_cardname
and filtered_db_cardname.startswith(local_cardname)
):
return True
else:
return False
def select_from_list_of_wantslists(self, wantslists):
index = 1
for wantlist in wantslists:
print(f"{index}: {wantlist['name']} ({wantlist['game']['abbreviation']})")
index += 1
choice = int(input("Choose wantslist: "))
return wantslists[choice - 1]
def select_from_list_of_products(self, products):
index = 1
for product in products:
print(
"{}: {} [{}] {}".format(
index,
product["enName"],
product["expansionName"],
product["rarity"],
)
)
index += 1
choice = ""
while not isinstance(choice, int) or choice > len(products):
try:
choice = int(input("Choose card: "))
except ValueError as err:
print("Not a number.")
return products[choice - 1]
def select_from_list_of_articles(self, articles):
index = 1
for article in articles:
product = article["product"]
print(
f'{index}: {product["enName"]}[{product["expansion"]}], foil: {article["isFoil"]}, comment: {article["comments"]}'
)
index += 1
choice = int(input("Choose card: "))
return articles[choice - 1]
def show_competition_for_product(self, product_id, product_name, is_foil, api):
print("Selected product: {}".format(product_name))
table_data_local, table_data = self.get_competition(api, product_id, is_foil)
if table_data_local:
self.print_product_top_list("Local competition:", table_data_local, 4, 20)
if table_data:
self.print_product_top_list("Top 20 cheapest:", table_data, 4, 20)
else:
print("No prices found.")
def get_competition(self, api, product_id, is_foil):
# TODO: Add support for playsets
# TODO: Add support for card condition
self.account = api.get_account()["account"]
country_code = self.account["country"]
config = self.config
is_altered = config["search_filters"]["isAltered"]
is_signed = config["search_filters"]["isSigned"]
min_condition = config["search_filters"]["minCondition"]
user_type = config["search_filters"]["userType"]
id_language = config["search_filters"]["idLanguage"]
articles = api.get_articles(
product_id,
**{
"isFoil": str(is_foil).lower(),
"isAltered": is_altered,
"isSigned": is_signed,
"minCondition": min_condition,
"country": country_code,
"userType": user_type,
"idLanguage": id_language,
},
)
table_data = []
table_data_local = []
for article in articles:
username = article["seller"]["username"]
if article["seller"]["username"] == self.account["username"]:
username = "-> " + username
item = [
username,
article["seller"]["address"]["country"],
article["condition"],
article["language"]["languageName"],
article["count"],
article["price"],
]
if article["seller"]["address"]["country"] == country_code:
table_data_local.append(item)
table_data.append(item)
return table_data_local, table_data
def print_product_top_list(self, title_string, table_data, sort_column, rows):
print(70 * "-")
print("{} \n".format(title_string))
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[sort_column], reverse=False)[:rows],
headers=[
"Username",
"Country",
"Condition",
"Language",
"Count",
"Price",
],
tablefmt="simple",
)
)
print(70 * "-")
print(
"Total average price: {}, Total median price: {}, Total # of articles: {}\n".format(
str(PyMkmHelper.calculate_average(table_data, 4, 5)),
str(PyMkmHelper.calculate_median(table_data, 4, 5)),
str(len(table_data)),
)
)
def calculate_new_prices_for_stock(
self,
stock_list,
undercut_local_market,
partial_stock_update_size,
already_checked_articles,
api,
):
filtered_stock_list = self.__filter(stock_list)
sticky_count = len(stock_list) - len(filtered_stock_list)
# articles_in_shoppingcarts = api.get_articles_in_shoppingcarts()
if already_checked_articles:
filtered_stock_list = [
x
for x in filtered_stock_list
if x["idArticle"] not in already_checked_articles
]
if len(filtered_stock_list) == 0:
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "partial_updated"
)
print(
f"Entire stock updated in partial updates. Partial update data cleared."
)
return [], []
if partial_stock_update_size:
filtered_stock_list = filtered_stock_list[:partial_stock_update_size]
result_json = []
checked_articles = []
total_price = 0
index = 0
bar = progressbar.ProgressBar(max_value=len(filtered_stock_list))
bar.update(index)
products_to_get = [x["idProduct"] for x in filtered_stock_list]
product_list = api.get_items_async("products", products_to_get)
product_list = [x for x in product_list if x]
for article in filtered_stock_list:
try:
product = next(
x
for x in product_list
if x["product"]["idProduct"] == article["idProduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
self.logger.error(
f"aid {article['idArticle']} pid {article['idProduct']} - {article['product']['enName']} {article['product']['expansion']} failed to find a product"
)
continue
checked_articles.append(article.get("idArticle"))
updated_article = self.update_price_for_article(
article, product, undercut_local_market, api=self.api
)
if updated_article:
result_json.append(updated_article)
total_price += updated_article.get("price")
else:
total_price += article.get("price")
index += 1
bar.update(index)
bar.finish()
print("Value in this update: {}".format(str(round(total_price, 2))))
if len(stock_list) != len(filtered_stock_list):
print(f"Note: {sticky_count} items filtered out because of sticky prices.")
return result_json, checked_articles
def update_price_for_article(
self, article, product, undercut_local_market=False, api=None
):
new_price = self.get_price_for_product(
product,
article["product"].get("rarity"),
article.get("condition"),
article.get("isFoil", False),
article.get("isPlayset", False),
language_id=article["language"]["idLanguage"],
undercut_local_market=undercut_local_market,
api=self.api,
)
if new_price:
price_diff = new_price - article["price"]
if price_diff != 0:
return {
"name": article["product"]["enName"],
"isFoil": article.get("isFoil", False),
"isPlayset": article.get("isPlayset", False),
"language": article["language"]["languageName"],
"condition": article["condition"],
"old_price": article["price"],
"price": new_price,
"price_diff": price_diff,
"idArticle": article["idArticle"],
"count": article["count"],
}
def get_rounding_limit_for_rarity(self, rarity, product_id):
rounding_limit = float(self.config["price_limit_by_rarity"]["default"])
try:
rounding_limit = float(self.config["price_limit_by_rarity"][rarity.lower()])
except KeyError as err:
print(
f"ERROR: Unknown rarity '{rarity}' (pid: {product_id}). Using default rounding."
)
return rounding_limit
def get_discount_for_condition(self, condition):
try:
discount = float(self.config["discount_by_condition"][condition])
except KeyError as err:
print(f"ERROR: Unknown condition '{condition}'.")
raise err
else:
return discount
def get_price_for_product(
self,
product,
rarity,
condition,
is_foil,
is_playset,
language_id=1,
undercut_local_market=False,
api=None,
):
rounding_limit = self.get_rounding_limit_for_rarity(
rarity, product["product"]["idProduct"]
)
if not is_foil:
trend_price = product["product"]["priceGuide"]["TREND"]
else:
trend_price = product["product"]["priceGuide"]["TRENDFOIL"]
# Set competitive price for region
if undercut_local_market:
table_data_local, table_data = self.get_competition(
api, product["product"]["idProduct"], is_foil
)
if len(table_data_local) > 0:
# Undercut if there is local competition
lowest_in_country = PyMkmHelper.get_lowest_price_from_table(
table_data_local, 4
)
new_price = max(
rounding_limit,
min(trend_price, lowest_in_country - rounding_limit),
)
else:
# No competition in our country, set price a bit higher.
new_price = trend_price * 1.2
else: # don't try to undercut local market
new_price = trend_price
if new_price is None:
raise ValueError("No price found!")
else:
if is_playset:
new_price = 4 * new_price
old_price = new_price
# Apply condition discount
if condition:
new_price = new_price * self.get_discount_for_condition(condition)
# Round
new_price = PyMkmHelper.round_up_to_multiple_of_lower_limit(
rounding_limit, new_price
)
return new_price
def display_price_changes_table(self, changes_json):
num_items = self.config["show_num_best_worst_items"]
print("\nBest diffs:\n")
sorted_best = sorted(changes_json, key=lambda x: x["price_diff"], reverse=True)[
:num_items
]
self.draw_price_changes_table(i for i in sorted_best if i["price_diff"] > 0)
print("\nWorst diffs:\n")
sorted_worst = sorted(changes_json, key=lambda x: x["price_diff"])[:num_items]
self.draw_price_changes_table(i for i in sorted_worst if i["price_diff"] < 0)
print(
"\nTotal price difference: {}.".format( # TODO: fix bug where summary is wrong
str(
round(
sum(item["price_diff"] * item["count"] for item in sorted_best),
2,
)
)
)
)
def draw_price_changes_table(self, sorted_best):
print(
tb.tabulate(
[
[
item["count"],
item["name"],
"\u2713" if item["isFoil"] else "",
"\u2713" if item["isPlayset"] else "",
item["condition"],
item["language"],
item["old_price"],
item["price"],
item["price_diff"],
]
for item in sorted_best
],
headers=[
"Count",
"Name",
"Foil",
"Playset",
"Condition",
"Language",
"Old price",
"New price",
"Diff",
],
tablefmt="simple",
)
)
def get_stock_as_array(self, api, cli_called=False, cached=None):
# Check for cached stock
local_stock_cache = None
local_stock_cache = PyMkmHelper.read_from_cache(
self.config["local_cache_filename"], "stock"
)
if local_stock_cache:
if not cli_called:
if PyMkmHelper.prompt_bool(
f"Cached stock ({len(local_stock_cache)} items) found, use it? Note that prices may be outdated."
):
return local_stock_cache
else:
if cached:
return local_stock_cache
PyMkmHelper.clear_cache(self.config["local_cache_filename"], "stock")
print(
"Getting your stock from Cardmarket (the API can be slow for large stock)..."
)
try:
d = api.get_stock()
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
sys.exit(0)
# except Exception as err:
# msg = f"No response from API. Error: {err}"
# print(msg)
# self.logger.error(msg)
# sys.exit(0)
else:
keys = [
"idArticle",
"idProduct",
"product",
"count",
"comments",
"price",
"condition",
"isFoil",
"isPlayset",
"isSigned",
"language",
]
stock_list = [
{x: y for x, y in article.items() if x in keys} for article in d
]
print("Stock fetched.")
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"], "stock", stock_list
)
return stock_list
| 38.319511 | 168 | 0.482064 | 55,886 | 0.991361 | 0 | 0 | 0 | 0 | 0 | 0 | 11,795 | 0.209231 |
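# The repricing in get_price_for_product above reduces to a short pipeline: take the
# TREND (or TRENDFOIL) guide price, optionally undercut local competition, multiply by
# four for playsets, apply the per-condition discount from config, then round up to the
# rarity's price step. A standalone sketch of those last steps -- the 0.9 discount and
# 0.25 step are made-up stand-ins for config values, and math.ceil approximates
# PyMkmHelper.round_up_to_multiple_of_lower_limit:
import math

def sketch_price(trend, is_playset=False, condition_discount=0.9, step=0.25):
    price = trend * (4 if is_playset else 1)
    price *= condition_discount
    return math.ceil(price / step) * step   # round up to the nearest step

print(sketch_price(1.37))                    # 1.25
print(sketch_price(1.37, is_playset=True))   # 5.0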
c8829aec3d5b9877236b2115916c5ca2a14ab73b | 333 | py | Python | Datasets/Terrain/us_ned_physio_diversity.py | monocilindro/qgis-earthengine-examples | 82aea8926d34ed3f4ad4a4a345ddbd225819d28f | [
"MIT"
]
| 646 | 2019-12-03T06:09:03.000Z | 2022-03-28T03:37:08.000Z | Datasets/Terrain/us_ned_physio_diversity.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
]
| 10 | 2019-12-30T03:42:44.000Z | 2021-05-22T07:34:07.000Z | Datasets/Terrain/us_ned_physio_diversity.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
]
| 219 | 2019-12-06T02:20:53.000Z | 2022-03-30T15:14:27.000Z | import ee
from ee_plugin import Map
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
physiographicDiversity = dataset.select('b1')
physiographicDiversityVis = {
'min': 0.0,
'max': 1.0,
}
Map.setCenter(-94.625, 39.825, 7)
Map.addLayer(
physiographicDiversity, physiographicDiversityVis,
'Physiographic Diversity')
| 23.785714 | 54 | 0.738739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.216216 |
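# The snippet above relies on the QGIS ee_plugin for Map; outside QGIS the same image
# can be opened with the plain Earth Engine Python client (assuming you have already
# run `earthengine authenticate`):
import ee

ee.Initialize()
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
print(dataset.bandNames().getInfo())  # should list the 'b1' band selected above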
c88407b58490b10ee7b7b9dec303ca0721d6f4c4 | 281 | py | Python | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
]
| null | null | null | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
]
| 7 | 2020-02-12T01:22:46.000Z | 2021-06-10T18:43:01.000Z | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
]
| null | null | null | from django import forms
from . import models
class PunchLogForm(forms.ModelForm):
latitude = forms.DecimalField(widget=forms.HiddenInput())
longitude = forms.DecimalField(widget=forms.HiddenInput())
class Meta:
model = models.PunchLog
fields = ()
| 20.071429 | 62 | 0.701068 | 231 | 0.822064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
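# PunchLogForm above exposes only the two declared coordinate fields: Meta.fields = ()
# keeps every other PunchLog model field out of the form, and HiddenInput means
# {{ form }} renders two <input type="hidden"> tags with no visible widgets.
# A rough usage sketch (coordinate values are made up):
form = PunchLogForm(data={"latitude": "37.5665", "longitude": "126.9780"})
if form.is_valid():
    lat = form.cleaned_data["latitude"]     # Decimal('37.5665')
    lng = form.cleaned_data["longitude"]    # Decimal('126.9780')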
c8845f1c14219b145ec8b7fa1bba57f5b2418dfb | 497 | py | Python | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
]
| null | null | null | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
]
| null | null | null | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import base64
print('Choose your choice:')
n='''
1:Encode string to base64
2:Decode base64 to string
'''
c = int(input(n))  # menu choice variable
if c == 1:  # handle menu option 1: encode
    print('Type string to be encoded:')
    inp = input()
    out = str(base64.encodebytes(inp.encode("utf-8")), "utf-8")
    print(out)  # already a str, so no b'' prefix is printed
if c == 2:  # handle menu option 2: decode
    print('Type string to be decoded:')
    inp2 = input().encode('utf-8')
    dec = base64.decodebytes(inp2)
    print(dec.decode())
| 24.85 | 63 | 0.593561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.508349 |
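# For non-interactive use the same round trip is just two standard-library calls; the
# script above only wraps them in a small prompt-driven menu:
import base64

encoded = base64.b64encode("hello".encode("utf-8")).decode("ascii")  # 'aGVsbG8='
decoded = base64.b64decode(encoded).decode("utf-8")                  # 'hello'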
c884c97e5f0b08128955897f09554f008fe34589 | 9,781 | py | Python | Code/nebulae/lib/sc/sc.py | CarlColglazier/QB_Nebulae_V2 | 3326fa1c672ba0845b28cb55847bea0c9b8e9a05 | [
"MIT"
]
| 8 | 2020-05-14T20:18:29.000Z | 2021-08-08T15:18:28.000Z | Code/nebulae/lib/sc/sc.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
]
| null | null | null | Code/nebulae/lib/sc/sc.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
]
| null | null | null | import time, os, sys
import scsynth, scosc
server = 0 # reference to app's sc server process
sndLoader = 0
synthon = 0 # did we start the scsynth process?
##workingpath = os.getcwd() # must be set to the right path in case something special is needed
sndpath = os.path.join( os.getcwd() , 'sounds' )
synthdefpath = os.path.join( os.getcwd() , 'synthdefs' )
def start( exedir='', port=57110, inputs=2, outputs=2, samplerate=44100, verbose=0,
spew=0, startscsynth=0 ) :
""" starts scsynth process. interfaces scsynth module.
Inits the OSC communication and classes that handle it
exe='', exedir='', port=57110, inputs=2, outputs=2, samplerate=44100, verbose=0, spew=0
"""
global server, sndLoader # because they are init in this func
exe = 'scsynth'
# if none is set take workingdir as exedir on mac and windows
if sys.platform == 'win32' :
exe += '.exe' # add extension
if exedir == '' : exedir = 'C:\Program Files\SuperCollider'
elif os.uname()[0] == 'Linux' :
if exedir == '' : exedir = '/usr/bin'
        if not os.path.isfile(os.path.join(exedir, exe)): # in case it is in /usr/local/bin
            print 'Error : /usr/bin/scsynth does not exist. Trying to find scsynth in /usr/local/bin...'
exedir = '/usr/local/bin'
elif sys.platform == 'darwin':
if exedir == '' : exedir = '/Applications/SuperCollider'
print "trying to run scsynth from :", exedir
server = scsynth.start(
#exe = exe,
#exedir = exedir,
port = port,
#inputs = inputs,
#outputs = outputs,
#samplerate = samplerate,
verbose = verbose,
spew = spew,
)
if startscsynth : # starts scsynth server process
global synthon
synthon = 1
server.instance = scsynth.startServer(
exe = exe,
exedir = exedir,
port = port,
inputs = inputs,
outputs = outputs,
samplerate = samplerate,
verbose = verbose,
#spew = spew,
)
time.sleep(1) # wait to start up
sndLoader = scsynth.Loader(server) # manages sound files
def quit() :
if synthon : # it was started
try :
server.quit()
server.ensure_dead()
except :
print 'server was not running'
def register(address, fun) :
""" bind OSC address to function callback
"""
server.listener.register( address, fun )
# sound buffer related utilities.
def loadSnd(filename, wait=False) :
""" load sound buffer from current sound folder (sc.sndpath) and return buffer's id
sends back /b_info labeled OSC message. The arguments to /b_info are as
follows:
int - buffer number
int - number of frames
int - number of channels
"""
abspath = os.path.join( sndpath, filename )
return loadSndAbs(abspath, wait)
def unloadSnd(buf_id) :
""" unload sound buffer from server memory by buffer id
"""
sndLoader.unload( buf_id, wait=False )
def loadSndAbs(path, wait=False) :
""" same as loadSnd but takes absolute path to snd file
"""
if os.path.isfile(path) :
return sndLoader.load( path, wait, b_query=True )
else :
print "file %s does NOT exist" % path
return 0
# classes
class Synth(object) :
""" wraps supercollider synthdefs
/s_new args : stringdefname, synth ID (nodeID), addaction, addtargetID, args:[controlindexorname, control value]
Create a new synth from a synth definition, give it an ID, and add it to the tree of
nodes. There are four ways to add the node to the tree as determined by the add action
argument which is defined as follows:
add actions:
0 - add the new node to the the head of the group specified by the add target ID.
1 - add the new node to the the tail of the group specified by the add target ID.
2 - add the new node just before the node specified by the add target ID.
3 - add the new node just after the node specified by the add target ID.
4 - the new node replaces the node specified by the add target ID. The target node is
freed.
Controls may be set when creating the synth. The control arguments are the same as
for the n_set command.
If you send /s_new with a synth ID of -1, then the server will generate an ID for you.
The server reserves all negative IDs. Since you don't know what the ID is, you cannot
talk to this node directly later. So this is useful for nodes that are of finite duration
and that get the control information they need from arguments and buses or messages
directed to their group. In addition no notifications are sent when there are changes of
state for this node, such as /go, /end, /on, /off.
If you use a node ID of -1 for any other command, such as /n_map, then it refers to
the most recently created node by /s_new (auto generated ID or not). This is how you
can map the controls of a node with an auto generated ID. In a multi-client situation,
the only way you can be sure what node -1 refers to is to put the messages in a bundle.
"""
loadedSynthdefs = []
def __init__(self, stringdefname='', nodeID=-1, addAction=1, addTargetID=0) : #, args=[] ) :
if nodeID == -1 :
server.synthpool.check()
self.nodeID = server.synthpool.get()
print self.nodeID, '< created node id'
else :
self.nodeID = nodeID
self.defpath = os.path.join( synthdefpath, stringdefname+'.scsyndef' ) # the sc synth abs path
if Synth.loadedSynthdefs.count(self.defpath) == 0 : # already loaded?
if os.path.isfile( self.defpath ) :
server.sendMsg('/d_load', self.defpath)
time.sleep(0.5) #wait till loaded
self.position = len(Synth.loadedSynthdefs)
Synth.loadedSynthdefs.append(self.defpath)
else :
print 'error : synthdef %s file does NOT exist' % self.defpath
server.sendMsg('/s_new', stringdefname, self.nodeID, addAction, addTargetID)
def __setattr__(self, item, value):
""" set a property and send it to the scsynth automatically via OSC
"""
object.__setattr__(self, item, value)
server.sendMsg('/n_set', self.nodeID, item, value)
## def __getattr__(self, item, value):
## """ set a property and send it to the scsynth automatically via OSC
## """
## def dothis(msg) :
## print 'play head at ', msg[3]
##
## server.listener.register( '/tr', doThis ) # call dothis function when a /tr message arrives
def free(self) :
## if Synth.loadedSynthdefs.count(self.defpath) : # if there
## i = Synth.loadedSynthdefs.index(self.defpath) # only me
## Synth.loadedSynthdefs.pop(i)
if self.position :
Synth.loadedSynthdefs.pop(self.position)
server.sendMsg("/n_free", self.nodeID)
def run(self, b=1) :
""" If the run flag set to zero then the node will not be executed.
If the run flag is set back to one, then it will be executed.
Using this method to start and stop nodes can cause a click if the node is not silent at
the time run flag is toggled.
"""
server.sendMsg('/n_run', self.nodeID, b)
class Group(object) :
""" Create a new group and add it to the tree of nodes.
There are four ways to add the group to the tree as determined by the add action argu-
ment which is defined as follows (the same as for "/s_new"):
add actions:
0 - add the new group to the the head of the group specified by the add target ID.
1 - add the new group to the the tail of the group specified by the add target ID.
2 - add the new group just before the node specified by the add target ID.
3 - add the new group just after the node specified by the add target ID.
4 - the new node replaces the node specified by the add target ID. The target node is
freed.
Multiple groups may be created in one command by adding arguments.
"""
def __init__(self, groupID=-1, addAction=1, addTargetID=0) :
if groupID == -1 :
server.synthpool.check()
self.groupID = server.synthpool.get()
else :
self.groupID = groupID
server.sendMsg('/g_new', self.groupID, addAction, addTargetID)
def __setattr__(self, item, value):
object.__setattr__(self, item, value)
server.sendMsg('/n_set', self.groupID, item, value)
def addToHead(self, node) :
""" add node to head of group
"""
        server.sendMsg('/g_head', self.groupID, node.nodeID)
def addToTail(self, node) :
""" add node to tail of group
"""
        server.sendMsg('/g_tail', self.groupID, node.nodeID)
def freeAll(self) :
""" Frees all nodes in the group. A list of groups may be specified.
"""
server.sendMsg('/g_freeAll', self.groupID )
def deepFree(self) :
""" traverses all groups below this group and frees all the synths. Sub-groups are not freed.
"""
server.sendMsg('/g_deepFree ', self.groupID)
| 37.190114 | 120 | 0.600552 | 6,235 | 0.63746 | 0 | 0 | 0 | 0 | 0 | 0 | 5,644 | 0.577037 |
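# A rough usage sketch for the module above: boot (or attach to) an scsynth server,
# load a sample, drive a synth through the attribute hook, then shut down. The import
# path, 'mysound.wav' and the 'default' synthdef are placeholders -- they have to exist
# under sndpath / synthdefpath for this to do anything audible.
import sc

sc.start(verbose=1, startscsynth=1)   # spawns scsynth and the OSC listener
buf_id = sc.loadSnd('mysound.wav')    # returns the allocated buffer number
synth = sc.Synth('default')           # /d_load of synthdefs/default.scsyndef + /s_new
synth.freq = 440                      # __setattr__ forwards this as an /n_set message
synth.free()
sc.quit()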
c884d28504ed798c203413f680ec73fe70726669 | 357 | py | Python | test/test_api/test_routes/test_about.py | MRmlik12/biblioteczka | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
]
| null | null | null | test/test_api/test_routes/test_about.py | MRmlik12/biblioteczka | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
]
| 3 | 2021-07-29T08:34:09.000Z | 2021-07-29T10:12:34.000Z | test/test_api/test_routes/test_about.py | MRmlik12/catana | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
]
| null | null | null | import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from starlette.status import HTTP_200_OK
pytestmark = pytest.mark.asyncio
async def test_about_route_if_status_code_is_ok(app: FastAPI, client: AsyncClient):
response = await client.request("GET", app.url_path_for("index_router"))
assert response.status_code == HTTP_200_OK
| 29.75 | 83 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.579832 | 19 | 0.053221 |
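# The test above depends on `app` and `client` fixtures that live in the project's
# conftest.py, which is not part of this file. A typical shape for them, assuming the
# project exposes an application factory (get_application is a hypothetical name) and
# uses httpx's ASGI support, would be roughly:
import pytest
from fastapi import FastAPI
from httpx import AsyncClient

@pytest.fixture
def app() -> FastAPI:
    from app.main import get_application   # hypothetical factory location
    return get_application()

@pytest.fixture
async def client(app: FastAPI) -> AsyncClient:
    async with AsyncClient(app=app, base_url="http://testserver") as c:
        yield c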
c88551ac723dd08106aa9434592b74d5d60bf757 | 2,614 | py | Python | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
]
| null | null | null | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
]
| 12 | 2018-08-26T14:10:18.000Z | 2021-04-15T21:48:58.000Z | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
]
| 1 | 2021-05-19T16:45:21.000Z | 2021-05-19T16:45:21.000Z | import linefinder.linefinder as linefinder
import linefinder.config as linefinder_config
import linefinder.utils.file_management as file_management
########################################################################
sim_name = 'm12i'
'''The simulation to run tracking on.'''
tag = '{}_sightline'.format( sim_name )
'''Identifying tag used as part of the filenames.
E.g. the IDs file will have the format `ids_{}.hdf5.format( tag )`.
'''
# Tracking Parameters
tracker_kwargs = {
# What particle types to track. Typically just stars and gas.
'p_types': [ 0, 4,],
# What snapshots to compile the particle tracks for.
'snum_start': 1,
'snum_end': 600,
'snum_step': 1,
}
file_manager = file_management.FileManager()
sampler_kwargs = {
'ignore_duplicates': True,
'p_types': [ 0, 4 ],
'snapshot_kwargs': {
'sdir': file_manager.get_sim_dir( sim_name ),
'halo_data_dir': file_manager.get_halo_dir( sim_name ),
'main_halo_id': linefinder_config.MAIN_MT_HALO_ID[sim_name],
'ahf_index': 600,
'length_scale_used': 'R_vir',
}
}
visualization_kwargs = {
'install_firefly': True,
'export_to_firefly_kwargs': {
'firefly_dir': '/work/03057/zhafen/firefly_repos/sightline',
'classifications': [
'is_in_CGM',
'is_CGM_IGM_accretion',
'is_CGM_wind',
'is_CGM_satellite_wind',
'is_CGM_satellite_ISM',
],
'classification_ui_labels': [ 'All', 'IGMAcc', 'Wind', 'SatWind', 'Sat' ],
'tracked_properties': [
'logT',
'logZ',
'logDen',
'vr_div_v_cool',
'logvr_div_v_cool_offset',
],
'tracked_filter_flags': [ True, ] * 5,
'tracked_colormap_flags': [ True, ] * 5,
'snum': 465,
},
}
# This is the actual function that runs linefinder.
# In general you don't need to touch this function but if you want to,
# for example, turn off one of the steps because you're rerunning and you
# already did that step, you can do so below.
linefinder.run_linefinder_jug(
sim_name = sim_name,
tag = tag,
galdef = '_galdefv3',
# The galdef is a set of parameters used for the galaxy linking and
# classification steps. Don't touch this unless you know what you're doing.
tracker_kwargs = tracker_kwargs,
sampler_kwargs = sampler_kwargs,
visualization_kwargs = visualization_kwargs,
run_id_selecting = False,
run_id_sampling = False,
run_tracking = False,
run_galaxy_linking = False,
run_classifying = False,
)
| 30.045977 | 82 | 0.630451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,337 | 0.511477 |
c8864bea2e2f25d967c38986aef9fb5517d5143b | 285 | py | Python | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
]
| 1 | 2020-06-23T02:18:39.000Z | 2020-06-23T02:18:39.000Z | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
]
| null | null | null | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
]
| 1 | 2021-01-11T12:07:03.000Z | 2021-01-11T12:07:03.000Z | # 47. Sum of 1+2+3+...+n
# Compute 1+2+3+...+n without using multiplication or division, the keywords for/while/if/else/switch/case, or the conditional operator (A?B:C).
# -*- coding:utf-8 -*-
class Solution:
def Sum_Solution(self, n):
# write code here
res = n
        if res:
res += self.Sum_Solution(n-1)
return res | 21.923077 | 73 | 0.540351 | 166 | 0.475645 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.555874 |
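# The solution above still uses the if keyword, which the problem statement forbids;
# the usual trick is to let boolean short-circuiting terminate the recursion instead:
class SolutionNoIf:
    def Sum_Solution(self, n):
        # `n and ...` returns 0 when n == 0, otherwise evaluates the recursive sum
        return n and (n + self.Sum_Solution(n - 1))

print(SolutionNoIf().Sum_Solution(10))  # 55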
c8870211f55a315e2890fcb0bc548ae67550546d | 137 | py | Python | apps/users/urls.py | akundev/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
]
| null | null | null | apps/users/urls.py | akundev/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
]
| 3 | 2021-03-30T14:21:08.000Z | 2021-07-07T03:04:26.000Z | apps/users/urls.py | almazkun/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
]
| null | null | null | from django.urls import path
from .views import AboutTemplateView
urlpatterns = [path("", AboutTemplateView.as_view(), name="about")]
| 19.571429 | 67 | 0.759124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.065693 |
c8879bded50ae8fbfe4e76e5d099e8ada2d7784b | 2,969 | py | Python | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
]
| null | null | null | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
]
| null | null | null | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
]
| 1 | 2021-01-30T22:38:20.000Z | 2021-01-30T22:38:20.000Z | ''' send out activitypub messages '''
from base64 import b64encode
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from datetime import datetime
import json
import requests
from fedireads import incoming
from fedireads.settings import DOMAIN
def get_recipients(user, post_privacy, direct_recipients=None):
''' deduplicated list of recipient inboxes '''
recipients = direct_recipients or []
if post_privacy == 'direct':
# all we care about is direct_recipients, not followers
return recipients
# load all the followers of the user who is sending the message
followers = user.followers.all()
if post_privacy == 'public':
# post to public shared inboxes
shared_inboxes = set(u.shared_inbox for u in followers)
recipients += list(shared_inboxes)
# TODO: not every user has a shared inbox
# TODO: direct to anyone who's mentioned
if post_privacy == 'followers':
# don't send it to the shared inboxes
inboxes = set(u.inbox for u in followers)
recipients += list(inboxes)
return recipients
def broadcast(sender, activity, recipients):
''' send out an event '''
errors = []
for recipient in recipients:
try:
sign_and_send(sender, activity, recipient)
except requests.exceptions.HTTPError as e:
# TODO: maybe keep track of users who cause errors
errors.append({
'error': e,
'recipient': recipient,
'activity': activity,
})
return errors
def sign_and_send(sender, activity, destination):
    ''' sign the activity and deliver it to the destination inbox over HTTP '''
# TODO: handle http[s] with regex
inbox_fragment = sender.inbox.replace('https://%s' % DOMAIN, '')
now = datetime.utcnow().isoformat()
signature_headers = [
'(request-target): post %s' % inbox_fragment,
'host: https://%s' % DOMAIN,
'date: %s' % now
]
message_to_sign = '\n'.join(signature_headers)
# TODO: raise an error if the user doesn't have a private key
signer = pkcs1_15.new(RSA.import_key(sender.private_key))
signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8')))
signature = {
'keyId': '%s#main-key' % sender.actor,
'algorithm': 'rsa-sha256',
'headers': '(request-target) host date',
'signature': b64encode(signed_message).decode('utf8'),
}
signature = ','.join('%s="%s"' % (k, v) for (k, v) in signature.items())
response = requests.post(
destination,
data=json.dumps(activity),
headers={
'Date': now,
'Signature': signature,
'Host': 'https://%s' % DOMAIN,
'Content-Type': 'application/activity+json; charset=utf-8',
},
)
if not response.ok:
response.raise_for_status()
incoming.handle_response(response)
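# Hypothetical usage sketch (my addition; `user` and `activity` are
# placeholders for a fedireads user instance and a serialized ActivityPub
# payload built elsewhere):
#
#     recipients = get_recipients(user, 'public')
#     failures = broadcast(user, activity, recipients)
#     for failure in failures:
#         print(failure['recipient'], failure['error'])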
| 32.988889 | 76 | 0.630852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.299764 |
c887c627a5de312187bb987f26d6bea4c3b72084 | 733 | py | Python | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
]
| null | null | null | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
]
| null | null | null | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
]
| null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from .models import Question
# Create your views here.
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
def detail(request, question_id):
return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
response = "You're looking at the results of the question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id)
| 29.32 | 68 | 0.777626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.259209 |
c889096998408750f88d5b4c179ee06539614ee4 | 48,562 | py | Python | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
]
| null | null | null | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
]
| null | null | null | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import division
from builtins import str
from builtins import range
from astropy.utils.misc import isiterable
from past.utils import old_div
import copy
import collections
import numpy as np
import healpy as hp
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import poisson
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve_fft as convolve
from astropy.coordinates import Angle
from threeML.plugin_prototype import PluginPrototype
from threeML.utils.statistics.gammaln import logfactorial
from threeML.parallel import parallel_client
from threeML.io.logging import setup_logger
log = setup_logger(__name__)
log.propagate = False
from tqdm.auto import tqdm
from astromodels import Parameter
from hawc_hal.maptree import map_tree_factory
from hawc_hal.maptree.map_tree import MapTree
from hawc_hal.maptree.data_analysis_bin import DataAnalysisBin
from hawc_hal.response import hawc_response_factory
from hawc_hal.convolved_source import ConvolvedPointSource, \
ConvolvedExtendedSource3D, ConvolvedExtendedSource2D, ConvolvedSourcesContainer
from hawc_hal.healpix_handling import FlatSkyToHealpixTransform
from hawc_hal.healpix_handling import SparseHealpix
from hawc_hal.healpix_handling import get_gnomonic_projection
from hawc_hal.psf_fast import PSFConvolutor
from hawc_hal.log_likelihood import log_likelihood
from hawc_hal.util import ra_to_longitude
class HAL(PluginPrototype):
"""
The HAWC Accelerated Likelihood plugin for 3ML.
:param name: name for the plugin
:param maptree: Map Tree (either ROOT or hdf5 format)
:param response: Response of HAWC (either ROOT or hd5 format)
:param roi: a ROI instance describing the Region Of Interest
:param flat_sky_pixels_size: size of the pixel for the flat sky projection (Hammer Aitoff)
"""
def __init__(self, name, maptree, response_file, roi, flat_sky_pixels_size=0.17):
# Store ROI
self._roi = roi
# Set up the flat-sky projection
self.flat_sky_pixels_size=flat_sky_pixels_size
self._flat_sky_projection = self._roi.get_flat_sky_projection(self.flat_sky_pixels_size)
# Read map tree (data)
self._maptree = map_tree_factory(maptree, roi=self._roi)
# Read detector response_file
self._response = hawc_response_factory(response_file)
# Use a renormalization of the background as nuisance parameter
# NOTE: it is fixed to 1.0 unless the user explicitly sets it free (experimental)
self._nuisance_parameters = collections.OrderedDict()
#self._nuisance_parameters['%s_bkg_renorm' % name] = Parameter('%s_bkg_renorm' % name, 1.0,
self._nuisance_parameters[f'{name}_bkg_renorm'] = Parameter(f'{name}_bkg_renorm', 1.0,
min_value=0.5, max_value=1.5,
delta=0.01,
desc="Renormalization for background map",
free=False,
is_normalization=False)
# Instance parent class
super(HAL, self).__init__(name, self._nuisance_parameters)
self._likelihood_model = None
# These lists will contain the maps for the point sources
self._convolved_point_sources = ConvolvedSourcesContainer()
# and this one for extended sources
self._convolved_ext_sources = ConvolvedSourcesContainer()
# All energy/nHit bins are loaded in memory
self._all_planes = list(self._maptree.analysis_bins_labels)
# The active planes list always contains the list of *indexes* of the active planes
self._active_planes = None
# Set up the transformations from the flat-sky projection to Healpix, as well as the list of active pixels
# (one for each energy/nHit bin). We make a separate transformation because different energy bins might have
# different nsides
self._active_pixels = collections.OrderedDict()
self._flat_sky_to_healpix_transform = collections.OrderedDict()
for bin_id in self._maptree:
this_maptree = self._maptree[bin_id]
this_nside = this_maptree.nside
this_active_pixels = roi.active_pixels(this_nside)
this_flat_sky_to_hpx_transform = FlatSkyToHealpixTransform(self._flat_sky_projection.wcs,
'icrs',
this_nside,
this_active_pixels,
(self._flat_sky_projection.npix_width,
self._flat_sky_projection.npix_height),
order='bilinear')
self._active_pixels[bin_id] = this_active_pixels
self._flat_sky_to_healpix_transform[bin_id] = this_flat_sky_to_hpx_transform
# This will contain a list of PSF convolutors for extended sources, if there is any in the model
self._psf_convolutors = None
# Pre-compute the log-factorial factor in the likelihood, so we do not keep to computing it over and over
# again.
self._log_factorials = collections.OrderedDict()
# We also apply a bias so that the numerical value of the log-likelihood stays small. This helps when
# fitting with algorithms like MINUIT because the convergence criterium involves the difference between
# two likelihood values, which would be affected by numerical precision errors if the two values are
# too large
self._saturated_model_like_per_maptree = collections.OrderedDict()
# The actual computation is in a method so we can recall it on clone (see the get_simulated_dataset method)
self._compute_likelihood_biases()
# This will save a clone of self for simulations
self._clone = None
# Integration method for the PSF (see psf_integration_method)
self._psf_integration_method = "exact"
@property
def psf_integration_method(self):
"""
Get or set the method for the integration of the PSF.
* "exact" is more accurate but slow, if the position is free to vary it adds a lot of time to the fit. This is
the default, to be used when the position of point sources are fixed. The computation in that case happens only
once so the impact on the run time is negligible.
* "fast" is less accurate (up to an error of few percent in flux) but a lot faster. This should be used when
the position of the point source is free, because in that case the integration of the PSF happens every time
the position changes, so several times during the fit.
If you have a fit with a free position, use "fast". When the position is found, you can fix it, switch to
"exact" and redo the fit to obtain the most accurate measurement of the flux. For normal sources the difference
will be small, but for very bright sources it might be up to a few percent (most of the time < 1%). If you are
interested in the localization contour there is no need to rerun with "exact".
:param mode: either "exact" or "fast"
:return: None
"""
return self._psf_integration_method
@psf_integration_method.setter
def psf_integration_method(self, mode):
assert mode.lower() in ["exact", "fast"], (
"PSF integration method must be either 'exact' or 'fast'"
)
self._psf_integration_method = mode.lower()
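    # Illustrative workflow (assumption: `hal` is a HAL instance whose model
    # contains a point source with a free position):
    #   hal.psf_integration_method = "fast"    # while the position floats
    #   ... fit, then fix the best-fit position in the model ...
    #   hal.psf_integration_method = "exact"   # refit for the final flux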
def _setup_psf_convolutors(self):
central_response_bins = self._response.get_response_dec_bin(self._roi.ra_dec_center[1])
self._psf_convolutors = collections.OrderedDict()
for bin_id in central_response_bins:
#Only set up PSF convolutors for active bins.
if bin_id in self._active_planes:
self._psf_convolutors[bin_id] = PSFConvolutor(central_response_bins[bin_id].psf,
self._flat_sky_projection)
def _compute_likelihood_biases(self):
for bin_label in self._maptree:
data_analysis_bin = self._maptree[bin_label]
this_log_factorial = np.sum(logfactorial(data_analysis_bin.observation_map.as_partial().astype(int)))
self._log_factorials[bin_label] = this_log_factorial
# As bias we use the likelihood value for the saturated model
obs = data_analysis_bin.observation_map.as_partial()
bkg = data_analysis_bin.background_map.as_partial()
sat_model = np.clip(obs - bkg, 1e-50, None).astype(np.float64)
self._saturated_model_like_per_maptree[bin_label] = log_likelihood(obs, bkg, sat_model) - this_log_factorial
def get_saturated_model_likelihood(self):
"""
Returns the likelihood for the saturated model (i.e. a model exactly equal to observation - background).
:return:
"""
return sum(self._saturated_model_like_per_maptree.values())
def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=None):
"""
Set the active analysis bins to use during the analysis. It can be used in two ways:
- Specifying a range: if the response and the maptree allows it, you can specify a minimum id and a maximum id
number. This only works if the analysis bins are numerical, like in the normal fHit analysis. For example:
        > set_active_measurements(bin_id_min=1, bin_id_max=9)
- Specifying a list of bins as strings. This is more powerful, as allows to select any bins, even
non-contiguous bins. For example:
        > set_active_measurements(bin_list=[list])
:param bin_id_min: minimum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_id_max: maximum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_list: a list of analysis bins to use
:return: None
"""
# Check for legal input
if bin_id_min is not None:
assert bin_id_max is not None, (
"If you provide a minimum bin, you also need to provide a maximum bin."
)
# Make sure they are integers
bin_id_min = int(bin_id_min)
bin_id_max = int(bin_id_max)
self._active_planes = []
for this_bin in range(bin_id_min, bin_id_max + 1):
this_bin = str(this_bin)
if this_bin not in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
else:
assert bin_id_max is None, (
"If you provie a maximum bin, you also need to provide a minimum bin."
)
assert bin_list is not None
self._active_planes = []
for this_bin in bin_list:
                if this_bin not in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
if self._likelihood_model:
self.set_model( self._likelihood_model )
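    # Illustrative calls (assumption: `hal` is a HAL instance and the maptree
    # actually contains the requested analysis bins):
    #   hal.set_active_measurements(1, 9)                       # fHit bins 1-9
    #   hal.set_active_measurements(bin_list=['4', '5', '6'])   # explicit list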
def display(self, verbose=False):
"""
Prints summary of the current object content.
"""
log.info("Region of Interest: ")
log.info("-------------------")
self._roi.display()
log.info("")
log.info("Flat sky projection: ")
log.info("--------------------")
log.info(
f"Width x height {self._flat_sky_projection.npix_width} x {self._flat_sky_projection.npix_height} px"
)
#log.info("Width x height: %s x %s px" % (self._flat_sky_projection.npix_width,
# self._flat_sky_projection.npix_height))
log.info(f"Pixel sizes: {self._flat_sky_projection.pixel_size} deg")
#log.info("Pixel sizes: %s deg" % self._flat_sky_projection.pixel_size)
log.info("")
log.info("Response: ")
log.info("---------")
self._response.display(verbose)
log.info("")
log.info("Map Tree: ")
log.info("----------")
self._maptree.display()
log.info("")
#log.info("Active energy/nHit planes ({}):".format(len(self._active_planes)))
log.info(f"Active energy/nHit planes ({len(self._active_planes)}):")
log.info("-------------------------------")
log.info(self._active_planes)
def set_model(self, likelihood_model_instance):
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
self._likelihood_model = likelihood_model_instance
# Reset
self._convolved_point_sources.reset()
self._convolved_ext_sources.reset()
# For each point source in the model, build the convolution class
for source in list(self._likelihood_model.point_sources.values()):
this_convolved_point_source = ConvolvedPointSource(source, self._response, self._flat_sky_projection)
self._convolved_point_sources.append(this_convolved_point_source)
# Samewise for extended sources
ext_sources = list(self._likelihood_model.extended_sources.values())
# NOTE: ext_sources evaluate to False if empty
if ext_sources:
# We will need to convolve
self._setup_psf_convolutors()
for source in ext_sources:
if source.spatial_shape.n_dim == 2:
this_convolved_ext_source = ConvolvedExtendedSource2D(source,
self._response,
self._flat_sky_projection)
else:
this_convolved_ext_source = ConvolvedExtendedSource3D(source,
self._response,
self._flat_sky_projection)
self._convolved_ext_sources.append(this_convolved_ext_source)
def get_excess_background(self, ra, dec, radius):
"""
        Calculates area, excess (data - background) and model counts of the
        source at different distances from the source.
        :param radius: radial distance away from the center (degrees).
:returns: tuple of numpy.ndarrays for areas, excess, model, and background
this information is used in the get_radial_profile function.
"""
radius_radians = np.deg2rad(radius)
total_counts = np.zeros(len(self._active_planes), dtype=float)
background = np.zeros_like(total_counts)
observation = np.zeros_like(total_counts)
model = np.zeros_like(total_counts)
signal = np.zeros_like(total_counts)
area = np.zeros_like(total_counts)
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
longitude = ra_to_longitude(ra)
latitude = dec
center = hp.ang2vec(longitude, latitude, lonlat=True)
for i, energy_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[energy_id]
this_nside = data_analysis_bin.observation_map.nside
pixels_at_radius = hp.query_disc(
this_nside,
center,
radius_radians,
inclusive=False,
)
# calculate the areas per bin by the product
# of pixel area by the number of pixels at each radial bin
area[i] = hp.nside2pixarea(this_nside)*pixels_at_radius.shape[0]
# NOTE: select active pixels according to each radial bin
bin_active_pixel_indexes = np.searchsorted(self._active_pixels[energy_id], pixels_at_radius)
# obtain the excess, background, and expected excess at each radial bin
data = data_analysis_bin.observation_map.as_partial()
bkg = data_analysis_bin.background_map.as_partial()
mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial()
bin_data = np.array([data[i] for i in bin_active_pixel_indexes])
bin_bkg = np.array([bkg[i] for i in bin_active_pixel_indexes])
bin_model = np.array([mdl[i] for i in bin_active_pixel_indexes])
this_data_tot = np.sum(bin_data)
this_bkg_tot = np.sum(bin_bkg)
this_model_tot = np.sum(bin_model)
background[i] = this_bkg_tot
observation[i] = this_data_tot
model[i] = this_model_tot
signal[i] = this_data_tot - this_bkg_tot
return area, signal, model, background
def get_radial_profile(
self,
ra,
dec,
active_planes=None,
max_radius=3.0,
n_radial_bins=30,
model_to_subtract=None,
subtract_model_from_model=False,
):
"""
Calculates radial profiles of data - background & model.
:param ra: R.A. of origin for radial profile.
:param dec: Declination of origin of radial profile.
        :param active_planes: List of analysis bins over which to average; if None, use HAWC default (bins 1-9).
:param: max_radius: Radius up to which the radial profile is evaluated;
for the disk to calculate the gamma/hadron weights (Default: 3.0).
:param n_radial_bins: Number of bins for the radial profile (Default: 30).
:param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
:param subtract_model_from_model: If True and model_to_subtract is not None,
            subtract model from model too (Default: False).
:return: np.arrays with the radii, model profile, data profile, data uncertainty, and
list of analysis bins used.
"""
# default is to use all active bins
if active_planes is None:
active_planes = self._active_planes
# Make sure we use bins with data
good_planes = [plane_id in active_planes for plane_id in self._active_planes]
plane_ids = set(active_planes) & set(self._active_planes)
delta_r = 1.0*max_radius/n_radial_bins
radii = np.array([delta_r*(r + 0.5) for r in range(0, n_radial_bins)])
# Get area of all pixels in a given circle
# The area of each ring is then given by the difference between two
# subsequent circe areas.
area = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[0] for r in radii ]
)
temp = area[1:] - area[:-1]
area[1:] = temp
# model
# convert 'top hat' excess into 'ring' excesses.
model = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
)
temp = model[1:] - model[:-1]
model[1:] = temp
# signals
signal = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[1] for r in radii]
)
temp = signal[1:] - signal[:-1]
signal[1:] = temp
# backgrounds
bkg = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[3] for r in radii]
)
temp = bkg[1:] - bkg[:-1]
bkg[1:] = temp
counts = signal + bkg
if model_to_subtract is not None:
this_model = copy.deepcopy(self._likelihood_model)
self.set_model(model_to_subtract)
model_subtract = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
)
temp = model_subtract[1:] - model_subtract[:-1]
model_subtract[1:] = temp
signal -= model_subtract
if subtract_model_from_model:
model -= model_subtract
self.set_model(this_model)
# NOTE: weights are calculated as expected number of gamma-rays/number of background counts.
# here, use max_radius to evaluate the number of gamma-rays/bkg counts.
# The weights do not depend on the radius, but fill a matrix anyway so
# there's no confusion when multiplying them to the data later.
# Weight is normalized (sum of weights over the bins = 1).
total_excess = np.array(
self.get_excess_background(ra, dec, max_radius)[1]
)[good_planes]
total_model = np.array(
self.get_excess_background(ra, dec, max_radius)[2]
)[good_planes]
total_bkg = np.array(
self.get_excess_background(ra, dec, max_radius)[3]
)[good_planes]
w = np.divide(total_model, total_bkg)
weight = np.array([w/np.sum(w) for r in radii])
# restric profiles to the user-specified analysis bins
area = area[:, good_planes]
signal = signal[:, good_planes]
model = model[:, good_planes]
counts = counts[:, good_planes]
bkg = bkg[:, good_planes]
# average over the analysis bins
excess_data = np.average(signal/area, weights=weight, axis=1)
excess_error = np.sqrt(np.sum(counts*weight*weight/(area*area), axis=1))
excess_model = np.average(model/area, weights=weight, axis=1)
return radii, excess_model, excess_data, excess_error, sorted(plane_ids)
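    # Illustrative call (assumption: `hal` is a HAL instance with a model set;
    # coordinates are J2000 degrees):
    #   radii, mdl, data, err, bins = hal.get_radial_profile(
    #       83.63, 22.01, max_radius=2.0, n_radial_bins=20)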
def plot_radial_profile(
self,
ra,
dec,
active_planes=None,
max_radius=3.0,
n_radial_bins=30,
model_to_subtract=None,
subtract_model_from_model=False
):
"""
Plots radial profiles of data - background & model.
:param ra: R.A. of origin for radial profile.
:param dec: Declination of origin of radial profile.
:param active_planes: List of analysis bins over which to average;
if None, use HAWC default (bins 1-9).
:param max_radius: Radius up to which the radial profile is evaluated; also
used as the radius for the disk to calculate the gamma/hadron weights. Default: 3.0
:param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
:param subtract_model_from_model: If True and model_to_subtract is not None, subtract from model too (Default: False).
:return: plot of data - background vs model radial profiles.
"""
(
radii,
excess_model,
excess_data,
excess_error,
plane_ids,
) = self.get_radial_profile(
ra,
dec,
active_planes,
max_radius,
n_radial_bins,
model_to_subtract,
subtract_model_from_model,
)
#font = {
# "family":"serif",
# "weight":"regular",
# "size":12
#}
#mpl.rc("font", **font)
fig, ax = plt.subplots(figsize=(10,8))
plt.errorbar(
radii,
excess_data,
yerr=excess_error,
capsize=0,
color="black",
label="Excess (data-bkg)",
fmt=".",
)
plt.plot(radii, excess_model, color="red", label="Model")
plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1)
plt.axhline(0, color="deepskyblue", linestyle="--")
x_limits=[0, max_radius]
plt.xlim(x_limits)
plt.ylabel(r"Apparent Radial Excess [sr$^{-1}$]")
plt.xlabel(
f"Distance from source at ({ra:0.2f} $^{{\circ}}$, {dec:0.2f} $^{{\circ}}$)"
)
if len(plane_ids) == 1:
title = f"Radial Profile, bin {plane_ids[0]}"
else:
            tmptitle = f"Radial Profile, bins \n{plane_ids}"
            width = 70
            title = "\n".join(
                tmptitle[i:i+width] for i in range(0, len(tmptitle), width)
            )
plt.title(title)
ax.grid(True)
try:
plt.tight_layout()
except:
pass
return fig
def display_spectrum(self):
"""
Make a plot of the current spectrum and its residuals (integrated over space)
:return: a matplotlib.Figure
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
total_counts = np.zeros(len(self._active_planes), dtype=float)
total_model = np.zeros_like(total_counts)
model_only = np.zeros_like(total_counts)
net_counts = np.zeros_like(total_counts)
yerr_low = np.zeros_like(total_counts)
yerr_high = np.zeros_like(total_counts)
for i, energy_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[energy_id]
this_model_map_hpx = self._get_expectation(data_analysis_bin, energy_id, n_point_sources, n_ext_sources)
this_model_tot = np.sum(this_model_map_hpx)
this_data_tot = np.sum(data_analysis_bin.observation_map.as_partial())
this_bkg_tot = np.sum(data_analysis_bin.background_map.as_partial())
total_counts[i] = this_data_tot
net_counts[i] = this_data_tot - this_bkg_tot
model_only[i] = this_model_tot
this_wh_model = this_model_tot + this_bkg_tot
total_model[i] = this_wh_model
if this_data_tot >= 50.0:
# Gaussian limit
# Under the null hypothesis the data are distributed as a Gaussian with mu = model
# and sigma = sqrt(model)
# NOTE: since we neglect the background uncertainty, the background is part of the
# model
yerr_low[i] = np.sqrt(this_data_tot)
yerr_high[i] = np.sqrt(this_data_tot)
else:
# Low-counts
# Under the null hypothesis the data are distributed as a Poisson distribution with
# mean = model, plot the 68% confidence interval (quantile=[0.16,1-0.16]).
# NOTE: since we neglect the background uncertainty, the background is part of the
# model
quantile = 0.16
mean = this_wh_model
y_low = poisson.isf(1-quantile, mu=mean)
y_high = poisson.isf(quantile, mu=mean)
yerr_low[i] = mean-y_low
yerr_high[i] = y_high-mean
residuals = old_div((total_counts - total_model), np.sqrt(total_model))
residuals_err = [old_div(yerr_high, np.sqrt(total_model)),
old_div(yerr_low, np.sqrt(total_model))]
yerr = [yerr_high, yerr_low]
return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err)
def _plot_spectrum(self, net_counts, yerr, model_only, residuals, residuals_err):
fig, subs = plt.subplots(2, 1, gridspec_kw={'height_ratios': [2, 1], 'hspace': 0}, figsize=(12,6))
planes = np.array(self._active_planes)
subs[0].errorbar(planes, net_counts, yerr=yerr,
capsize=0,
color='black', label='Net counts', fmt='.')
subs[0].plot(planes, model_only, label='Convolved model')
subs[0].legend(bbox_to_anchor=(1.0, 1.0), loc="upper right",
numpoints=1)
# Residuals
subs[1].axhline(0, linestyle='--')
subs[1].errorbar(
planes, residuals,
yerr=residuals_err,
capsize=0, fmt='.'
)
y_limits = [min(net_counts[net_counts > 0]) / 2., max(net_counts) * 2.]
subs[0].set_yscale("log", nonpositive='clip')
subs[0].set_ylabel("Counts per bin")
subs[0].set_xticks([])
subs[1].set_xlabel("Analysis bin")
subs[1].set_ylabel(r"$\frac{{cts - mod - bkg}}{\sqrt{mod + bkg}}$")
subs[1].set_xticks(planes)
subs[1].set_xticklabels(self._active_planes)
subs[0].set_ylim(y_limits)
return fig
def get_log_like(self):
"""
Return the value of the log-likelihood with the current values for the
parameters
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
# Make sure that no source has been added since we filled the cache
assert (n_point_sources == self._convolved_point_sources.n_sources_in_cache and
n_ext_sources == self._convolved_ext_sources.n_sources_in_cache), (
"The number of sources has changed. Please re-assign the model to the plugin."
)
#assert n_point_sources == self._convolved_point_sources.n_sources_in_cache and \
# n_ext_sources == self._convolved_ext_sources.n_sources_in_cache, \
# "The number of sources has changed. Please re-assign the model to the plugin."
# This will hold the total log-likelihood
total_log_like = 0
for bin_id in self._active_planes:
data_analysis_bin = self._maptree[bin_id]
this_model_map_hpx = self._get_expectation(data_analysis_bin, bin_id, n_point_sources, n_ext_sources)
# Now compare with observation
bkg_renorm = list(self._nuisance_parameters.values())[0].value
obs = data_analysis_bin.observation_map.as_partial() # type: np.array
bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm # type: np.array
this_pseudo_log_like = log_likelihood(obs,
bkg,
this_model_map_hpx)
total_log_like += this_pseudo_log_like - self._log_factorials[bin_id] \
- self._saturated_model_like_per_maptree[bin_id]
return total_log_like
def write(self, response_file_name, map_tree_file_name):
"""
Write this dataset to disk in HDF format.
:param response_file_name: filename for the response
:param map_tree_file_name: filename for the map tree
:return: None
"""
self._maptree.write(map_tree_file_name)
self._response.write(response_file_name)
def get_simulated_dataset(self, name):
"""
Return a simulation of this dataset using the current model with current parameters.
:param name: new name for the new plugin instance
:return: a HAL instance
"""
# First get expectation under the current model and store them, if we didn't do it yet
if self._clone is None:
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
expectations = collections.OrderedDict()
for bin_id in self._maptree:
data_analysis_bin = self._maptree[bin_id]
if bin_id not in self._active_planes:
expectations[bin_id] = None
else:
expectations[bin_id] = self._get_expectation(data_analysis_bin, bin_id,
n_point_sources, n_ext_sources) + \
data_analysis_bin.background_map.as_partial()
if parallel_client.is_parallel_computation_active():
# Do not clone, as the parallel environment already makes clones
clone = self
else:
clone = copy.deepcopy(self)
self._clone = (clone, expectations)
# Substitute the observation and background for each data analysis bin
for bin_id in self._clone[0]._maptree:
data_analysis_bin = self._clone[0]._maptree[bin_id]
if bin_id not in self._active_planes:
continue
else:
# Active plane. Generate new data
expectation = self._clone[1][bin_id]
new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten()
# Substitute data
data_analysis_bin.observation_map.set_new_values(new_data)
# Now change name and return
self._clone[0]._name = name
# Adjust the name of the nuisance parameter
old_name = list(self._clone[0]._nuisance_parameters.keys())[0]
new_name = old_name.replace(self.name, name)
self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop(old_name)
# Recompute biases
self._clone[0]._compute_likelihood_biases()
return self._clone[0]
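    # Illustrative call (assumption: a likelihood model has already been set so
    # that expectations can be computed):
    #   sim_plugin = hal.get_simulated_dataset("HAWC_sim")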
def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources):
# Compute the expectation from the model
this_model_map = None
for pts_id in range(n_point_sources):
this_conv_src = self._convolved_point_sources[pts_id]
expectation_per_transit = this_conv_src.get_source_map(energy_bin_id,
tag=None,
psf_integration_method=self._psf_integration_method)
expectation_from_this_source = expectation_per_transit * data_analysis_bin.n_transits
if this_model_map is None:
# First addition
this_model_map = expectation_from_this_source
else:
this_model_map += expectation_from_this_source
# Now process extended sources
if n_ext_sources > 0:
this_ext_model_map = None
for ext_id in range(n_ext_sources):
this_conv_src = self._convolved_ext_sources[ext_id]
expectation_per_transit = this_conv_src.get_source_map(energy_bin_id)
if this_ext_model_map is None:
# First addition
this_ext_model_map = expectation_per_transit
else:
this_ext_model_map += expectation_per_transit
# Now convolve with the PSF
if this_model_map is None:
# Only extended sources
this_model_map = (self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) *
data_analysis_bin.n_transits)
else:
this_model_map += (self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) *
data_analysis_bin.n_transits)
# Now transform from the flat sky projection to HEALPiX
if this_model_map is not None:
# First divide for the pixel area because we need to interpolate brightness
#this_model_map = old_div(this_model_map, self._flat_sky_projection.project_plane_pixel_area)
this_model_map = this_model_map/self._flat_sky_projection.project_plane_pixel_area
this_model_map_hpx = self._flat_sky_to_healpix_transform[energy_bin_id](this_model_map, fill_value=0.0)
# Now multiply by the pixel area of the new map to go back to flux
this_model_map_hpx *= hp.nside2pixarea(data_analysis_bin.nside, degrees=True)
else:
# No sources
this_model_map_hpx = 0.0
return this_model_map_hpx
@staticmethod
def _represent_healpix_map(fig, hpx_map, longitude, latitude, xsize, resolution, smoothing_kernel_sigma):
proj = get_gnomonic_projection(fig, hpx_map,
rot=(longitude, latitude, 0.0),
xsize=xsize,
reso=resolution)
if smoothing_kernel_sigma is not None:
# Get the sigma in pixels
sigma = old_div(smoothing_kernel_sigma * 60, resolution)
proj = convolve(list(proj),
Gaussian2DKernel(sigma),
nan_treatment='fill',
preserve_nan=True)
return proj
def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False):
"""
Make a figure containing 4 maps for each active analysis bins with respectively model, data,
background and residuals. The model, data and residual maps are smoothed, the background
map is not.
:param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel, for all but
background maps
:param display_colorbar: whether or not to display the colorbar in the residuals
:return: a matplotlib.Figure
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
# This is the resolution (i.e., the size of one pixel) of the image
resolution = 3.0 # arcmin
# The image is going to cover the diameter plus 20% padding
xsize = self._get_optimal_xsize(resolution)
n_active_planes = len(self._active_planes)
n_columns = 4
fig, subs = plt.subplots(n_active_planes, n_columns,
figsize=(2.7 * n_columns, n_active_planes * 2), squeeze=False)
prog_bar = tqdm(total = len(self._active_planes), desc="Smoothing planes")
images = ['None'] * n_columns
for i, plane_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[plane_id]
# Get the center of the projection for this plane
this_ra, this_dec = self._roi.ra_dec_center
# Make a full healpix map for a second
whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense()
# Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
longitude = ra_to_longitude(this_ra)
# Declination is already between -90 and 90
latitude = this_dec
# Background and excess maps
bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True)
# Make all the projections: model, excess, background, residuals
proj_model = self._represent_healpix_map(fig, whole_map,
longitude, latitude,
xsize, resolution, smoothing_kernel_sigma)
# Here we removed the background otherwise nothing is visible
# Get background (which is in a way "part of the model" since the uncertainties are neglected)
proj_data = self._represent_healpix_map(fig, bkg_subtracted,
longitude, latitude,
xsize, resolution, smoothing_kernel_sigma)
# No smoothing for this one (because a goal is to check it is smooth).
proj_bkg = self._represent_healpix_map(fig, background_map,
longitude, latitude,
xsize, resolution, None)
proj_residuals = proj_data - proj_model
# Common color scale range for model and excess maps
vmin = min(np.nanmin(proj_model), np.nanmin(proj_data))
vmax = max(np.nanmax(proj_model), np.nanmax(proj_data))
# Plot model
images[0] = subs[i][0].imshow(proj_model, origin='lower', vmin=vmin, vmax=vmax)
subs[i][0].set_title('model, bin {}'.format(data_analysis_bin.name))
# Plot data map
images[1] = subs[i][1].imshow(proj_data, origin='lower', vmin=vmin, vmax=vmax)
subs[i][1].set_title('excess, bin {}'.format(data_analysis_bin.name))
# Plot background map.
images[2] = subs[i][2].imshow(proj_bkg, origin='lower')
subs[i][2].set_title('background, bin {}'.format(data_analysis_bin.name))
# Now residuals
images[3] = subs[i][3].imshow(proj_residuals, origin='lower')
subs[i][3].set_title('residuals, bin {}'.format(data_analysis_bin.name))
# Remove numbers from axis
for j in range(n_columns):
subs[i][j].axis('off')
if display_colorbar:
for j, image in enumerate(images):
plt.colorbar(image, ax=subs[i][j])
prog_bar.update(1)
fig.set_tight_layout(True)
return fig
def _get_optimal_xsize(self, resolution):
return 2.2 * self._roi.data_radius.to("deg").value / (resolution / 60.0)
def display_stacked_image(self, smoothing_kernel_sigma=0.5):
"""
Display a map with all active analysis bins stacked together.
:param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel to apply
:return: a matplotlib.Figure instance
"""
# This is the resolution (i.e., the size of one pixel) of the image in arcmin
resolution = 3.0
# The image is going to cover the diameter plus 20% padding
xsize = self._get_optimal_xsize(resolution)
active_planes_bins = [self._maptree[x] for x in self._active_planes]
# Get the center of the projection for this plane
this_ra, this_dec = self._roi.ra_dec_center
# Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
longitude = ra_to_longitude(this_ra)
# Declination is already between -90 and 90
latitude = this_dec
total = None
for i, data_analysis_bin in enumerate(active_planes_bins):
# Plot data
background_map = data_analysis_bin.background_map.as_dense()
this_data = data_analysis_bin.observation_map.as_dense() - background_map
idx = np.isnan(this_data)
# this_data[idx] = hp.UNSEEN
if i == 0:
total = this_data
else:
# Sum only when there is no UNSEEN, so that the UNSEEN pixels will stay UNSEEN
total[~idx] += this_data[~idx]
delta_coord = (self._roi.data_radius.to("deg").value * 2.0) / 15.0
fig, sub = plt.subplots(1, 1)
proj = self._represent_healpix_map(fig, total, longitude, latitude, xsize, resolution, smoothing_kernel_sigma)
cax = sub.imshow(proj, origin='lower')
fig.colorbar(cax)
sub.axis('off')
hp.graticule(delta_coord, delta_coord)
return fig
def inner_fit(self):
"""
This is used for the profile likelihood. Keeping fixed all parameters in the
LikelihoodModel, this method minimize the logLike over the remaining nuisance
parameters, i.e., the parameters belonging only to the model for this
particular detector. If there are no nuisance parameters, simply return the
logLike value.
"""
return self.get_log_like()
def get_number_of_data_points(self):
"""
Return the number of active bins across all active analysis bins
:return: number of active bins
"""
n_points = 0
for bin_id in self._maptree:
n_points += self._maptree[bin_id].observation_map.as_partial().shape[0]
return n_points
def _get_model_map(self, plane_id, n_pt_src, n_ext_src):
"""
This function returns a model map for a particular bin
"""
if plane_id not in self._active_planes:
raise ValueError(
f"{plane_id} not a plane in the current model"
)
model_map = SparseHealpix(self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src),
self._active_pixels[plane_id],
self._maptree[plane_id].observation_map.nside)
return model_map
def _get_excess(self, data_analysis_bin, all_maps=True):
"""
This function returns the excess counts for a particular bin
if all_maps=True, also returns the data and background maps
"""
data_map = data_analysis_bin.observation_map.as_dense()
bkg_map = data_analysis_bin.background_map.as_dense()
excess = data_map - bkg_map
if all_maps:
return excess, data_map, bkg_map
return excess
def _write_a_map(self, file_name, which, fluctuate=False, return_map=False):
"""
This writes either a model map or a residual map, depending on which one is preferred
"""
which = which.lower()
assert which in ['model', 'residual']
n_pt = self._likelihood_model.get_number_of_point_sources()
n_ext = self._likelihood_model.get_number_of_extended_sources()
map_analysis_bins = collections.OrderedDict()
if fluctuate:
poisson_set = self.get_simulated_dataset("model map")
for plane_id in self._active_planes:
data_analysis_bin = self._maptree[plane_id]
bkg = data_analysis_bin.background_map
obs = data_analysis_bin.observation_map
if fluctuate:
model_excess = poisson_set._maptree[plane_id].observation_map \
- poisson_set._maptree[plane_id].background_map
else:
model_excess = self._get_model_map(plane_id, n_pt, n_ext)
if which == 'residual':
bkg += model_excess
if which == 'model':
obs = model_excess + bkg
this_bin = DataAnalysisBin(plane_id,
observation_hpx_map=obs,
background_hpx_map=bkg,
active_pixels_ids=self._active_pixels[plane_id],
n_transits=data_analysis_bin.n_transits,
scheme='RING')
map_analysis_bins[plane_id] = this_bin
# save the file
new_map_tree = MapTree(map_analysis_bins, self._roi)
new_map_tree.write(file_name)
if return_map:
return new_map_tree
def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False):
"""
This function writes the model map to a file.
The interface is based off of HAWCLike for consistency
"""
if test_return_map:
log.warning("test_return_map=True should only be used for testing purposes!")
return self._write_a_map(file_name, 'model', poisson_fluctuate, test_return_map)
def write_residual_map(self, file_name, test_return_map=False):
"""
This function writes the residual map to a file.
The interface is based off of HAWCLike for consistency
"""
if test_return_map:
log.warning("test_return_map=True should only be used for testing purposes!")
return self._write_a_map(file_name, 'residual', False, test_return_map)
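# Hypothetical end-to-end sketch (my addition; file names, the ROI centre and
# the threeML likelihood model `model` are placeholders, not part of this
# module):
#
#     from hawc_hal import HAL, HealpixConeROI
#     roi = HealpixConeROI(data_radius=3.0, model_radius=8.0,
#                          ra=83.63, dec=22.01)
#     hawc = HAL("HAWC", "maptree.hd5", "response.hd5", roi)
#     hawc.set_active_measurements(1, 9)
#     hawc.set_model(model)
#     log_like = hawc.get_log_like()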
| 38.20771 | 126 | 0.609942 | 47,086 | 0.969606 | 0 | 0 | 2,267 | 0.046683 | 0 | 0 | 15,292 | 0.314896 |
c88aff50b9e6ce0d5c309be594a03b1f208a90db | 15,227 | py | Python | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
]
| null | null | null | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
]
| null | null | null | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
]
| null | null | null | # File: sshcustodian/sshcustodian.py
# -*- coding: utf-8 -*-
# Python 2/3 Compatibility
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from six.moves import filterfalse
"""
This module creates a subclass of the main Custodian class in the Custodian
project (github.com/materialsproject/custodian), which is a wrapper that
manages jobs running on computing clusters. The Custodian module is part of The
Materials Project (materialsproject.org/).
This subclass adds the functionality to copy the temporary directory created
via monty to the scratch partitions on slave compute nodes, provided that the
cluster's filesystem is configured in this way. The implementation invokes a
subprocess to utilize the ssh executable installed on the cluster, so it is not
particularly elegant or platform independent, nor is this solution likely to be
general to all clusters. This is why this modification has not been submitted
as a pull request to the main Custodian project.
"""
# Import modules
import logging
import subprocess
import sys
import datetime
import time
import os
import re
from itertools import islice, groupby
from socket import gethostname
from monty.tempfile import ScratchDir
from monty.shutil import gzip_dir
from monty.json import MontyEncoder
from monty.serialization import dumpfn
from custodian.custodian import Custodian
from custodian.custodian import CustodianError
# Module-level logger
logger = logging.getLogger(__name__)
class SSHCustodian(Custodian):
"""
The SSHCustodian class modifies the Custodian class from the custodian
module to be able to handle clusters that have separate scratch partitions
for each node. When scratch_dir_node_only is enabled, the temp_dir that
monty creates will be copied to all other compute nodes used in the
calculation and subsequently removed when the job is finished.
"""
__doc__ += Custodian.__doc__
def __init__(self, handlers, jobs, validators=None, max_errors=1,
polling_time_step=10, monitor_freq=30,
skip_over_errors=False, scratch_dir=None,
gzipped_output=False, checkpoint=False,
scratch_dir_node_only=False, pbs_nodefile=None):
""" scratch_dir_node_only (bool): If set to True, custodian will grab
            the list of nodes from the file path provided to pbs_nodefile and
            copy the temp_dir to the scratch_dir on each node over
ssh. This is necessary on cluster setups where each node has
its own independent scratch partition.
pbs_nodefile (str): The filepath to the list of nodes to be used in
a calculation. If this path does not point to a valid file,
then scratch_dir_node_only will be automatically set to False.
"""
super(SSHCustodian, self).__init__(handlers, jobs, validators,
max_errors, polling_time_step,
monitor_freq, skip_over_errors,
scratch_dir, gzipped_output,
checkpoint)
self.hostname = gethostname()
if pbs_nodefile is None:
self.scratch_dir_node_only = False
self.slave_compute_node_list = None
elif os.path.exists(pbs_nodefile):
self.scratch_dir_node_only = scratch_dir_node_only
self.pbs_nodefile = pbs_nodefile
self.slave_compute_node_list = (
self._process_pbs_nodefile(self.pbs_nodefile, self.hostname))
else:
self.scratch_dir_node_only = False
self.pbs_nodefile = None
self.slave_compute_node_list = None
@staticmethod
def _process_pbs_nodefile(pbs_nodefile, hostname):
with open(pbs_nodefile) as in_file:
nodelist = in_file.read().splitlines()
slave_compute_node_list = [
node for node, _ in groupby(filterfalse(lambda x: x == hostname,
nodelist))
]
return slave_compute_node_list
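    # Worked example (assumption: the master hostname is 'node1' and the PBS
    # nodefile lists one line per allocated core):
    #   nodefile lines: node1, node1, node2, node2, node3
    #   -> _process_pbs_nodefile(path, 'node1') returns ['node2', 'node3']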
def _copy_to_slave_node_dirs(self, temp_dir_path):
"""
Copy temporary scratch directory from master node to other nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = ['rsync', '-azhq', temp_dir_path,
'{0}:{1}'.format(node,
os.path.abspath(self.scratch_dir))]
p = subprocess.Popen(command, shell=False)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _update_slave_node_vasp_input_files(self, temp_dir_path):
"""
Update VASP input files in the scratch partition on the slave compute
nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
VASP_INPUT_FILES = [x for x in ["{0}/CHGCAR".format(temp_dir_path),
"{0}/WAVECAR".format(temp_dir_path),
"{0}/INCAR".format(temp_dir_path),
"{0}/POSCAR".format(temp_dir_path),
"{0}/POTCAR".format(temp_dir_path),
"{0}/KPOINTS".format(temp_dir_path)] if
os.path.exists(x)]
process_list = []
for node in self.slave_compute_node_list:
for filepath in VASP_INPUT_FILES:
command = 'scp {0} {1}:{2}/'.format(filepath, node,
temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _delete_slave_node_dirs(self, temp_dir_path):
"""
Delete the temporary scratch directory on the slave nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = 'ssh {0} "rm -rf {1}"'.format(node, temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for deletion to finish before moving on
for process in process_list:
process.wait()
def _manage_node_scratch(self, temp_dir_path, job_start):
"""
Checks whether the user wants to make use of scratch partitions on each
compute node, and if True, either copies the temporary directory to or
deletes the temporary directory from each slave compute node. If the
user does not specify to use node-specific scratch partitions, then the
function does nothing.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job_start (bool): If True, then the job has started and the
temporary directory will be copied to the slave compute
nodes. If False, then the temporary directories will be deleted
from the slave compute nodes.
"""
if self.scratch_dir_node_only:
if job_start:
self._copy_to_slave_node_dirs(temp_dir_path)
else:
self._delete_slave_node_dirs(temp_dir_path)
else:
pass
def _update_node_scratch(self, temp_dir_path, job):
"""
Method to update the scratch partitions on the slave compute nodes
if they exist and are running a VASP job.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job (object): The job object you intend to run. Currently supports
VASP jobs.
"""
vasp_re = re.compile(r'vasp')
if self.scratch_dir is not None:
try:
jobtype = job.get_jobtype()
if self.scratch_dir_node_only:
if vasp_re.match(jobtype):
self._update_slave_node_vasp_input_files(temp_dir_path)
else:
pass
else:
pass
            except Exception:
pass
else:
pass
def run(self):
"""
Override of Custodian.run() to include instructions to copy the
temp_dir to the scratch partition on slave compute nodes if requested.
"""
cwd = os.getcwd()
with ScratchDir(self.scratch_dir, create_symbolic_link=True,
copy_to_current_on_exit=True,
copy_from_current_on_enter=True) as temp_dir:
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=True)
self.total_errors = 0
start = datetime.datetime.now()
logger.info("Run started at {} in {}.".format(
start, temp_dir))
v = sys.version.replace("\n", " ")
logger.info("Custodian running on Python version {}".format(v))
try:
# skip jobs until the restart
for job_n, job in islice(enumerate(self.jobs, 1),
self.restart, None):
self._run_job(job_n, job, temp_dir)
# Checkpoint after each job so that we can recover from
# last point and remove old checkpoints
if self.checkpoint:
super(SSHCustodian, self)._save_checkpoint(cwd, job_n)
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise RuntimeError("{} errors reached: {}. Exited..."
.format(self.total_errors, ex))
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(super(SSHCustodian,
self).LOG_FILE))
dumpfn(self.run_log, super(SSHCustodian, self).LOG_FILE,
cls=MontyEncoder, indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
# Remove duplicate copy of log file, provided it ends with
# ".log"
for x in ([x for x in os.listdir(temp_dir)
if re.match(r'\w*\.log', x)]):
os.remove(os.path.join(temp_dir, x))
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=False)
if self.gzipped_output:
gzip_dir(".")
# Cleanup checkpoint files (if any) if run is successful.
super(SSHCustodian, self)._delete_checkpoints(cwd)
return self.run_log
def _run_job(self, job_n, job, temp_dir):
"""
Overrides custodian.custodian._run_job() to propagate changes to input
files on different scratch partitions on compute nodes, if needed.
"""
self.run_log.append({"job": job.as_dict(), "corrections": []})
job.setup()
for attempt in range(1, self.max_errors - self.total_errors + 1):
# Propagate updated input files, if needed
self._update_node_scratch(temp_dir, job)
logger.info(
"Starting job no. {} ({}) attempt no. {}. Errors "
"thus far = {}.".format(
job_n, job.name, attempt, self.total_errors))
p = job.run()
# Check for errors using the error handlers and perform
# corrections.
has_error = False
# While the job is running, we use the handlers that are
# monitors to monitor the job.
if isinstance(p, subprocess.Popen):
if self.monitors:
n = 0
while True:
n += 1
time.sleep(self.polling_time_step)
if p.poll() is not None:
break
if n % self.monitor_freq == 0:
has_error = self._do_check(self.monitors,
p.terminate)
else:
p.wait()
logger.info("{}.run has completed. "
"Checking remaining handlers".format(job.name))
# Check for errors again, since in some cases non-monitor
# handlers fix the problems detected by monitors
# if an error has been found, not all handlers need to run
if has_error:
self._do_check([h for h in self.handlers
if not h.is_monitor])
else:
has_error = self._do_check(self.handlers)
# If there are no errors detected, perform
# postprocessing and exit.
if not has_error:
for v in self.validators:
if v.check():
s = "Validation failed: {}".format(v)
raise CustodianError(s, True, v)
job.postprocess()
return
# check that all errors could be handled
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise CustodianError(s, True, x["handler"])
for x in self.run_log[-1]["corrections"]:
if not x["actions"]:
s = "Unrecoverable error for handler: %s" % x["handler"]
raise CustodianError(s, False, x["handler"])
logger.info("Max errors reached.")
raise CustodianError("MaxErrors", True)
# Inherit Custodian docstrings
__init__.__doc__ = Custodian.__init__.__doc__ + __init__.__doc__
run.__doc__ = Custodian.run.__doc__
_run_job.__doc__ = Custodian._run_job.__doc__
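# Hypothetical usage sketch (my addition; `handlers` and `jobs` are custodian
# handler/job lists built elsewhere, and the scheduler exports PBS_NODEFILE):
#
#     import os
#     cust = SSHCustodian(handlers, jobs, max_errors=5,
#                         scratch_dir='/scratch',
#                         scratch_dir_node_only=True,
#                         pbs_nodefile=os.environ.get('PBS_NODEFILE'))
#     cust.run()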
| 43.505714 | 80 | 0.568858 | 13,715 | 0.900703 | 0 | 0 | 388 | 0.025481 | 0 | 0 | 5,862 | 0.384974 |
c88ca1454e3c43e792033b4722a580761e424d90 | 17,217 | py | Python | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
]
| 165 | 2015-01-12T09:09:19.000Z | 2022-03-14T11:26:23.000Z | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
]
| 35 | 2015-01-07T14:57:24.000Z | 2022-03-24T17:43:28.000Z | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
]
| 38 | 2015-03-11T09:10:05.000Z | 2022-01-17T11:29:38.000Z | '''
Sherlock: Distributed Locks with a choice of backend
====================================================
:mod:`sherlock` is a library that provides easy-to-use distributed inter-process
locks and also allows you to choose a backend of your choice for lock
synchronization.
|Build Status| |Coverage Status|
.. |Build Status| image:: https://travis-ci.org/vaidik/sherlock.png
:target: https://travis-ci.org/vaidik/sherlock/
.. |Coverage Status| image:: https://coveralls.io/repos/vaidik/incoming/badge.png
:target: https://coveralls.io/r/vaidik/incoming
Overview
--------
When you are working with resources which are accessed by multiple services or
distributed services, more often than not you need some kind of locking
mechanism to make sure that only one accessor uses a shared resource at a time.
Distributed Locks or Mutexes can help you with this. :mod:`sherlock` provides
the exact same facility, with some extra goodies. It provides an easy-to-use API
that resembles standard library's `threading.Lock` semantics.
Apart from this, :mod:`sherlock` gives you the flexibility of using a backend of
your choice for managing locks.
:mod:`sherlock` also makes it simple for you to extend :mod:`sherlock` to use
backends that are not supported.
Features
++++++++
* API similar to standard library's `threading.Lock`.
* Support for With statement, to cleanly acquire and release locks.
* Backend agnostic: supports `Redis`_, `Memcached`_ and `Etcd`_ as choice of
backends.
* Extendable: can be easily extended to work with any other of backend of
choice by extending base lock class. Read :ref:`extending`.
.. _Redis: http://redis.io
.. _Memcached: http://memcached.org
.. _Etcd: http://github.com/coreos/etcd
Supported Backends and Client Libraries
+++++++++++++++++++++++++++++++++++++++
Following client libraries are supported for every supported backend:
* Redis: `redis-py`_
* Memcached: `pylibmc`_
* Etcd: `python-etcd`_
.. _redis-py: http://github.com
.. _pylibmc: http://github.com
.. _python-etcd: https://github.com/jplana/python-etcd
As of now, only the above mentioned libraries are supported. Although
:mod:`sherlock` takes custom client objects so that you can easily provide the
settings you want to use for that backend store, it also checks whether the
provided client object is an instance of one of the supported clients and only
accepts client objects that pass this check, even if the APIs are the same.
:mod:`sherlock` might get rid of this restriction later, if need be and if
there is a demand for that.
Installation
------------
Installation is simple.
.. code:: bash
pip install sherlock
.. note:: :mod:`sherlock` will install all the client libraries for all the
supported backends.
Basic Usage
-----------
:mod:`sherlock` is simple to use: at the API and semantics level, it tries to
conform to standard library's :mod:`threading.Lock` APIs.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend,
# never expire locks and retry acquiring an acquired lock after an
# interval of 0.1 second.
sherlock.configure(backend=sherlock.backends.REDIS,
expire=None,
retry_interval=0.1)
    # Note: configuring sherlock to use a backend does not limit you from using
    # another backend at the same time. You can import backend specific locks
# like RedisLock, MCLock and EtcdLock and use them just the same way you
# use a generic lock (see below). In fact, the generic Lock provided by
# sherlock is just a proxy that uses these specific locks under the hood.
# acquire a lock called my_lock
lock = Lock('my_lock')
# acquire a blocking lock
lock.acquire()
# check if the lock has been acquired or not
lock.locked() == True
# release the lock
lock.release()
Support for ``with`` statement
++++++++++++++++++++++++++++++
.. code-block:: python
# using with statement
with Lock('my_lock'):
# do something constructive with your locked resource here
pass
Blocking and Non-blocking API
+++++++++++++++++++++++++++++
.. code-block:: python
# acquire non-blocking lock
lock1 = Lock('my_lock')
lock2 = Lock('my_lock')
# successfully acquire lock1
lock1.acquire()
# try to acquire lock in a non-blocking way
lock2.acquire(False)  # returns False, because lock1 already holds the lock
# try to acquire lock in a blocking way
lock2.acquire()  # blocks until the lock is acquired or a timeout happens
Using two backends at the same time
+++++++++++++++++++++++++++++++++++
Configuring :mod:`sherlock` to use a backend does not limit you from using
another backend at the same time. You can import backend specific locks like
RedisLock, MCLock and EtcdLock and use them just the same way you use a generic
lock (see below). In fact, the generic Lock provided by :mod:`sherlock` is just
a proxy that uses these specific locks under the hood.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend
sherlock.configure(backend=sherlock.backends.REDIS)
# Acquire a lock called my_lock, this lock uses Redis
lock = Lock('my_lock')
# Now acquire locks in Memcached
from sherlock import MCLock
mclock = MCLock('my_mc_lock')
mclock.acquire()
Tests
-----
To run all the tests (including integration), you have to make sure that all
the databases are running. Make sure all the services are running:
.. code:: bash
# memcached
memcached
# redis-server
redis-server
# etcd (etcd is probably not available as package, here is the simplest way
# to run it).
wget https://github.com/coreos/etcd/releases/download/<version>/etcd-<version>-<platform>.tar.gz
tar -zxvf etcd-<version>-<platform>.tar.gz
./etcd-<version>-<platform>/etcd
Run tests like so:
.. code:: bash
python setup.py test
Documentation
-------------
Available `here`_.
.. _here: http://sher-lock.readthedocs.org
Roadmap
-------
* Support for `Zookeeper`_ as backend.
* Support for `Gevent`_, `Multithreading`_ and `Multiprocessing`_.
.. _Zookeeper: http://zookeeper.apache.org/
.. _Gevent: http://www.gevent.org/
.. _Multithreading: http://docs.python.org/2/library/multithreading.html
.. _Multiprocessing: http://docs.python.org/2/library/multiprocessing.html
License
-------
See `LICENSE`_.
**In short**: This is an open-source project and exists in the public domain
for anyone to modify and use it. Just be nice and attribute the credits
wherever you can. :)
.. _LICENSE: http://github.com/vaidik/sherlock/blob/master/LICENSE.rst
Distributed Locking in Other Languages
--------------------------------------
* NodeJS - https://github.com/thedeveloper/warlock
'''
import etcd
import pylibmc
import redis
class _Backends(object):
'''
A simple object that provides a list of available backends.
'''
REDIS = {
'name': 'REDIS',
'library': 'redis',
'client_class': redis.StrictRedis,
'lock_class': 'RedisLock',
'default_args': (),
'default_kwargs': {},
}
ETCD = {
'name': 'ETCD',
'library': 'etcd',
'client_class': etcd.Client,
'lock_class': 'EtcdLock',
'default_args': (),
'default_kwargs': {},
}
MEMCACHED = {
'name': 'MEMCACHED',
'library': 'pylibmc',
'client_class': pylibmc.Client,
'lock_class': 'MCLock',
'default_args': (
['localhost'],
),
'default_kwargs': {
'binary': True,
},
}
_valid_backends = (
REDIS,
ETCD,
MEMCACHED,
)
def register(self, name, lock_class, library, client_class,
default_args=(), default_kwargs={}):
'''
Register a custom backend.
:param str name: Name of the backend by which you would want to refer
this backend in your code.
:param class lock_class: the sub-class of
:class:`sherlock.lock.BaseLock` that you have
implemented. The reference to your implemented
lock class will be used by
:class:`sherlock.Lock` proxy to use your
implemented class when you globally set that
the choice of backend is the one that has been
implemented by you.
:param str library: dependent client library that this implementation
makes use of.
:param client_class: the client class or valid type which you use to
connect the datastore. This is used by the
:func:`configure` function to validate that
the object provided for the `client`
parameter is actually an instance of this class.
:param tuple default_args: default arguments that need to passed to
create an instance of the callable passed to
`client_class` parameter.
:param dict default_kwargs: default keyword arguments that need to
passed to create an instance of the
callable passed to `client_class`
parameter.
Usage:
>>> import some_db_client
>>> class MyLock(sherlock.lock.BaseLock):
... # your implementation comes here
... pass
>>>
        >>> sherlock.backends.register(name='MyLock',
        ...                            lock_class=MyLock,
        ...                            library='some_db_client',
        ...                            client_class=some_db_client.Client,
        ...                            default_args=('localhost:1234',),
        ...                            default_kwargs=dict(connection_pool=6))
'''
if not issubclass(lock_class, lock.BaseLock):
raise ValueError('lock_class parameter must be a sub-class of '
'sherlock.lock.BaseLock')
setattr(self, name, {
'name': name,
'lock_class': lock_class,
'library': library,
'client_class': client_class,
'default_args': default_args,
'default_kwargs': default_kwargs,
})
valid_backends = list(self._valid_backends)
valid_backends.append(getattr(self, name))
self._valid_backends = tuple(valid_backends)
@property
def valid_backends(self):
'''
Return a tuple of valid backends.
:returns: a list of valid supported backends
:rtype: tuple
'''
return self._valid_backends
def configure(**kwargs):
'''
Set basic global configuration for :mod:`sherlock`.
:param backend: global choice of backend. This backend will be used
for managing locks by :class:`sherlock.Lock` class
objects.
:param client: global client object to use to connect with backend
store. This client object will be used to connect to the
backend store by :class:`sherlock.Lock` class instances.
The client object must be a valid object of the client
library. If the backend has been configured using the
`backend` parameter, the custom client object must belong
to the same library that is supported for that backend.
If the backend has not been set, then the custom client
object must be an instance of a valid supported client.
In that case, :mod:`sherlock` will set the backend by
introspecting the type of provided client object.
:param str namespace: provide global namespace
    :param float expire: provide global expiration time. If explicitly set to
`None`, lock will not expire.
:param float timeout: provide global timeout period
:param float retry_interval: provide global retry interval
Basic Usage:
>>> import sherlock
>>> from sherlock import Lock
>>>
>>> # Configure sherlock to use Redis as the backend and the timeout for
>>> # acquiring locks equal to 20 seconds.
>>> sherlock.configure(timeout=20, backend=sherlock.backends.REDIS)
>>>
>>> import redis
>>> redis_client = redis.StrictRedis(host='X.X.X.X', port=6379, db=1)
>>> sherlock.configure(client=redis_client)
'''
_configuration.update(**kwargs)
class _Configuration(object):
def __init__(self):
# Choice of backend
self._backend = None
# Client object to connect with the backend store
self._client = None
# Namespace to use for setting lock keys in the backend store
self.namespace = None
# Lock expiration time. If explicitly set to `None`, lock will not
# expire.
self.expire = 60
# Timeout to acquire lock
self.timeout = 10
# Retry interval to retry acquiring a lock if previous attempts failed
self.retry_interval = 0.1
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, val):
if val not in backends.valid_backends:
backend_names = list(map(
lambda x: 'sherlock.backends.%s' % x['name'],
backends.valid_backends))
error_str = ', '.join(backend_names[:-1])
backend_names = '%s and %s' % (error_str,
backend_names[-1])
raise ValueError('Invalid backend. Valid backends are: '
'%s.' % backend_names)
self._backend = val
@property
def client(self):
if self._client is not None:
return self._client
else:
if self.backend is None:
raise ValueError('Cannot create a default client object when '
'backend is not configured.')
for backend in backends.valid_backends:
if self.backend == backend:
self.client = self.backend['client_class'](
*self.backend['default_args'],
**self.backend['default_kwargs'])
return self._client
@client.setter
def client(self, val):
# When backend is set, check client type
if self.backend is not None:
exc_msg = ('Only a client of the %s library can be used '
'when using %s as the backend store option.')
if isinstance(val, self.backend['client_class']):
self._client = val
else:
raise ValueError(exc_msg % (self.backend['library'],
self.backend['name']))
else:
for backend in backends.valid_backends:
if isinstance(val, backend['client_class']):
self._client = val
self.backend = backend
if self._client is None:
                raise ValueError('The provided object is not a valid client '
'object. Client objects can only be '
'instances of redis library\'s client class, '
'python-etcd library\'s client class or '
'pylibmc library\'s client class.')
def update(self, **kwargs):
'''
Update configuration. Provide keyword arguments where the keyword
parameter is the configuration and its value (the argument) is the
value you intend to set.
:param backend: global choice of backend. This backend will be used
for managing locks.
:param client: global client object to use to connect with backend
store.
:param str namespace: optional global namespace to namespace lock keys
for your application in order to avoid conflicts.
:param float expire: set lock expiry time. If explicitly set to `None`,
lock will not expire.
:param float timeout: global timeout for acquiring a lock.
:param float retry_interval: global timeout for retrying to acquire the
lock if previous attempts failed.
'''
for key, val in kwargs.items():
if key not in dir(self):
raise AttributeError('Invalid configuration. No such '
'configuration as %s.' % key)
setattr(self, key, val)
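# Configuration resolution sketch (illustrative, not part of the original module): the
# ``client`` property lazily builds a default client for the configured backend, while
# assigning a client first lets sherlock infer the backend from the client's type.
#
#   sherlock.configure(backend=sherlock.backends.REDIS)   # default redis.StrictRedis() client
#   sherlock.configure(client=redis.StrictRedis(db=1))    # backend inferred as REDIS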
# Create a backends singleton
backends = _Backends()
# Create a configuration singleton
_configuration = _Configuration()
# Import important Lock classes
from . import lock
from .lock import *
| 34.228628 | 100 | 0.606319 | 8,265 | 0.480049 | 0 | 0 | 2,561 | 0.148748 | 0 | 0 | 13,491 | 0.783586 |
c88d252547df6d3f79fae0aefc72512a6ebb61d4 | 7,199 | py | Python | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
]
| 46 | 2020-04-13T07:54:49.000Z | 2022-03-01T06:29:15.000Z | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
]
| 2 | 2020-07-27T15:11:09.000Z | 2021-04-04T10:58:03.000Z | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
]
| 5 | 2020-06-22T01:56:30.000Z | 2021-12-22T04:34:49.000Z | from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from skimage.draw import random_shapes
import os
import json
def get_masks_for_training(
mask_shapes: List[Tuple] =
[(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)],
device: str = 'cpu', add_batch_size: bool = False,
p_random_mask: float = 0.3) -> List[torch.Tensor]:
'''
Method returns random masks similar to 3.2. of the paper
:param mask_shapes: (List[Tuple]) Shapes of the features generated by the vgg16 model
:param device: (str) Device to store tensor masks
:param add_batch_size: (bool) If true a batch size is added to each mask
:param p_random_mask: (float) Probability that a random mask is generated else no mask is utilized
:return: (List[torch.Tensor]) Generated masks for each feature tensor
'''
# Select layer where no masking is used. Every output from the deeper layers get mapped out. Every higher layer gets
# masked by a random shape
selected_stage = random.choice(list(range(len(mask_shapes))) + [0, 1])
# Make masks
masks = []
# Apply spatial varying masks
spatial_varying_masks = (np.random.rand() < p_random_mask) \
and (selected_stage < (len(mask_shapes) - 1)) \
and (selected_stage > 0)
# Init random mask
if spatial_varying_masks:
random_mask = random_shapes(tuple(reversed(mask_shapes))[selected_stage + 1][1:],
min_shapes=1,
max_shapes=4,
min_size=min(8, tuple(reversed(mask_shapes))[selected_stage + 1][1] // 2),
allow_overlap=True)[0][:, :, 0]
# Random mask to torch tensor
random_mask = torch.tensor(random_mask, dtype=torch.float32, device=device)[None, :, :]
# Change range of mask to [0, 1]
random_mask = (random_mask == 255.0).float()
# Loop over all shapes
for index, mask_shape in enumerate(reversed(mask_shapes)):
# Case if spatial varying masks are applied after selected stage
if spatial_varying_masks:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
elif index < selected_stage:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(F.interpolate(random_mask[None], size=mask_shape[1:], mode='nearest')[0])
# Case if only one stage is selected
else:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
# Add batch size dimension
if add_batch_size:
for index in range(len(masks)):
masks[index] = masks[index].unsqueeze(dim=0)
# Reverse order of masks to match the features of the vgg16 model
masks.reverse()
return masks
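# Usage sketch (illustrative, not part of the original training pipeline): draw one set of
# random training masks for the default VGG-16 feature shapes and inspect them.
#
#   masks = get_masks_for_training(device="cpu", add_batch_size=True)
#   for mask in masks:
#       print(mask.shape)  # one mask per feature tensor, in the same order as mask_shapes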
def get_masks_for_validation(mask_shapes: Tuple[Tuple[int, int, int], ...] =
((1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,),
(365,)), device: str = 'cpu', add_batch_size: bool = False) -> List[torch.Tensor]:
return get_masks_for_inference(stage_index_to_choose=random.choice(range(len(mask_shapes))),
mask_shapes=mask_shapes, device=device, add_batch_size=add_batch_size)
def get_masks_for_inference(stage_index_to_choose: int,
mask_shapes: Tuple[Tuple[int, int, int], ...] = (
(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)),
device: str = 'cpu',
add_batch_size: bool = False) -> List[torch.Tensor]:
# Init list for masks
masks = []
# Loop over all shapes
for index, mask_shape in enumerate(reversed(mask_shapes)):
if index == stage_index_to_choose:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
# Add batch size dimension
if add_batch_size:
for index in range(len(masks)):
masks[index] = masks[index].unsqueeze(dim=0)
# Reverse order of masks to match the features of the vgg16 model
masks.reverse()
return masks
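# Usage sketch (illustrative): build inference masks that keep exactly one stage active.
# ``stage_index_to_choose`` counts from the deepest feature (index 0 = the 365-d class
# vector) towards the largest spatial map; every other stage is zeroed out.
#
#   inference_masks = get_masks_for_inference(stage_index_to_choose=2, add_batch_size=True)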
def normalize_0_1_batch(input: torch.tensor) -> torch.tensor:
'''
    Normalize a given tensor to a range of [0, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None]))
def normalize_m1_1_batch(input: torch.tensor) -> torch.tensor:
'''
Normalize a given tensor to a range of [-1, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return 2 * ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None])) - 1
class Logger(object):
"""
Class to log different metrics
"""
def __init__(self) -> None:
self.metrics = dict()
self.hyperparameter = dict()
def log(self, metric_name: str, value: float) -> None:
"""
Method writes a given metric value into a dict including list for every metric
:param metric_name: (str) Name of the metric
:param value: (float) Value of the metric
"""
if metric_name in self.metrics:
self.metrics[metric_name].append(value)
else:
self.metrics[metric_name] = [value]
def save_metrics(self, path: str) -> None:
"""
        Method saves the dict of hyperparameters as a json file and every logged metric as a torch tensor
        :param path: (str) Path of the folder to save hyperparameters and metrics to
"""
# Save dict of hyperparameter as json file
with open(os.path.join(path, 'hyperparameter.txt'), 'w') as json_file:
json.dump(self.hyperparameter, json_file)
# Iterate items in metrics dict
for metric_name, values in self.metrics.items():
            # Convert list of values to a torch tensor to use torch's built-in save method
values = torch.tensor(values)
# Save values
torch.save(values, os.path.join(path, '{}.pt'.format(metric_name)))
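# Usage sketch (illustrative; assumes the target folder already exists):
#
#   logger = Logger()
#   logger.hyperparameter["learning_rate"] = 1e-4
#   for epoch in range(3):
#       logger.log(metric_name="generator_loss", value=1.0 / (epoch + 1))
#   logger.save_metrics(path="./experiments")  # writes hyperparameter.txt and generator_loss.pt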
| 44.99375 | 120 | 0.607862 | 1,461 | 0.202945 | 0 | 0 | 0 | 0 | 0 | 0 | 2,154 | 0.299208 |
c88f24e0c4f56b49a1514bbc5fcfcc00efd5e15c | 4,204 | py | Python | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
]
| 4 | 2022-03-05T20:51:38.000Z | 2022-03-15T17:10:22.000Z | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
]
| null | null | null | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
]
| 1 | 2022-03-08T13:45:22.000Z | 2022-03-08T13:45:22.000Z | import math
from typing import Dict, List, Tuple, Union
from EasyMCDM.models.MCDM import MCDM
# Instant-Runoff Multicriteria Optimization (IRMO)
class Irmo(MCDM):
# Memory allocation
__slots__ = ['verbose', 'matrix', 'names', 'indexes', 'preferences', 'matrix']
# Constructor
def __init__(self, data : Union[str, dict], col_sep=',', row_sep='\n', verbose=True):
super().__init__(data, col_sep=col_sep, row_sep=row_sep, verbose=verbose)
# Read the lines of indexes
def get_indexes(self, path) -> List:
f = open(path,"r")
content = f.read()
f.close()
return [[int(i) for i in w.split(self.col_sep)] for w in content.split(self.row_sep) if len(w) > 0]
def __getVector(self, i, idx, banned, nbr_rounds):
items_lst = []
for s in self.matrix.keys():
# Check if already banned
if s not in banned:
insert_value = self.matrix[s][i]
else:
# Best item value
if (idx == nbr_rounds - 1 and self.preferences[idx] == "min") or (idx != nbr_rounds - 1 and self.preferences[idx] == "max"):
insert_value = math.inf
else:
insert_value = -math.inf
items_lst.append(insert_value)
return items_lst
# Compute
def __compute(self) -> Tuple[float, float]:
banned = []
# Check if the number of criteria is higher than the number of subjects else reduce the number of rounds
nbr_rounds = len(self.indexes) if len(self.indexes) <= len(self.matrix.keys()) else len(self.matrix.keys())
# For each criteria
for idx, i in enumerate(self.indexes):
# Values for the subjects left
items_vec = self.__getVector(i, idx, banned, nbr_rounds)
# Best item value
if (idx == nbr_rounds - 1 and self.preferences[idx] == "min") or (idx != nbr_rounds - 1 and self.preferences[idx] == "max"):
value = min(items_vec)
else:
value = max(items_vec)
# Worst item index
item_idx = items_vec.index(value)
item_name = list(self.matrix.keys())[item_idx]
# Ban Worst item
banned.append(item_name)
# Reverse the rank
banned.reverse()
return {
"best": banned[0], # Return best
"eleminated": banned
}
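    # Usage sketch (illustrative; assumes, as the code above suggests, that the dict form of
    # `data` maps each candidate name to its list of criterion values):
    #
    #   data = {"car_a": [7000.0, 120.0], "car_b": [9000.0, 160.0], "car_c": [8500.0, 130.0]}
    #   result = Irmo(data, verbose=False).solve(indexes=[0, 1], prefs=["min", "max"])
    #   result["best"]  # candidate that survives every runoff round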
# Solve the problem
def solve(
self,
indexes : Union[str, list],
prefs : Union[str, List[str]],
indexes_idx = 0
) -> Dict:
# Define the indexes of the attributes
if type(indexes) == str:
self.indexes = self.get_indexes(indexes)[indexes_idx]
elif type(indexes) == list:
self.indexes = indexes
        # Check if the lengths match together
        assert len(self.indexes) <= self.constraints_length, '\033[91m' + "The number of indexes exceeds the number of matrix constraints, please give a consistent length!" + '\033[0m'
        # Check variable types
        assert all(isinstance(e, (int)) for e in self.indexes), '\033[91m' + "The indexes have invalid types, please give only integers!" + '\033[0m'
# Get preferences
if type(prefs) == str:
self.preferences = self.get_preferences(prefs)
elif type(prefs) == list:
self.preferences = prefs
        # Check if there are preferences other than max and min
        assert all([a in ['max', 'min'] for a in sorted(list(set(self.preferences)))]), '\033[91m' + "The preferences need to contain only min and max. Found : " + str(sorted(list(set(self.preferences)))) + '\033[0m'
        # Check if the lengths match together
        assert len(self.preferences) == len(self.indexes), '\033[91m' + "The number of preferences does not match the number of indexes, please give consistent lengths!" + '\033[0m'
return self.__compute() | 37.873874 | 218 | 0.562559 | 4,051 | 0.963606 | 0 | 0 | 0 | 0 | 0 | 0 | 1,159 | 0.27569 |
c8919966f9b0c8cb69e17d80a649cb9b3d0b7138 | 2,046 | py | Python | ramp/estimators/r.py | kvh/ramp | 8618ce673e49b95f40c9659319c3cb72281dacac | [
"MIT"
]
| 214 | 2015-01-01T07:42:25.000Z | 2022-03-08T08:57:49.000Z | ramp/estimators/r.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
]
| 8 | 2020-05-19T20:15:40.000Z | 2020-05-19T20:15:41.000Z | ramp/estimators/r.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
]
| 87 | 2015-01-13T19:25:15.000Z | 2021-05-16T10:40:05.000Z | import numpy as np
from rpy2.robjects import FloatVector
from rpy2.robjects.packages import importr
from rpy2 import robjects
stats = importr('stats')
base = importr('base')
def matrix_to_r_dataframe(x):
rx = FloatVector(np.ravel(x))
rx = robjects.r['matrix'](rx, nrow = len(x), byrow=True)
return robjects.r["data.frame"](rx)
class REstimator(object):
def __init__(self, r_estimator, **kwargs):
self.estimator = r_estimator
self.kwargs = kwargs
def fit(self, x, y):
rx = matrix_to_r_dataframe(x)
ry = FloatVector(y)
robjects.globalenv["y"] = ry
self.estimator_fit = self.estimator("y ~ .", data=rx,
**self.kwargs)
def predict(self, x):
rx = matrix_to_r_dataframe(x)
return np.array(stats.predict(self.estimator_fit, rx)[0])
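# Usage sketch (illustrative): REstimator wraps any R modelling function that follows the
# "formula + data.frame" calling convention, e.g. R's stats::lm, behind a fit/predict API.
#
#   est = REstimator(stats.lm)
#   est.fit(x_train, y_train)         # x_train: 2-D numpy array, y_train: 1-D numpy array
#   predictions = est.predict(x_test)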
class OrderedLogit(object):
def fit(self, x, y):
ordinal = importr('ordinal')
rx = matrix_to_r_dataframe(x)
self.levels = range(int(round(min(y))), int(round(max(y)))+1)
ry = base.factor(FloatVector(y), levels=self.levels, ordered=True)
robjects.globalenv["y"] = ry
self.clmfit = ordinal.clm("y ~ .", data=rx)
#print base.summary(self.clmfit)
def predict(self, x):
rx = matrix_to_r_dataframe(x)
rfac = stats.predict(self.clmfit, rx, type="class")[0]
rvec = [self.levels[v - 1] for v in rfac]
return rvec
class WeightedLM(object):
def fit(self, x, y, weights):
rx = matrix_to_r_dataframe(x)
ry = FloatVector(y)
rw = FloatVector(weights)
robjects.globalenv["score"] = ry
self.lmfit = stats.lm("score ~ .", data=rx, weights=rw)
#print base.summary(self.clmfit)
def predict(self, x):
rx = matrix_to_r_dataframe(x)
rvec = stats.predict(self.lmfit, rx)[0]
return np.array(rvec)
class GBM(REstimator):
def __init__(self, **kwargs):
gbm = importr('gbm')
super(GBM, self).__init__(gbm.gbm, **kwargs)
| 29.652174 | 74 | 0.610948 | 1,680 | 0.821114 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.076246 |
c89234777cdd2b2357d8a397dcec12fefab43a56 | 1,138 | py | Python | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
]
| null | null | null | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
]
| 1 | 2022-03-02T11:49:02.000Z | 2022-03-02T11:49:02.000Z | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
]
| null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from azure.functions.decorators.constants import TIMER_TRIGGER
from azure.functions.decorators.core import BindingDirection, DataType
from azure.functions.decorators.timer import TimerTrigger
class TestTimer(unittest.TestCase):
def test_timer_trigger_valid_creation(self):
trigger = TimerTrigger(name="req",
schedule="dummy_schedule",
data_type=DataType.UNDEFINED,
run_on_startup=False,
use_monitor=False,
dummy_field="dummy")
self.assertEqual(trigger.get_binding_name(), "timerTrigger")
self.assertEqual(trigger.get_dict_repr(), {
"type": TIMER_TRIGGER,
"direction": BindingDirection.IN,
'dummyField': 'dummy',
"name": "req",
"dataType": DataType.UNDEFINED,
"schedule": "dummy_schedule",
"runOnStartup": False,
"useMonitor": False
})
| 37.933333 | 70 | 0.598418 | 830 | 0.72935 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.21529 |
c895e6b35498811fbcaa8204ceff2eff7744a4b3 | 8,368 | py | Python | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
]
| null | null | null | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
]
| null | null | null | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
]
| null | null | null | import socket
import threading
import time
from threading import Thread
import utilities as utils
import error_handling as check
BUFFER_SIZE = 1024
BROADCAST_MAC = "FF:FF:FF:FF:FF:FF"
class ClientThread(threading.Thread):
"""
Initializes the client.
The event synchronization primitive, among the initialization parameters
given, is used to guarantee that the client will send its message when
the router is actually listening.
"""
def __init__(self, init_params):
self.connected = False
self.clients_threads = init_params["clients_threads"]
self.arp_table_mac = init_params["arp_table_mac"]
self.client_data = init_params["client_data"]
self.client_id = init_params["client_id"]
self.router_thread = init_params["router_thread"]
self.router_id = init_params["router_id"]
self.sync_event_message = init_params["sync_event_message"]
self.sync_event_connection = init_params["sync_event_connection"]
self.stop_event = threading.Event()
self.sleep_time = 1.0
port = self.client_data["port"]
address = ("localhost", port)
self.client_connection = check.socket_create(
address,
backlog = 0,
timeout = 3,
reuse_address = True
)
threading.Thread.__init__(self, name=self.client_id)
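    # Expected shape of ``init_params`` (sketch inferred from the attributes read above;
    # the concrete values are illustrative assumptions):
    #
    #   init_params = {
    #       "client_id": "client1",
    #       "router_id": "router1",
    #       "client_data": {"ip_address": ..., "mac_address": ..., "port": ...,
    #                       "gateway_ip": ..., "gateway_port": ..., "server_ip": ...},
    #       "arp_table_mac": {...},                      # IP address -> MAC address
    #       "clients_threads": {...},                    # client id -> ClientThread
    #       "router_thread": ...,                        # router thread of this network
    #       "sync_event_message": threading.Event(),
    #       "sync_event_connection": threading.Event(),
    #   }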
"""
Client main loop.
Listens for messages from the network.
"""
def run(self):
utils.show_status(self.getName(), "starting")
connected = self.go_online()
if(connected is True):
utils.show_status(self.client_id, "listening for incoming packets")
while not self.stop_event.isSet():
self.listen_packets()
# exit procedure
utils.show_status(self.client_id, "going offline")
utils.show_status(self.client_id, "closing connection")
self.client_connection.close()
self.stop_event.clear()
del self.clients_threads[self.client_id]
"""
Tells the client to exit from its main loop.
It goes offline thus closing its connection to the network.
"""
def join(self, timeout=None):
self.stop_event.set()
threading.Thread.join(self, timeout)
"""
Tells the router of its network to start listening for a message
from this client.
"""
def notify_incoming_message(self):
msg = " ".join(["notifying", self.router_id, "of an incoming message"])
utils.show_status(self.client_id, msg)
my_ip_address = self.client_data["ip_address"]
listen_task = threading.Thread(
target=self.router_thread.listen_client_side,
args=[my_ip_address],
daemon=True
)
listen_task.start()
"""
Tells the router of its network to start listening for a connection
from this client.
"""
def notify_incoming_connection(self):
msg = " ".join(["notifying",self.router_id, \
"of an incoming connection"])
utils.show_status(self.client_id, msg)
listen_task = threading.Thread(
target=self.router_thread.listen_connections_client_side,
daemon=True
)
listen_task.start()
"""
Sends packets to other clients.
"""
def send_message(self, recipient_ip, message):
gateway_ip = self.client_data["gateway_ip"]
packet = utils.write_packet(
self.client_data["ip_address"],
recipient_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
message
)
utils.show_status(
self.client_id,
"waiting for router listening messages"
)
self.notify_incoming_message()
# waiting for router approving message sending
self.sync_event_message.wait()
sent = check.socket_send(self.client_connection, packet, self.router_id)
if(sent is True):
msg = " ".join(["message sent to", gateway_ip])
utils.show_status(self.client_id, msg)
self.sync_event_message.clear()
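    # Example call (hypothetical addresses): deliver a payload to another host through the
    # gateway router of this client's network.
    #
    #   client.send_message(recipient_ip="192.168.1.2", message="{hello}")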
"""
Sends a special packet to notify the server it is currently online.
Returns false if the connection was not established or the packet could not
be sent (in the latter case the server will not recognize the client as
online, hence the action go_online is considered failed even if a connection
has been created)
"""
def go_online(self):
utils.show_status(self.client_id, "connecting to the network")
server_ip = self.client_data["server_ip"]
router_port = self.client_data["gateway_port"]
router_address = ("localhost", router_port)
gateway_ip = self.client_data["gateway_ip"]
self.notify_incoming_connection()
# waiting for router approving connection
self.sync_event_connection.wait()
self.sync_event_connection.clear() # ready for reuse
connected = check.socket_connect(
self.client_connection,
router_address,
self.client_id
)
if(connected is True):
utils.show_status(self.client_id, "going online")
# waiting for router completing connection procedure
self.sync_event_connection.wait()
self.sync_event_connection.clear() # ready for reuse
greeting_packet = utils.write_packet(
self.client_data.get("ip_address"),
server_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
"{going online}"
)
utils.show_status(
self.client_id,
"waiting for router accepting message"
)
self.notify_incoming_message()
# waiting for router approving message sending
self.sync_event_message.wait()
self.sync_event_message.clear()
check.socket_send(
self.client_connection,
greeting_packet,
self.client_id,
"Greeting packet could not be sent"
)
return connected
"""
Sends a special packet to notify the server it is currently offline.
Then closes its connection to the network.
"""
def go_offline(self):
utils.show_status(self.client_id, "going offline")
gateway_ip = self.client_data["gateway_ip"]
server_ip = self.client_data["server_ip"]
leave_packet = utils.write_packet(
self.client_data.get("ip_address"),
server_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
"{going offline}"
)
self.notify_incoming_message()
self.sync_event_message.wait() # wait for router approval
self.sync_event_message.clear()
check.socket_send(
self.client_connection,
leave_packet,
self.client_id,
"Leave packet could not be sent"
)
self.join()
"""
Listens for packets from the server.
"""
def listen_packets(self):
received_message = check.socket_recv(
self.client_connection,
self.client_id
)
if(received_message is not None and len(received_message) > 0):
parsed_message = utils.read_packet(received_message)
time.sleep(2) # give time to router to show its status
msg = " ".join(["message received from:",
parsed_message["source_ip"]])
utils.show_status(self.client_id, msg)
utils.report(
self.client_id,
parsed_message,
"reading received packet"
)
if(parsed_message["destination_mac"] == BROADCAST_MAC):
msg = " ".join(["received an ARP request from",
parsed_message["source_ip"]])
utils.show_status(self.client_id, msg)
self.send_message(
parsed_message.get("source_ip"),
"{ARP reply}"
)
| 32.30888 | 80 | 0.603848 | 8,181 | 0.977653 | 0 | 0 | 0 | 0 | 0 | 0 | 2,403 | 0.287165 |
c8962401f6f771809773c10b2765a3a3a3c92f1b | 2,569 | py | Python | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
]
| 1 | 2022-01-26T18:51:29.000Z | 2022-01-26T18:51:29.000Z | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
]
| null | null | null | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
]
| 1 | 2021-11-29T07:37:28.000Z | 2021-11-29T07:37:28.000Z | import json
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.types import SerializableDictDot, safe_deep_copy
from great_expectations.util import deep_filter_properties_iterable
class Builder(SerializableDictDot):
"""
A Builder provides methods to serialize any builder object of a rule generically.
"""
def to_json_dict(self) -> dict:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of "SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the
reference implementation in the "SerializableDictDot" class itself. However, the circular import dependencies,
due to the location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules
make this refactoring infeasible at the present time.
"""
dict_obj: dict = self.to_dict()
serializeable_dict: dict = convert_to_json_serializable(data=dict_obj)
return serializeable_dict
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for key, value in self.to_raw_dict().items():
value_copy = safe_deep_copy(data=value, memo=memo)
setattr(result, key, value_copy)
return result
def __repr__(self) -> str:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of a custom "__repr__()" occurs frequently and should ideally serve as the reference
implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the
location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this
refactoring infeasible at the present time.
"""
json_dict: dict = self.to_json_dict()
deep_filter_properties_iterable(
properties=json_dict,
inplace=True,
)
return json.dumps(json_dict, indent=2)
def __str__(self) -> str:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of a custom "__str__()" occurs frequently and should ideally serve as the reference
implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the
location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this
refactoring infeasible at the present time.
"""
return self.__repr__()
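    # Note (illustrative sketch): any concrete builder that provides the "to_dict()" /
    # "to_raw_dict()" accessors used above gets JSON rendering for free, e.g.:
    #
    #   builder.to_json_dict()   # JSON-serializable dict of the builder's configuration
    #   str(builder)             # same view, pretty-printed after filtering empty properties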
| 42.114754 | 119 | 0.69093 | 2,342 | 0.911639 | 0 | 0 | 0 | 0 | 0 | 0 | 1,470 | 0.572207 |
c8963aa9c2fd19d072617ac3bc9699a61aa29633 | 449 | py | Python | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
]
| null | null | null | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
]
| null | null | null | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
]
| null | null | null | # #1. Health check
# # Ask user for their temperature.
# # If the user enters below 35, then output "not too cold"
# # If 35 to 37 (inclusive), output "all right"
# # If the temperature over 37, then output "possible fever"
#
user_temp = float(input('What is your temperature?'))
if user_temp < 35:
    print('not too cold')
elif user_temp >= 35 and user_temp <= 37:
print('all right')
else: # temperature over 37
print('possible fever')
| 32.071429 | 62 | 0.679287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.694878 |