max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
main.py | Pommers/LCExtract | 0 | 12792951 | <filename>main.py
from src.LCExtract.LCExtract import LCExtract
if __name__ == '__main__':
LCExtract()
| 1.03125 | 1 |
import_export_google_civic/migrations/0001_initial.py | adborden/WeVoteBase | 0 | 12792952 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='GoogleCivicCandidateCampaign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')),
('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)),
('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)),
('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)),
('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')),
('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)),
('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)),
('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)),
('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)),
('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)),
('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)),
('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)),
('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)),
('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)),
                ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign phone', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicContestOffice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('office', models.CharField(max_length=254, verbose_name=b'google civic office')),
('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)),
('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)),
('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be elected', blank=True)),
('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)),
('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)),
('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)),
('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),
('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')),
('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')),
('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')),
('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicContestReferendum',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')),
('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')),
('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')),
('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),
('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')),
('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')),
('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')),
('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicElection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')),
('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)),
('name', models.CharField(max_length=254, verbose_name=b'google civic election name')),
('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
]
| 1.695313 | 2 |
aliyun_rocketmq_provider/__init__.py | Ed-XCF/airflow-provider-rocketmq | 0 | 12792953 | def get_provider_info():
return {
"package-name": "airflow-providers-aliyun-rocketmq",
"name": "Aliyun RocketMQ Airflow Provider",
"description": "Airflow provider for aliyun rocketmq",
"hook-class-names": ["aliyun_rocketmq_provider.hooks.aliyun_rocketmq.AliyunRocketMQHook"],
"versions": ["0.1.2"]
}
| 1.601563 | 2 |
rrs_cleaner.py | mwweinberg/red_raid_squeakquel | 0 | 12792954 | from bs4 import BeautifulSoup
import urllib
#sets the URLs
h1 = "test_eng.html"
h2 = "test2_eng.html"
# need to either figure out how to skip "None" results or turn search_all into a string
def headliner(url):
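    # Parse the saved HTML file and print the headline tags (h1/h2/h3) it contains.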
soup = BeautifulSoup((open(url)), "lxml")
head1 = soup.find_all(['h1','h2','h3'])
head2 = soup.h2.string
head3 = soup.h3.string
    print(head1)
    print(head1[0].get_text())
    #print(head1[1].get_text())
    #print(head2[2].get_text())
    #print(head2)
    #print(head3)
    print("")
headliner(h1)
| 3.25 | 3 |
setup.py | test-room-7/alias | 2 | 12792955 | <filename>setup.py
import os
import sys
from setuptools import setup
from setuptools.command.install import install
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
from alias import ALIASES_DIR_VAR # noqa: E402
from alias import get_aliases_dir # noqa: E402
def set_env_var(var, value):
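    # `setx` writes the variable to the Windows registry so it persists for future
    # sessions; this helper therefore assumes a Windows host.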
os.system(f'setx {var} "{value}"')
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
install.run(self)
if ALIASES_DIR_VAR not in os.environ:
aliases_dir = get_aliases_dir()
set_env_var(ALIASES_DIR_VAR, aliases_dir)
setup(
cmdclass={
'install': PostInstallCommand,
}
)
| 2.203125 | 2 |
test/client/network/test_get_public_ip.py | redmic-project/device-oag-buoy-buoy-client | 0 | 12792956 | import unittest
from unittest.mock import patch
from nose.tools import eq_
from buoy.client.network import ip
class MockResponse:
def __init__(self, **kwargs):
self.content = str.encode(kwargs.pop('content', ""))
self.status_code = kwargs.pop('status_code', 404)
class TestPublicIP(unittest.TestCase):
def setUp(self):
self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com',
'https://api.ipify1.org', 'http://ip.42.pl/raw']
@patch.object(ip, 'get')
def test_get_public_ip_return_ip_in_last_service(self, mock_method):
service_ok = self.services[-1]
max_attempts = len(self.services)
ip_expected = "172.16.58.3"
def mocked_requests_get(*args, **kwargs):
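            # only the designated service URL gets a 200 response carrying the expected
            # IP; every other call falls through to the default 404 MockResponse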
mock_resp = MockResponse()
if args[0] == service_ok:
mock_resp = MockResponse(content=ip_expected, status_code=200)
return mock_resp
mock_method.side_effect = mocked_requests_get
eq_(ip_expected, ip.get_public_ip(services=self.services))
eq_(mock_method.call_count, max_attempts)
@patch.object(ip, 'get')
def test_get_public_ip_return_ip_in_first_service(self, mock_method):
service_ok = self.services[0]
max_attempts = 1
ip_expected = "172.16.58.3"
def mocked_requests_get(*args, **kwargs):
mock_resp = MockResponse()
if args[0] == service_ok:
mock_resp = MockResponse(content=ip_expected, status_code=200)
return mock_resp
mock_method.side_effect = mocked_requests_get
eq_(ip_expected, ip.get_public_ip(services=self.services))
eq_(mock_method.call_count, max_attempts)
@patch.object(ip, 'get')
def test_get_public_ip_return_exception(self, mock_method):
max_attempts = len(self.services)
def mocked_requests_get(*args, **kwargs):
return MockResponse()
mock_method.side_effect = mocked_requests_get
self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services)
eq_(mock_method.call_count, max_attempts)
if __name__ == '__main__':
unittest.main()
| 2.703125 | 3 |
data_handler/PostDataGenerator/InputEstimators/InputEstimationVisualizer.py | muratcancicek/pointer_head | 0 | 12792957 | <reponame>muratcancicek/pointer_head
from .MappingFunctions import Boundary, StaticMapping, DynamicMapping
from datetime import datetime
from ... import Paths
import numpy as np
import cv2
import os
class InputEstimationVisualizer(object):
def __init__(self, sceneScale = 1,
landmarkColorStr = '#00ff00', screenColorStr = '#0000ff'):
        super().__init__()
self._size = (1280, 720)
def addBox(self, frame, pPts):
color = (255, 255, 255)
cv2.polylines(frame, [pPts], True, color, 2, cv2.LINE_AA)
if len(pPts) > 4:
_pPts = []
for start, end in [(1,6), (2, 7), (3, 8)]:
p = (tuple(pPts[start]), tuple(pPts[end]))
_pPts.append(p)
for start, end in _pPts:
cv2.line(frame, start, end, color, 2, cv2.LINE_AA)
return frame
def addLandmarks(self, frame, landmarks, c = (255, 0, 0)):
for i, (x, y) in enumerate(landmarks):
#if not i in [39, 42]:
# continue
cv2.circle(frame, (x, y), 6, c, -1, cv2.LINE_AA)
return frame
def addPointer(self, frame, outputValues):
#boundaries = self._mappingFunc.getOutputBoundaries()
outputSize = (1920, 1080)
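        # mirror the x coordinate across the output width before mapping it onto the frame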
outputValues[0] = outputSize[0] - outputValues[0]
boundaries = Boundary(0, outputSize[0], 0, outputSize[1])
(height, width, depth) = frame.shape
(xRange, yRange, _) = boundaries.getRanges()
if xRange != width or yRange != height:
xRange, yRange = boundaries.getVolumeAbsRatio(outputValues)
x, y = int(xRange*width), int(yRange*height)
else:
x, y = outputValues[:2].astype(int)
cv2.circle(frame, (x, y), 1, (0, 0, 235), 56, cv2.LINE_AA)
return frame
def addAllInputs(self, frame, pPts = None,
landmarks = None, outputValues = None):
if not landmarks is None:
frame = self.addLandmarks(frame, landmarks.astype(int))
if not pPts is None:
frame = self.addBox(frame, pPts.astype(int))
if not outputValues is None:
frame = self.addPointer(frame, outputValues.astype(int))
return frame
def showFrame(self, frame, delay = 1):
cv2.imshow('frame', frame)
k = cv2.waitKey(delay)
if k == 27 or k == ord('q'):
return False
else:
return True
def __addText(self, frame, text, pos, color, largeScale = True):
if largeScale:
cv2.putText(frame, text, pos,
cv2.FONT_HERSHEY_SIMPLEX, 2, color, thickness=8)
else:
cv2.putText(frame, text, pos,
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, thickness=1)
return frame
def _addValuesLineByLine(self, frame, values, labels,
position, colors, largeScale = True):
for v, l, c in zip(values, labels, colors):
text = "{:s}: {:7.2f}".format(l, float(v))
frame = self.__addText(frame, text, position, c, largeScale)
position = (position[0], position[1]+(70 if largeScale else 30))
return frame
def _addValues(self, inputValues, frame,
pos = (20, 60), prefix = '', largeScale = True):
labels = [prefix+l for l in ['X', 'Y', 'Z']]
g = 0 if largeScale else 200
colors = ((0, 0, 255), (0, 255, 0), (255, g, 0))
return self._addValuesLineByLine(frame, inputValues, labels,
pos, colors, largeScale)
def _addMeasurements(self, inputValues, pose, frame, largeScale = True):
initialPos, gap = ((20, 60), 200) if largeScale else ((20, 30), 90)
frame = self._addValues(pose[:3], frame, pos = initialPos,
prefix = 'Pos', largeScale = largeScale)
initialPos = (initialPos[0], initialPos[1] + gap)
frame = self._addValues(pose[3:], frame, pos =initialPos,
prefix = 'Or', largeScale = largeScale)
initialPos = (initialPos[0], initialPos[1] + gap)
frame = self._addValues(inputValues, frame, pos = initialPos,
prefix = 'Gaze', largeScale = largeScale)
return frame
def showFrameWithAllInputs(self, frame, pPts = None, landmarks = None,
outputValues = None, pose = None, delay = 1):
frame = self.addAllInputs(frame, pPts, landmarks, outputValues)
h, w, _ = frame.shape
cv2.line(frame, (0, int(h/2)), (w, int(h/2)), (0,0,0), 5)
cv2.line(frame, (int(w/2), 0), (int(w/2), h), (0,0,0), 5)
if not pose is None:
frame = self._addMeasurements(outputValues, pose, frame)
return self.showFrame(frame, delay)
def playSubjectVideoWithLandmarks(self, estimator, streamer):
self._estimator = estimator
for frame in streamer:
landmarks = estimator.detectFacialLandmarks(frame)
k = self.showFrameWithAllInputs(frame, landmarks = landmarks)
if not k:
break
return
def playSubjectVideoWithAllInputs(self, estimator, streamer):
self._estimator = estimator
for frame in streamer:
annotations = \
estimator.estimateInputValuesWithAnnotations(frame)
pose = estimator.getHeadPose()
inputValues, pPts, landmarks = annotations
k = self.showFrameWithAllInputs(frame, pPts,
landmarks, inputValues, pose)
if not k:
break
return
def replaySubjectVideoWithPostData(self, postData, streamer):
if not isinstance(postData, tuple):
jointStreamer = zip(postData, streamer)
for landmarks, frame in jointStreamer:
k = self.showFrameWithAllInputs(frame, landmarks = landmarks)
if not k:
break
return
else:
jointStreamer = zip(*(postData + (streamer,)))
for headGaze, pose, landmarks, pPts, frame in jointStreamer:
k = self.showFrameWithAllInputs(frame, pPts,
landmarks, headGaze, pose)
if not k:
break
return
def __initializeRecorder(self, id, trailName, fps = 30, dims = (1280, 720)):
fourcc = cv2.VideoWriter_fourcc(*'MP42')
dir = Paths.MergedVideosFolder + ('%s%s' % (id, Paths.sep))
if not os.path.isdir(dir):
os.makedirs(dir, exist_ok = True)
now = str(datetime.now())[:-7].replace(':', '-').replace(' ', '_')
recordName = trailName + '_%s_%s_WithAllInput.avi' % (id, now)
print(dir + recordName, 'will be written')
print(dims)
return cv2.VideoWriter(dir + recordName, fourcc, fps, dims)
def recordSubjectVideoWithPostdata(self, postData, id, trailName, streamer):
recorder = self.__initializeRecorder(id, trailName, dims = (1280, 720))
print(self._size)
if not isinstance(postData, tuple):
jointStreamer = zip(postData, streamer)
for landmarks, frame in jointStreamer:
frame = self.addAllInputs(frame, landmarks = landmarks)
k = self.showFrame(frame)
if not k:
recorder.release()
break
recorder.write(frame.astype(np.uint8))
else:
jointStreamer = zip(*(postData + (streamer,)))
for headGaze, pose, landmarks, pPts, frame in jointStreamer:
                frame = self.addAllInputs(frame, pPts, landmarks, headGaze)
k = self.showFrame(frame)
if not k:
recorder.release()
break
recorder.write(frame.astype(np.uint8))
recorder.release()
return
| 2 | 2 |
projects/Alleria/alleria/config.py | sm047/detectron2 | 5 | 12792958 | <reponame>sm047/detectron2<gh_stars>1-10
#!/usr/bin/env python3
# @Time : 4/6/20 5:47 PM
# @Author : fangcheng.ji
# @FileName: config.py
from detectron2.config import CfgNode as CN
def add_alleria_config(cfg):
_C = cfg
# ---------------------------------------------------------------------------- #
# Data Augmentation
# ---------------------------------------------------------------------------- #
# mosaic augmentation
_C.DATALOADER.MOSAIC_PROB = 0.33
# mix-up augmentation
_C.DATALOADER.MIXUP_PROB = 0.34
_C.DATALOADER.CUTOUT_PROB = 0.0
# output prediction and ground truth to image
_C.TEST.VISUAL_OUTPUT = False
_C.TEST.AUG.NMS_TH = 0.6
# ensemble multi model
_C.TEST.ENSEMBLE = CN()
_C.TEST.ENSEMBLE.ENABLED = False
_C.TEST.ENSEMBLE.NUM = 2
_C.TEST.ENSEMBLE.CONFIGS = ("",)
| 1.515625 | 2 |
NYK.py | innovationgarage/shipdash | 0 | 12792959 | import json
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None  # silence SettingWithCopyWarning for the in-place cleanup below
from sklearn.preprocessing import Imputer, StandardScaler
import DataSource
import os.path
class NYK(DataSource.DataSource):
def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None):
DataSource.DataSource.__init__(self, app, dsrc_name)
self.dsrc_type = dsrc_type
self.dsrc_path = dsrc_path
self.file_name = file_name
self.header_rows = header_rows
self.date_cols = date_cols
self.skip_rows = skip_rows
self.lat1 = lat1
self.long1 = long1
self.lat2 = lat2
self.long2 = long2
self.read_prepare_data()
self.init_dsrc()
"""These methods are fine-tuned for the current data sets. I need to
generalize them once I know more about different types of data coming
in"""
@classmethod
def clean(cls, df, name):
"""Find all empty space or all NaN columns and drops them from the DataFrame"""
df.replace(r'\s+', np.nan, regex=True, inplace=True)
df.replace(r'-', np.nan, regex=True, inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.columns = [str(x) for x in df.columns]
df.reset_index(level=[0], inplace=True)
df.rename(columns={'index': 'ind'}, inplace=True)
"""This is to find coordinate columns etc. manually, because we don't
know anything about the structure of our data!"""
# df.to_csv('data/'+name+'_clean.csv')
return df
@classmethod
def scale_impute(cls, df, method):
"""Find float columns, impute their NaN values with 'method', and then min-max scale the column/feature"""
fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1)
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform(
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
)
scaler = StandardScaler()
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform(
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
)
return df
@classmethod
def convert_coordinate(cls, df, col_in, col_out):
"""Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd"""
##FIXME! This is assuming all coordinates are E and N
df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int)
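        # e.g. 4730.500 ([d]ddmm.mmm) -> 47 + 30.5/60 = 47.5083 decimal degrees;
        # the 0.0166666667 factor is 1/60 for the minutes-to-degrees conversion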
return df
@classmethod
def wgs84_to_web_mercator(cls, df, lon, lat):
"""Convert decimal longitude/latitude to Web Mercator format"""
k = 6378137
df['wm%s'%lon] = df[lon] * (k * np.pi/180.0)
df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k
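        # these are the standard spherical Web Mercator equations with earth radius
        # k = 6378137 m: x = k*lon*pi/180 and y = k*ln(tan((90 + lat)*pi/360)), in metres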
return df
def read_prepare_data(self):
"""Use all data tools above to deliver the final cleaned DataFrame"""
self.data = self.dsrc_types[self.dsrc_type](
os.path.join(self.dsrc_path, self.file_name),
header = self.header_rows,
parse_dates = self.date_cols,
skiprows = self.skip_rows,
error_bad_lines = False,
low_memory = False
)
self.data['timestamp2'] = pd.to_datetime(self.data[0])
self.data['timestamp1'] = pd.to_datetime(self.data[1])
self.clean(self.data, self.dsrc_name)
self.convert_coordinate(self.data, str(self.lat1), 'lat1')
self.convert_coordinate(self.data, str(self.long1), 'long1')
self.convert_coordinate(self.data, str(self.lat2), 'lat2')
self.convert_coordinate(self.data, str(self.long2), 'long2')
self.scale_impute(self.data, 'mean')
self.wgs84_to_web_mercator(self.data, 'long1', 'lat1')
self.wgs84_to_web_mercator(self.data, 'long2', 'lat2')
self.data['timestamp_date'] = self.data['timestamp1'].dt.strftime('%Y-%m-%d')
DataSource.DataSource.types['NYK'] = NYK
| 2.9375 | 3 |
complex-flock.py | jasonmpittman/100-days-of-alife-code | 0 | 12792960 | #!/usr/bin/env python3
# Created on 05/07/2018
# @author: <NAME>
# @license: MIT-license
# Purpose: example of multiple agent flocking behavior
# Explanation:
import pygame, sys, random, math
pygame.init()
stopped = False
window_height = 800
window_width = 600
black = (0,0,0)
white = (255,255,255)
class agent:
def __init__(self, x, y):
self.x = x
self.y = y
self.velocityX = 10
self.velocityY = 10
while not stopped:
ev = pygame.event.get()
for event in ev:
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
            stopped = True
autograd_minimize/torch_wrapper.py | brunorigal/autograd_minimize | 8 | 12792961 | import numpy as np
import torch
from .base_wrapper import BaseWrapper
from torch.autograd.functional import hvp, vhp, hessian
from typing import List, Tuple, Dict, Union, Callable
from torch import nn, Tensor
class TorchWrapper(BaseWrapper):
def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'):
self.func = func
# Not very clean...
if 'device' in dir(func):
self.device = func.device
else:
self.device = torch.device(device)
if precision == 'float32':
self.precision = torch.float32
elif precision == 'float64':
self.precision = torch.float64
else:
raise ValueError
self.hvp_func = hvp if hvp_type == 'hvp' else vhp
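        # hvp computes the Hessian-vector product H @ v while vhp computes v @ H;
        # the two coincide whenever the Hessian is symmetric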
def get_value_and_grad(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
loss = self._eval_func(input_var_)
input_var_grad = input_var_.values() if isinstance(
input_var_, dict) else input_var_
grads = torch.autograd.grad(loss, input_var_grad)
if isinstance(input_var_, dict):
grads = {k: v for k, v in zip(input_var_.keys(), grads)}
return [loss.cpu().detach().numpy().astype(np.float64),
self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)]
def get_hvp(self, input_var, vector):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, device=self.device), self.shapes)
vector_ = self._unconcat(torch.tensor(
vector, dtype=self.precision, device=self.device), self.shapes)
if isinstance(input_var_, dict):
input_var_ = tuple(input_var_.values())
if isinstance(vector_, dict):
vector_ = tuple(vector_.values())
if isinstance(input_var_, list):
input_var_ = tuple(input_var_)
if isinstance(vector_, list):
vector_ = tuple(vector_)
loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_)
return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64)
def get_hess(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = torch.tensor(
input_var, dtype=self.precision, device=self.device)
def func(inp):
return self._eval_func(self._unconcat(inp, self.shapes))
hess = hessian(func, input_var_, vectorize=False)
return hess.cpu().detach().numpy().astype(np.float64)
def get_ctr_jac(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
ctr_val = self._eval_ctr_func(input_var_)
input_var_grad = input_var_.values() if isinstance(
input_var_, dict) else input_var_
grads = torch.autograd.grad(ctr_val, input_var_grad)
return grads.cpu().detach().numpy().astype(np.float64)
def _reshape(self, t, sh):
if torch.is_tensor(t):
return t.reshape(sh)
elif isinstance(t, np.ndarray):
return np.reshape(t, sh)
else:
raise NotImplementedError
def _tconcat(self, t_list, dim=0):
if torch.is_tensor(t_list[0]):
return torch.cat(t_list, dim)
elif isinstance(t_list[0], np.ndarray):
return np.concatenate(t_list, dim)
else:
raise NotImplementedError
def _gather(self, t, i, j):
if isinstance(t, np.ndarray) or torch.is_tensor(t):
return t[i:j]
else:
raise NotImplementedError
def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None):
"""
A factory to create a function of the torch parameter model.
:param model: torch model
:type model: torch.nn.Modle]
:param loss: a function with signature loss_value = loss(pred_y, true_y).
:type loss: function
:param train_x: dataset used as input of the model
:type train_x: np.ndarray
:param train_y: dataset used as ground truth input of the loss
:type train_y: np.ndarray
:return: (function of the parameters, list of parameters, names of parameters)
:rtype: tuple
"""
# named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()}
params, names = extract_weights(model)
device = params[0].device
prec_ = torch.float32 if precision == 'float32' else torch.float64
if isinstance(train_x, np.ndarray):
train_x = torch.tensor(train_x, dtype=prec_, device=device)
if isinstance(train_y, np.ndarray):
train_y = torch.tensor(train_y, dtype=prec_, device=device)
def func(*new_params):
load_weights(model, {k: v for k, v in zip(names, new_params)})
out = apply_func(model, train_x)
return loss(out, train_y)
func.device = device
return func, [p.cpu().detach().numpy() for p in params], names
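# Illustrative usage sketch (the model, loss and data names below are assumptions,
# not part of this module):
#   model = nn.Linear(3, 1)
#   func, init_params, names = torch_function_factory(model, nn.functional.mse_loss, x, y)
#   loss_value = func(*[torch.tensor(p, requires_grad=True) for p in init_params])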
def apply_func(func, input_):
if isinstance(input_, dict):
return func(**input_)
elif isinstance(input_, list) or isinstance(input_, tuple):
return func(*input_)
else:
return func(input_)
# Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
def _del_nested_attr(obj: nn.Module, names: List[str]) -> None:
"""
Deletes the attribute specified by the given list of names.
For example, to delete the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'])
"""
if len(names) == 1:
delattr(obj, names[0])
else:
_del_nested_attr(getattr(obj, names[0]), names[1:])
def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:
"""
Set the attribute specified by the given list of names to value.
For example, to set the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'], value)
"""
if len(names) == 1:
setattr(obj, names[0], value)
else:
_set_nested_attr(getattr(obj, names[0]), names[1:], value)
def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:
"""
This function removes all the Parameters from the model and
return them as a tuple as well as their original attribute names.
The weights must be re-loaded with `load_weights` before the model
can be used again.
Note that this function modifies the model in place and after this
call, mod.parameters() will be empty.
"""
orig_params = [p for p in mod.parameters() if p.requires_grad]
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
if p.requires_grad:
_del_nested_attr(mod, name.split("."))
names.append(name)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in orig_params)
return params, names
def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None:
"""
Reload a set of weights so that `mod` can be used again to perform a forward pass.
Note that the `params` are regular Tensors (that can have history) and so are left
as Tensors. This means that mod.parameters() will still be empty after this call.
"""
for name, p in params.items():
_set_nested_attr(mod, name.split("."), p)
| 2.15625 | 2 |
tests/test_components.py | pritchardn/MonteCarloDlgApp | 0 | 12792962 | <gh_stars>0
import pytest
from montecarlodlgapp import MonteCarloAppDrop, MyDataDROP
given = pytest.mark.parametrize
def test_myApp_class():
first = MonteCarloAppDrop("a", "a")
second = MonteCarloAppDrop("a", "a")
second.randomSeed = 100
first.initialize()
second.initialize()
pi_1 = first.run()
assert pi_1 != second.run()
def test_myData_class():
assert MyDataDROP("a", "a").getIO() == "Hello from MyDataDROP"
def test_myData_dataURL():
assert MyDataDROP("a", "a").dataURL == "Hello from the dataURL method"
| 2.453125 | 2 |
corehq/apps/smsbillables/management/commands/bootstrap_unicel_gateway.py | dslowikowski/commcare-hq | 1 | 12792963 | import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee
from corehq.apps.unicel.api import UnicelBackend
logger = logging.getLogger('accounting')
class Command(LabelCommand):
help = "bootstrap Unicel gateway fees"
args = ""
label = ""
def handle(self, *labels, **options):
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), INCOMING, 0.50,
currency=Currency.objects.get(code="INR"))
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), OUTGOING, 0.50,
currency=Currency.objects.get(code="INR"))
logger.info("Updated Unicel gateway fees.")
| 1.96875 | 2 |
src/robusta/runner/main.py | shahar-lev/robusta | 0 | 12792964 | import os
import os.path
from inspect import getmembers
import manhole
from .log_init import init_logging
from .web import Web
from ..core.playbooks.playbooks_event_handler_impl import PlaybooksEventHandlerImpl
from .. import api as robusta_api
from .config_loader import ConfigLoader
from ..model.config import Registry
def main():
init_logging()
registry = Registry()
event_handler = PlaybooksEventHandlerImpl(registry)
loader = ConfigLoader(registry, event_handler)
if os.environ.get("ENABLE_MANHOLE", "false").lower() == "true":
manhole.install(locals=dict(getmembers(robusta_api)))
Web.init(event_handler)
Web.run() # blocking
loader.close()
if __name__ == "__main__":
main()
| 1.851563 | 2 |
app/recipe/views.py | HHHMHA/recipe-app-api | 1 | 12792965 | from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from .serializers import TagSerializer, IngredientSerializer, RecipeSerializer
from core.models import Tag, Ingredient, Recipe
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base class for a user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
qs = super(BaseRecipeAttrViewSet, self).get_queryset()
return qs.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
serializer_class = TagSerializer
queryset = Tag.objects.all()
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
serializer_class = IngredientSerializer
queryset = Ingredient.objects.all()
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage Recipe in the database"""
serializer_class = RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
# Must override because the old one order by name
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
qs = super(RecipeViewSet, self).get_queryset()
return qs.filter(user=self.request.user)
| 2.1875 | 2 |
tests/support/sample_app/serializers.py | proteanhq/flask-authentic | 3 | 12792966 | """ Serializers used by the sample app """
from authentic.entities import Account
from protean.context import context
from protean_flask.core.serializers import EntitySerializer
from protean_flask.core.serializers import ma
from .entities import Human
class AccountSerializer(EntitySerializer):
""" Serializer for Account Entity"""
id = ma.fields.Integer()
class Meta:
entity = Account
fields = ('id', 'name', 'username', 'email', 'title', 'phone',
'timezone', 'is_locked', 'is_active', 'is_verified')
class HumanSerializer(EntitySerializer):
""" Serializer for Human Entity"""
current_account = ma.fields.Method('get_current_account')
def get_current_account(self, obj):
""" Return the current logged in user """
if context.account:
return context.account.id
else:
return None
class Meta:
entity = Human
| 2.796875 | 3 |
twitter_api/tweets_locale_by_tag.py | lcontini/twitter_search_api | 3 | 12792967 | #### NOT WORKING
import logging
import configs
import tweepy
import pymongo
import json
# TWITTER PARAMS
HASHTAGS_LIST = configs.HASHTAGS_LIST
# MONGODB PARAMS
MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS
MONGO_COL_USER = configs.MONGO_COL_USER
MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS
MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE
def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user):
logging.info("entrando na funcao get_user_locale_info")
    tweet_col = db_connection[mongo_col_tweets]
user_data_list = []
user_data = {}
user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1})
for user in user_list:
user_data['hashtag'] = user['hashtag']
user_data['user'] = user['user']
user_data['lang'] = user['lang']
user_data_list.append(user_data)
user_data = {}
user_data = user_data_list
    user_locale_data = get_user_info(api, db_connection, mongo_col_user, user_data)
return(user_locale_data)
def get_user_info(api, db_connection, mongo_col_user, user_list):
user_col = db_connection[mongo_col_user]
logging.info("entrando na funcao get_user_info")
filtered_user_list = []
insert_ids = []
for user in user_list:
logging.info("looking up for user {0}".format(user['user']))
user_raw = api.get_user(screen_name=user['user'])
user_raw_json = user_raw._json
        user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']}
        filtered_user_list.append(user_filtered)
        # x = user_col.insert_many(user_locale_list)
        # insert_ids.append(x.inserted_ids)
    return(filtered_user_list)
def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list):
user_col = db_connection[mongo_col_user]
count_documents = user_col.count()
if not count_documents == 0:
logging.info("Collection \"{0}\" is not empty. Performing cleanup".format(mongo_col_user))
clean_collection = configs.cleanup_collection(db_connection, mongo_col_user)
logging.info("Collection cleanup: {0} documents were deleted from the collection.".format(clean_collection))
x = user_col.insert_many(user_locale_list)
return(len(x.inserted_ids))
# def group_tweets_by_tag(api, db_connection, hashtags_list):
# print()
# def get_locale_by_tag(api, db_connection, hashtags_list):
# print()
#### NOT WORKING
# def main():
# logging.info("Collecting lang/locale count, per tag, for the given hashtags: {0}".format(' '.join(HASHTAGS_LIST)))
# api_auth = configs.twitter_auth()
# mongodb_connection = configs.mongodb_connect()
# user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS)
# insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list)
# tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST)
# locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST)
# logging.info("Lang/Locale count per tag stored into the collection \"{0}\"".format(MONGO_COL_LOCALE))
# if __name__ == "__main__":
# configs.logging_basic_config()
# main()
| 2.671875 | 3 |
baiduTransAPI.py | dujiaxin/Dajia_AI | 1 | 12792968 | # coding=utf-8
import http.client
import hashlib
import urllib
import random
import json
class BaiduTranslate:
    appid = ''  # fill in your appid
    secretKey = ''  # fill in your secret key
httpClient = None
def __init__(self, appid, secretKey):
self.appid = appid
self.secretKey = secretKey
def translate(self,q):
myurl = '/api/trans/vip/translate'
        fromLang = 'auto'  # source language
        toLang = 'zh'  # target language
salt = random.randint(32768, 65536)
q = q
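        # Baidu's API signs each request with md5(appid + query + salt + secret);
        # the random salt makes every request's signature unique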
sign = self.appid + q + str(salt) + self.secretKey
sign = hashlib.md5(sign.encode()).hexdigest()
myurl = myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(
salt) + '&sign=' + sign
        httpClient = None
        try:
httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
httpClient.request('GET', myurl)
            # response is an HTTPResponse object
response = httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
print (result)
return result['trans_result'][0]['dst']
except Exception as e:
print (e)
finally:
if httpClient:
httpClient.close() | 2.59375 | 3 |
quotes/api/views.py | vyahello/quotes | 3 | 12792969 | <reponame>vyahello/quotes
"""Module represents API for routes."""
from typing import List, Type
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
)
from app.models import Quote
from .serializers import QuoteSerializer
from .permissions import IsOwnerOrReadOnly
class Quotes(ListCreateAPIView):
"""Responsible for retrieving all quotes from an application.
Endpoint is `/api/`
``GET``: retrieve all quotes
``POST``: creates a new quote
"""
queryset: List[Quote] = Quote.objects.all()
serializer_class: Type[QuoteSerializer] = QuoteSerializer
class QuoteDetail(RetrieveUpdateDestroyAPIView):
"""Responsible for retrieving a single quote from an application.
Endpoint is `/api/<id>`
``GET``: retrieve a single quote
``PUT``: updates a single quote
``DELETE``: deletes a single quote
"""
permission_classes = (IsOwnerOrReadOnly,)
queryset: List[Quote] = Quote.objects.all()
serializer_class: Type[QuoteSerializer] = QuoteSerializer
| 2.296875 | 2 |
dataverse/dataverse.py | rliebz/dataverse-client-python | 1 | 12792970 | import requests
from dataset import Dataset
from exceptions import (
InsufficientMetadataError, MethodNotAllowedError, OperationFailedError,
ConnectionError
)
from utils import get_element, get_elements, sanitize
class Dataverse(object):
def __init__(self, connection, collection):
self.connection = connection
self.collection = collection
self._contents_json = None
@property
def is_published(self):
collection_info = requests.get(
self.collection.get('href'),
auth=self.connection.auth,
).content
status_tag = get_element(
collection_info,
namespace="http://purl.org/net/sword/terms/state",
tag="dataverseHasBeenReleased",
)
status = status_tag.text
return status.lower() == 'true'
@property
def alias(self):
return self.collection.get('href').split('/')[-1]
@property
def title(self):
return sanitize(get_element(
self.collection,
namespace='atom',
tag='title',
).text)
def get_contents(self, refresh=False):
if not refresh and self._contents_json:
return self._contents_json
content_uri = 'https://{0}/api/dataverses/{1}/contents'.format(
self.connection.host, self.alias
)
resp = requests.get(
content_uri,
params={'key': self.connection.token}
)
if resp.status_code != 200:
raise ConnectionError('Atom entry could not be retrieved.')
self._contents_json = resp.json()['data']
return self._contents_json
def publish(self):
edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format(
self.connection.host, self.alias
)
resp = requests.post(
edit_uri,
headers={'In-Progress': 'false'},
auth=self.connection.auth,
)
if resp.status_code != 200:
raise OperationFailedError('The Dataverse could not be published.')
def add_dataset(self, dataset):
if get_element(dataset._entry, 'title', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have a title.')
if get_element(dataset._entry, 'description', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have a description.')
if get_element(dataset._entry, 'creator', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have an author.')
resp = requests.post(
self.collection.get('href'),
data=dataset.get_entry(),
headers={'Content-type': 'application/atom+xml'},
auth=self.connection.auth,
)
if resp.status_code != 201:
raise OperationFailedError('This dataset could not be added.')
dataset.dataverse = self
dataset._refresh(receipt=resp.content)
def delete_dataset(self, dataset):
if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED':
return
resp = requests.delete(
dataset.edit_uri,
auth=self.connection.auth,
)
if resp.status_code == 405:
raise MethodNotAllowedError('Published datasets can only be '
'deleted from the GUI. For more information, please refer to '
'https://github.com/IQSS/dataverse/issues/778')
dataset._state = 'DEACCESSIONED'
def get_datasets(self):
collection_info = requests.get(
self.collection.get('href'),
auth=self.connection.auth,
).content
entries = get_elements(collection_info, tag='entry')
return [Dataset.from_dataverse(entry, self) for entry in entries]
def get_dataset_by_doi(self, doi):
return next((s for s in self.get_datasets() if s.doi == doi), None)
def get_dataset_by_title(self, title):
return next((s for s in self.get_datasets() if s.title == title), None)
def get_dataset_by_string_in_entry(self, string):
return next((s for s in self.get_datasets() if string in s.get_entry()), None)
| 2.625 | 3 |
mergify_engine/tests/functional/actions/test_update.py | truthiswill/mergify-engine | 266 | 12792971 | # -*- encoding: utf-8 -*-
#
# Copyright © 2018–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from mergify_engine import config
from mergify_engine import context
from mergify_engine.tests.functional import base
class TestUpdateAction(base.FunctionalTestBase):
async def test_update_action(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
async def test_update_action_on_closed_pr_deleted_branch(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}, "delete_head_branch": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
# Now merge p2 so p1 is not up to date
await self.add_label(p2["number"], "merge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p1, [])
checks = await ctxt.pull_engine_check_runs
for check in checks:
assert check["conclusion"] == "success", check
| 2.09375 | 2 |
0701-0800/0725-Split Linked List in Parts/0725-Split Linked List in Parts.py | jiadaizhao/LeetCode | 49 | 12792972 | <reponame>jiadaizhao/LeetCode<filename>0701-0800/0725-Split Linked List in Parts/0725-Split Linked List in Parts.py
from typing import List


# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def splitListToParts(self, root: ListNode, k: int) -> List[ListNode]:
le = 0
head = root
while head:
le += 1
head = head.next
count, remain = divmod(le, k)
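        # every part gets `count` nodes; the first `remain` parts get one extra node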
result = [None] * k
head = root
prev = None
for i in range(k):
if head is None:
break
else:
result[i] = head
for j in range(count):
prev = head
head = head.next
if remain > 0:
prev = head
head = head.next
remain -= 1
prev.next = None
return result
| 3.625 | 4 |
spirv_tools/passes/dead_inst_elim.py | kristerw/spirv-tools | 22 | 12792973 | <reponame>kristerw/spirv-tools
"""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
from spirv_tools import ir
def remove_debug_if_dead(inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in ir.DEBUG_INSTRUCTIONS
if inst.op_name == 'OpName':
if inst.operands[0].inst is None:
inst.destroy()
def remove_decoration_if_dead(inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in ir.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0].inst is None:
inst.destroy()
def process_function(module, function):
"""Run the pass on one function."""
# We need to re-run the pass if elimination of a phi-node makes
# instructions dead in an already processed basic block.
rerun = True
while rerun:
rerun = False
processed_bbs = set()
for inst in function.instructions_reversed():
if inst.op_name == 'OpLabel':
processed_bbs.add(inst.basic_block)
if not inst.has_side_effects() and not inst.uses():
if inst.op_name == 'OpPhi':
processed_bbs.add(inst.basic_block)
operands = inst.operands[:]
inst.destroy()
for operand in operands:
if (operand.inst.op_name != 'OpLabel' and
operand.inst.basic_block in processed_bbs and
not operand.inst.uses()):
rerun = True
break
else:
inst.destroy()
def run(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
# of this pass is handled by the real pass when the instruction they
# point to is removed.
for inst in module.global_instructions.name_insts:
remove_debug_if_dead(inst)
for inst in module.global_instructions.op_string_insts:
remove_debug_if_dead(inst)
for inst in reversed(module.global_instructions.decoration_insts):
remove_decoration_if_dead(inst)
# Remove unused instructions in functions.
for function in module.functions:
process_function(module, function)
# Remove unused global instructions.
for inst in module.global_instructions.instructions_reversed():
if not inst.has_side_effects() and not inst.uses():
inst.destroy()
| 2.21875 | 2 |
docassemble/ALRMFinancialCounsellingForm/data/modules/test_access.py | mferrare/docassemble-ALRMFinancialCounsellingForm | 0 | 12792974 | import msal
import logging
import requests
import json
config = {
"authority": "https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3",
"client_id": "d584a43a-c4c1-4fbe-9c1c-3cae87420e6e",
"scope": [ "https://graph.microsoft.com/.default" ],
"secret": "<KEY>",
"endpoint": "https://graph.microsoft.com/v1.0/users"
}
# Create a preferably long-lived app instance that maintains a token cache.
app = msal.ConfidentialClientApplication(
config["client_id"], authority=config["authority"],
client_credential=config["secret"]
)
# The pattern to acquire a token looks like this.
result = None
# First, the code looks up a token from the cache.
# Because we're looking for a token for the current app, not for a user,
# use None for the account parameter.
result = app.acquire_token_silent(config["scope"], account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = app.acquire_token_for_client(scopes=config["scope"])
if "access_token" in result:
# Call a protected API with the access token.
endpoint_root = 'https://graph.microsoft.com/v1.0'
http_headers = {
'Authorization' : 'Bearer ' + result['access_token'],
'Accept' : 'application/json',
'Content-Type' : 'application/json'
}
# Look for our site
siteName='MFPersonal'
endpoint = '{}/sites?search={}'.format(endpoint_root, siteName)
siteq = requests.get(endpoint, headers=http_headers, stream=False).json()
# We may not have a site
try:
our_site = None
for a_site in siteq['value']:
if a_site['name'] == siteName:
our_site = a_site
break
list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List')
the_list = requests.get(list_ep, headers=http_headers, stream=False).json()
listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])
the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json()
an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id'])
an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json()
new_item = {
'fields': {
'Title' : 'Another item',
'testfield' : 'another test field'
}
}
payload = json.dumps(new_item)
new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])
make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json()
    except Exception as e:
print(str(e))
print(result["token_type"])
else:
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id")) # You might need this when reporting a bug. | 2.625 | 3 |
raysect/core/math/cython/tests/test_tetrahedra.py | raysect/source | 71 | 12792975 | # Copyright (c) 2014-2021, Dr <NAME>, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the Vector3D object.
"""
import unittest
import numpy as np
# from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra
from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra
class TestTetrahedra(unittest.TestCase):
def test_inside_tetrahedra(self):
"""Tests the inside tetrahedra algorithm."""
# defining triangle vertices
v1x, v1y, v1z = 0, 0, 0
v2x, v2y, v2z = 1, 0, 0
v3x, v3y, v3z = 0, 1, 0
v4x, v4y, v4z = 0, 0, 1
# test vertices are inside
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z))
# check line segments are inside
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0))
# check an interior point
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25))
# check an exterior point
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))
if __name__ == "__main__":
unittest.main()
| 1.359375 | 1 |
test_relation.py | melahi/my-tensorflow-layers | 1 | 12792976 | <reponame>melahi/my-tensorflow-layers
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import testing_utils, backend
from tensorflow.python.keras.utils import get_custom_objects
from tensorflow.python.platform import test
from relation import Relation
class RelationTest(test.TestCase):
@staticmethod
def test_relation_layer():
backend.set_session(None)
input_data = np.array([[[3, 2, 4],
[1, 5, 2]],
[[30, 20, 40],
[10, 50, 20]]], dtype=np.float32)
weights = np.array([[1, 0],
[5, 6],
[7, 8]], dtype=np.float32)
bias = np.array([4, 7], dtype=np.float32)
expected_output = np.array([[[6926, 8642],
[6845, 8822]],
[[663440, 807500],
[655340, 825500]]], dtype=np.float32)
tf.reset_default_graph()
get_custom_objects()['Relation'] = Relation
kwargs = {'relations': 2,
'kernel_initializer': tf.constant_initializer(weights),
'bias_initializer': tf.constant_initializer(bias)
}
a = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
output = testing_utils.layer_test(Relation,
kwargs=kwargs,
input_data=input_data,
expected_output=expected_output)
if not np.array_equal(output, expected_output):
raise AssertionError('The output is not equal to our expected output')
@staticmethod
def test_my_case():
backend.set_session(None)
input_data = np.array([[[15, 0, 10],
[13, 1, 10],
[13, 5, 19],
[19, 19, 4]],
[[5, 14, 10],
[9, 11, 12],
[4, 7, 7],
[1, 9, 0]],
[[14, 17, 1],
[1, 9, 16],
[7, 6, 9],
[17, 7, 3]]], dtype=np.float32)
w1 = np.array([[3, 8, 6, 7, 8],
[3, 1, 0, 8, 7],
[4, 9, 8, 1, 9]], dtype=np.float32)
w2 = np.array([[3, 0, 0, 2, 3],
[3, 8, 9, 5, 7],
[3, 9, 7, 0, 7]], dtype=np.float32)
g = np.array([[7, 9, 4, 9, 0],
[1, 1, 5, 4, 0],
[6, 1, 7, 1, 3]], dtype=np.float32)
bias = np.array([0, 0, 0, 0, 0], dtype=np.float32)
expected_output = np.array([[[128560, 219497, 209334, 128295, 48435],
[126451, 210377, 201342, 124197, 47274],
[160195, 262057, 249294, 152200, 61335],
[160195, 217673, 193350, 247137, 62754]],
[[61893, 76272, 131794, 81197, 34404],
[65721, 87599, 151154, 83381, 36927],
[49365, 66150, 117274, 57173, 27096],
[38577, 45665, 81458, 52805, 20745]],
[[86352, 109637, 130294, 167782, 34419],
[84894, 119274, 153562, 92742, 32244],
[75660, 111732, 142482, 98638, 29112],
[80034, 123045, 149130, 137230, 31983]]], dtype=np.float32)
kwargs = {'relations': 5}
layer_cls = Relation(**kwargs)
my_i = tf.keras.layers.Input(input_data.shape[1:], dtype=tf.float32)
my_layer = layer_cls(my_i)
model = tf.keras.Model(my_i, my_layer)
weights = [w1, w2, g, bias]
layer_cls.set_weights(weights)
output = model.predict(input_data)
if not np.array_equal(output, expected_output):
raise AssertionError("The output is not equal with the expected output")
if __name__ == '__main__':
test.main()
| 2.28125 | 2 |
typeidea/comment/models.py | ShanGis/TypeIdea | 0 | 12792977 | from django.db import models
from django.contrib.auth.models import User
from blog.models import Post
class Comment(models.Model):
    STATUS_ITEMS = (
        (1, '正常'),  # normal
        (2, '删除'),  # deleted
    )
    target = models.CharField(max_length=200, null=True, verbose_name='评论目标')  # comment target
    content = models.CharField(max_length=2000, verbose_name='内容')  # content
    nickname = models.CharField(max_length=50, verbose_name='别名')  # nickname
    status = models.PositiveIntegerField(default=1, choices=STATUS_ITEMS, verbose_name='状态')  # status
    websit = models.URLField(verbose_name='网址')  # website URL
    email = models.EmailField(verbose_name='邮箱')  # email
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')  # creation time
    class Meta:
        verbose_name = verbose_name_plural = '评论'  # comment
    def __str__(self):
        return '{}'.format(self.target)
    def nickname_show(self):
        return '来自{}的评论'.format(self.nickname)  # "comment from <nickname>"
    nickname_show.short_description = '评论者'  # commenter
| 2.140625 | 2 |
mmdet/ops/ops/rotated/rotate_roi_align.py | qgh1223/SLRDet | 27 | 12792978 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from mmdet import _Custom as _C
from apex import amp
class _RROIAlign(Function):
@staticmethod
def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
out_h, out_w = _pair(out_size)
assert isinstance(out_h, int) and isinstance(out_w, int)
ctx.spatial_scale = spatial_scale
ctx.sample_num = sample_num
ctx.save_for_backward(rois)
ctx.feature_size = features.size()
output = _C.rotate_roi_align_forward(
features, rois, spatial_scale, out_h, out_w, sample_num
)
# return output, argmax # DEBUG ONLY
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
feature_size = ctx.feature_size
spatial_scale = ctx.spatial_scale
sample_num = ctx.sample_num
rois = ctx.saved_tensors[0]
assert (feature_size is not None and grad_output.is_cuda)
batch_size, num_channels, data_height, data_width = feature_size
out_w = grad_output.size(3)
out_h = grad_output.size(2)
grad_input = grad_rois = None
if ctx.needs_input_grad[0]:
grad_input = _C.rotate_roi_align_backward(
grad_output.contiguous(),
rois,
spatial_scale,
out_h,
out_w,
batch_size,
num_channels,
data_height,
data_width,
sample_num
)
return grad_input, grad_rois, None, None, None
rroi_align = _RROIAlign.apply
class RROIAlign(nn.Module):
def __init__(self, out_size, spatial_scale, sample_num=0):
super(RROIAlign, self).__init__()
self.out_size = out_size
self.spatial_scale = spatial_scale
self.sample_num = sample_num
@amp.float_function
def forward(self, features, rois):
return rroi_align(
features, rois, self.out_size, self.spatial_scale, self.sample_num
)
def __repr__(self):
format_str = self.__class__.__name__
        format_str += '(out_size={}, spatial_scale={}, sample_num={})'.format(
self.out_size, self.spatial_scale, self.sample_num)
return format_str
| 1.945313 | 2 |
eqv_transformer/dynamics_predictor.py | oxcsml/lie-transformer | 36 | 12792979 | from torch import nn
from attrdict import AttrDict
import torch
import torch.nn.functional as F
from lie_conv.dynamicsTrainer import Partial
from torchdiffeq import odeint
from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH
class DynamicsPredictor(nn.Module):
"""This class implements forward pass through our model, including loss computation."""
def __init__(self, predictor, debug=False, task="spring", model_with_dict=True):
super().__init__()
self.predictor = predictor
self.debug = debug
self.task = task
self.model_with_dict = model_with_dict
if self.debug:
print("DynamicsPredictor is in DEBUG MODE.")
def _rollout_model(self, z0, ts, sys_params, tol=1e-4):
"""inputs [z0: (bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)]
outputs pred_zs: (bs, T, z_dim)"""
dynamics = Partial(self.predictor, sysP=sys_params)
zs = odeint(dynamics, z0, ts[0], rtol=tol, method="rk4")
return zs.permute(1, 0, 2)
def forward(self, data):
o = AttrDict()
(z0, sys_params, ts), true_zs = data
pred_zs = self._rollout_model(z0, ts, sys_params)
mse = (pred_zs - true_zs).pow(2).mean()
if self.debug:
if self.task == "spring":
# currently a bit inefficient to do the below?
with torch.no_grad():
(z0, sys_params, ts), true_zs = data
z = z0
m = sys_params[..., 0] # assume the first component encodes masses
D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim
q = z[:, : D // 2].reshape(*m.shape, -1)
p = z[:, D // 2 :].reshape(*m.shape, -1)
V_pred = self.predictor.compute_V((q, sys_params))
k = sys_params[..., 1]
V_true = SpringV(q, k)
mse_V = (V_pred - V_true).pow(2).mean()
# dynamics
dyn_tz_pred = self.predictor(ts, z0, sys_params)
H = lambda t, z: SpringH(
z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1)
)
dynamics = HamiltonianDynamics(H, wgrad=False)
dyn_tz_true = dynamics(ts, z0)
mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean()
o.mse_dyn = mse_dyn
o.mse_V = mse_V
o.prediction = pred_zs
o.mse = mse
o.loss = mse # loss wrt which we train the model
if self.debug:
o.reports = AttrDict({"mse": o.mse, "mse_V": o.mse_V, "mse_dyn": o.mse_dyn})
else:
o.reports = AttrDict({"mse": o.mse})
if not self.model_with_dict:
return pred_zs
return o
| 2.109375 | 2 |
hackerrank/Algorithms/implementation/strange-code.py | tjeubaoit/algorithm | 0 | 12792980 | <reponame>tjeubaoit/algorithm<filename>hackerrank/Algorithms/implementation/strange-code.py
# Complete the strangeCounter function below.
def strangeCounter(t):
c, i = 0, 0
while c < t:
c += 3 << i
i += 1
return c - t + 1
def strangeCounter2(t):
rem = 3
while t > rem:
t = t-rem
rem *= 2
return rem-t+1
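# Added sanity check (hedged example, not part of the original submission):
# both implementations compute the same value, e.g. the counter shows 6 at
# time t = 4.
if __name__ == '__main__':
    for t in (1, 4, 17, 1000000):
        assert strangeCounter(t) == strangeCounter2(t)
    print(strangeCounter(4))  # expected output: 6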
| 3.59375 | 4 |
brainstat/tutorial/__init__.py | rmarkello/BrainStat | 0 | 12792981 | <gh_stars>0
"""Functions required for the BrainStat Tutorials"""
| 1.09375 | 1 |
src/sim/basicExampleTest/results/aco.py | andremtsilva/dissertacao | 0 | 12792982 | <reponame>andremtsilva/dissertacao
import acopy
import networkx as nx
def main():
G = nx.read_graphml('graph_binomial_tree_5.graphml')
print(G.nodes())
print(nx.get_node_attributes(G, 'IPT'))
"""
solver = acopy.Solver(rho=.03, q=1)
colony = acopy.Colony(alpha=1, beta=3)
tour = solver.solve(G, colony, limit=100)
print(tour.cost)
print(tour.nodes)
"""
if __name__ == '__main__':
main()
| 2.984375 | 3 |
gameServer/playerCode/trade.py | hydrogen602/settlersPy | 0 | 12792983 | <gh_stars>0
from typing import Dict, List
from ..extraCode.location import Resource
from ..extraCode.util import ActionError
from .player import Player
class Trade:
def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]):
self._cost: Dict[Resource, int] = cost
self._goodsOffered: Dict[Resource, int] = goodsOffered
def purchase(self, player: Player):
'''
        Throws `ActionError` if the player cannot pay the cost.
        `player` is the one accepting the trade.
'''
player.requireResources(self._cost) # raises ActionError
player.takeResources(self._cost)
player.giveResources(self._goodsOffered)
class InterPlayerTrade(Trade):
def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []):
'''
        Offeror is the one proposing the offer;
        Offerees are the recipients of the offer.
        If Offerees is empty, then anyone can accept the offer.
        An InterPlayerTrade covers only a single trade and so becomes
        invalid after a successful trade.
        Throws ActionError if the Offeror does not have what he offers.
'''
offeror.requireResources(goodsOffered) # throws ActionError
self.__offeror: Player = offeror
self.__offerees: List[Player] = offerees
self.__closedDeal = False # whether the deal has been closed
self.__invalid = False # whether the deal has been invalidated
super().__init__(cost, goodsOffered)
def __isValidOfferee(self, player: Player) -> bool:
'''
Returns whether or not this player can accept the given trade deal.
'''
return len(self.__offerees) == 0 or player in self.__offerees
# len(self.__offerees) == 0 means anyone can accept
# player in self.__offerees checks if the player is in the list of accepted players
def purchase(self, player: Player):
# errors here would go to player
if not self.__isValidOfferee(player):
raise ActionError(f"Player is not allowed to take this trade offer")
if self.__invalid:
raise ActionError(f"This trade offer is invalid due to {self.__offeror.name}")
if self.__closedDeal:
raise ActionError(f"This offer has already been accepted")
if not self.__offeror.hasResources(self._goodsOffered):
# offeror no longer has what he offered
self.__invalid = True
raise ActionError(f"{self.__offeror.name} doesn't have what he/she offered")
player.requireResources(self._cost)
# both have what they wish to trade
player.takeResources(self._cost)
self.__offeror.takeResources(self._goodsOffered)
        player.giveResources(self._goodsOffered)
self.__offeror.giveResources(self._cost)
self.__closedDeal = True
| 3.046875 | 3 |
ieee_1584/equations.py | LiaungYip/arcflash | 1 | 12792984 | <reponame>LiaungYip/arcflash
# Copyright 2022, <NAME> - https://www.penwatch.net
# Licensed under the MIT License. Refer LICENSE.txt.
import logging
from math import log10, sqrt
from ieee_1584.cubicle import Cubicle
from ieee_1584.tables import table_1, table_3, table_4, table_5
def I_arc_intermediate(c: Cubicle, V_oc: float, I_bf: float):
# Equation 1
assert V_oc in (0.6, 2.7, 14.3,)
k = table_1[(c.EC, V_oc,)]
x1 = + k["k1"] \
+ k["k2"] * log10(I_bf) \
+ k["k3"] * log10(c.G)
x2 = + k["k4"] * I_bf ** 6 \
+ k["k5"] * I_bf ** 5 \
+ k["k6"] * I_bf ** 4 \
+ k["k7"] * I_bf ** 3 \
+ k["k8"] * I_bf ** 2 \
+ k["k9"] * I_bf ** 1 \
+ k["k10"]
I_a = (10 ** x1) * x2
return I_a
def I_arc_min(c: Cubicle, I_arc: float):
# Equation 2
return I_arc * (1 - 0.5 * c.VarCF)
def E_AFB_intermediate(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600: float = None):
logging.warning(
"Function E_AFB_intermediate() is deprecated. Use intermediate_E() and intermediate_AFB_from_E() instead.")
E = intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600)
AFB = intermediate_AFB_from_E(c, V_oc, E)
return E, AFB
def intermediate_E(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600: float = None):
# Implements equations 3, 4, 5, 6 for "intermediate incident energy".
assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,))
if V_oc <= 0.6:
k = table_3[c.EC]
elif V_oc == 2.7:
k = table_4[c.EC]
elif V_oc == 14.3:
k = table_5[c.EC]
else:
k = None
x1 = 12.552 / 50 * T
x2 = k["k1"] + k["k2"] * log10(c.G)
if I_arc_600 is None: # HV case. Eqs 3, 4, 5
x3_num = k["k3"] * I_arc
else: # LV case. Eq 6.
x3_num = k["k3"] * I_arc_600
x3_den = + k["k4"] * I_bf ** 7 \
+ k["k5"] * I_bf ** 6 \
+ k["k6"] * I_bf ** 5 \
+ k["k7"] * I_bf ** 4 \
+ k["k8"] * I_bf ** 3 \
+ k["k9"] * I_bf ** 2 \
+ k["k10"] * I_bf
x3 = x3_num / x3_den
x4 = + k["k11"] * log10(I_bf) \
+ k["k13"] * log10(I_arc) \
+ log10(1 / c.CF)
x5 = k["k12"] * log10(c.D)
# Equations 3, 4, 5, 6
E = x1 * 10 ** (x2 + x3 + x4 + x5)
assert E >= 0
return E
def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float):
# Implements equations 7, 8, 9, 10, for "intermediate arc flash boundary", in a simpler way.
#
# Calculates the (intermediate) arc flash boundary, i.e. AFB_600, from the incident energy i.e. E_600 only.
# Knowledge of T, G, I_arc, I_bf, and CF is not required, as it would be if using Eq's 7, 8, 9, 10 directly.
# This is useful for multi-time-step calculations where there is no singular value of T, I_arc, or I_bf.
#
# Motivation:
# ===========
#
# The IEEE 1584-2018 formulas for arc flash boundary (AFB), i.e. eq's 7, 8, 9, and 10, are pretty complicated.
#
# In particular, the equations for AFB requires knowledge of time T, busbar gap G, the currents I_arc and I_bf,
# and size correction factor CF.
#
# This is a problem when doing multi-time-step arc flash calculations where the values of T, I_arc, and I_bf are
# different for each time-step. What single value of I_arc would you plug into Eq 7, when I_arc is 10 kA for 100 ms,
# then 5 kA for 900 ms, then 2 kA for 1,000 ms?
#
# Details:
# ========
#
# Consider Eq 3 for the quantity E_600.
#
# First, we recognise that the relationship between incident energy E_600 (J/cm²) and distance D (mm) is simply
# that __the energy E_600 falls off exponentially with distance D__. If we rearrange Eq 3 using exponent identities
# we can simplify to:
#
# E_600 = F * ( D ^ k12 )
#
# Where:
# * E_600 is the __intermediate__ arcing energy at V = 0.6 kV, with units of J/cm²,
# * F_600 is a (reasonably complicated) function of time T, busbar gap G, currents I_arc and I_bf, and size
# correction factor CF,
# * D is the distance (mm) from the fault to where E_600 has been measured.
# * k12 is a constant __"distance exponent"__ from Table 3, Table 4, or Table 5.
#
# We can calculate what F_600** would have been:
#
# F_600 = E_600 / ( D ^ k12 )
#
# Once we know the value of F_600, we can calculate E_600' at any distance D' we like:
#
# E_600' (at distance D') = F_600 * ( D' ^ k12 )
#
# Alternately, we can calculate the distance D' that will give a particular value of E_600'.
#
# D' = (E_600' / F_600) ^ ( 1 / k12 )
#
# Finally note that the arc flash boundary, AFB_600, is simply the special case where E_600' is equal to exactly
# 1.2 cal/cm². Noting that 1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm²,
#
# AFB_600 = (5.0208 / F_600) ^ ( 1 / k12 )
#
# ** Sidenote: The physical meaning of the quantity "F_600" is that F_600 is, in some sense, the total amount of
# energy released (i.e. Joules). (The distribution of energy is not isotropic, i.e. k12 != -2.00, so this
# interpretation is not exact.)
#
# Sidenote 2: the funny number "50/12.552" in Eq 3/4/5/6 turns into the magic number 20 in Eq 7/8/9/10.
# 1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm²
# 50 / 12.552 * 5.0208 = 20 (exact)
assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,))
if V_oc <= 0.6:
k = table_3[c.EC]
elif V_oc == 2.7:
k = table_4[c.EC]
elif V_oc == 14.3:
k = table_5[c.EC]
else:
k = None
# After all the explanation, calculation of the (intermediate) AFB is simply 2 lines.
F = E / (c.D ** k["k12"])
AFB = (5.0208 / F) ** (1 / k["k12"])
assert AFB >= 0
return AFB
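# Hedged worked example (added comment; the exponent below is illustrative and
# not a value taken from Table 3/4/5): suppose intermediate_E() returned
# E = 20 J/cm² at the working distance D = 450 mm and the relevant k12 were
# -1.6.  Then F = E / D**k12 = 20 / 450**-1.6 ≈ 3.5e5, and the intermediate
# arc flash boundary follows as AFB = (5.0208 / F)**(1 / -1.6) ≈ 1.1e3 mm,
# i.e. the distance at which the incident energy falls to 1.2 cal/cm².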
def interpolate(c: Cubicle, x_600, x_2700, x_14300):
V_oc = c.V_oc
# Eq 16, Eq 19, Eq 22
x1 = (((x_2700 - x_600) / 2.1) * (V_oc - 2.7)) + x_2700
# Eq 17, Eq 20, Eq 23
x2 = (((x_14300 - x_2700) / 11.6) * (V_oc - 14.3)) + x_14300
# Eq 18, Eq 21, Eq 24
x3 = ((x1 * (2.7 - V_oc)) / 2.1) + ((x2 * (V_oc - 0.6)) / 2.1)
if 0.600 < V_oc <= 2.7:
return x3
elif V_oc > 2.7:
return x2
def I_arc_final_LV(c: Cubicle, I_arc_600, I_bf):
# Equation 25
V_oc = c.V_oc
x1 = (0.6 / V_oc) ** 2
x2 = 1 / (I_arc_600 ** 2)
x3 = (0.6 ** 2 - V_oc ** 2) / (0.6 ** 2 * I_bf ** 2)
x4 = sqrt(x1 * (x2 - x3))
return 1 / x4
| 2.078125 | 2 |
setup.py | Pratilipi-Labs/python-logware | 0 | 12792985 | <reponame>Pratilipi-Labs/python-logware<gh_stars>0
from setuptools import setup
setup(name='logware',
version='0.1.4',
description='Logging middleware for python web services',
url='https://github.com/Pratilipi-Labs/python-logware',
author='Giridhar',
author_email='<EMAIL>',
license='MIT',
packages=['logware'],
install_requires=[
'webob'
],
zip_safe=False)
| 1.023438 | 1 |
run/index-correct.py | ai-ku/uwsd | 3 | 12792986 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import sys
from collections import defaultdict as dd
from nlp_utils import fopen
pos_file = sys.argv[1]
aw_file = sys.argv[2]
TAG = "-NONE-"
aw_lines = fopen(aw_file).readlines()
indices = dd(list)
for line in aw_lines:
line = line.split()
line_ind = int(line[2])
term_ind = int(line[3])
indices[line_ind].append(term_ind)
whole_ind = []
for i, line in enumerate(fopen(pos_file)):
line = line.split()
remove = []
for j, t in enumerate(line):
if t == TAG:
remove.append(j)
m = len(indices[i])
remove_part = [0] * m
for j, ind in enumerate(indices[i]):
rr = 0
for tag_ind in remove:
if tag_ind < ind:
rr += 1
remove_part[j] = rr
result = []
for ind, rem in zip(indices[i], remove_part):
#print ind, rem
result.append(ind - rem)
whole_ind.extend(result)
#print ' '.join([str(k) for k in result])
for line, new_index in zip(aw_lines, whole_ind):
line = line.split()
line.append(str(new_index))
print "\t".join(line)
| 2.578125 | 3 |
dpmm/data.py | jmeyers314/DP_SNe | 20 | 12792987 | import numpy as np
from utils import pick_discrete
class PseudoMarginalData(object):
def __init__(self, data, interim_prior):
# Data should have dims [NOBJ, NSAMPLE, NDIM] or [NOBJ, NSAMPLE] if NDIM is 1
# interim_prior should have dims [NOBJ, NSAMPLE]
self.data = data
self.interim_prior = interim_prior
if self.data.ndim == 2:
self.nobj, self.nsample = self.data.shape
else:
self.nobj, self.nsample, self.ndim = self.data.shape
if self.interim_prior.shape != (self.nobj, self.nsample):
ds = self.data.shape
ips = self.interim_prior.shape
raise ValueError(("data shape [NOBJ, NSAMPLE, NDIM] = [{}, {}, {}]" +
" inconsistent with interim_prior shape [NOBJ, NSAMPLE] = [{}, {}]")
.format(ds[0], ds[1], ds[2], ips[0], ips[2]))
def __len__(self):
return self.nobj
def __getitem__(self, index):
import numbers
cls = type(self)
# *Leave* a shallow axis in the case a single object is requested.
if isinstance(index, numbers.Integral):
return cls(self.data[np.newaxis, index], self.interim_prior[np.newaxis, index])
else:
return cls(self.data[index], self.interim_prior[index])
def random_sample(self):
"""Return a [NOBJ, NDIM] numpy array sampling over NSAMPLE using inverse interim_prior
weights. Needed to compute a posterior object."""
ps = 1./self.interim_prior
ps /= np.sum(ps, axis=1)[:, np.newaxis]
return np.array([self.data[i, pick_discrete(p)] for i, p in enumerate(ps)])
class NullManip(object):
def init(self, D):
pass
def __call__(self, D):
return D
def unmanip(self, D):
return D
def update(self, D, phi, c, prior):
pass
| 2.8125 | 3 |
bitgo/cmd.py | geeks121/pybitgo | 3 | 12792988 | <filename>bitgo/cmd.py<gh_stars>1-10
__author__ = 'sserrano'
import json
import os
from .bitgo import BitGo
def load_config(filename):
if os.path.exists(filename):
try:
return json.load(open(filename, 'rb'))
except ValueError:
return {}
else:
return {}
def update_config(filename, cnf):
config = load_config(filename)
config.update(cnf)
json.dump(config, open(filename, "wb"))
def main():
import getpass
from optparse import OptionParser
import sys
from os.path import expanduser
home = expanduser("~")
parser = OptionParser()
parser.add_option("-a", "--access-token", dest="access_token",
help="access token")
parser.add_option("-w", "--wallet-id", dest="wallet_id",
help="wallet id")
(options, args) = parser.parse_args()
if len(args) == 0:
print "a command is required, available: access_token, get_wallets, get_balance, send"
sys.exit(1)
action = args[0]
config_filename = os.path.join(home, ".bitgo")
config = load_config(config_filename)
if action == 'access_token':
username = raw_input('username: ')
        password = getpass.getpass('password: ')
otp = raw_input('otp: ')
bitgo = BitGo()
access_token = bitgo.get_access_token(username, password, otp)
print "access_token: ", access_token
if raw_input("store y/n? ") == "y":
update_config(config_filename, {'access_token': access_token})
sys.exit(0)
if options.access_token:
access_token = options.access_token
elif 'access_token' in config:
access_token = config['access_token']
else:
print "the --access-token is a required parameter"
sys.exit(1)
bitgo = BitGo(access_token=access_token)
if action == 'get_wallets':
print bitgo.get_wallets()
elif action == 'get_balance':
if options.wallet_id is None:
print "option -w {{ wallet_id }} is required for get_balance"
sys.exit(1)
print bitgo.get_balance(options.wallet_id) / float(10**8)
elif action == 'get_wallet':
if options.wallet_id is None:
print "option -w {{ wallet_id }} is required for get_wallet"
sys.exit(1)
otp = getpass.getpass('otp: ')
bitgo.unlock(otp)
print bitgo.get_wallet(options.wallet_id)
elif action == 'get_unspents':
if options.wallet_id is None:
print "option -w {{ wallet_id }} is required for get_unspents"
sys.exit(1)
print bitgo.get_unspents(options.wallet_id)
elif action == 'send':
if options.wallet_id is None:
print "option -w {{ wallet_id }} is required for send"
sys.exit(1)
if len(args) != 3:
print "address and amount are required"
sys.exit(1)
otp = raw_input('otp: ')
passcode = getpass.getpass('passcode: ')
bitgo.unlock(otp)
print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8)
else:
print "invalid command"
if __name__ == '__main__':
main() | 2.40625 | 2 |
2017/d5/jumps.py | remarkablerocket/advent-of-code | 0 | 12792989 | #!/usr/bin/env python3
from operator import eq as isequal
def import_input(path):
with open(path, encoding='utf-8') as infile:
return [int(line) for line in infile]
instructions = import_input("input.txt")
class Jumper:
def __init__(self, instructions):
self.instructions = instructions
def solve(self):
steps = 0
i = 0
while 0 <= i < len(self.instructions):
jump = self.instructions[i]
if self.instructions[i] >= 3:
self.instructions[i] -= 1
else:
self.instructions[i] += 1
i += jump
steps += 1
return steps
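# Added hedged note: with the well-known sample offsets [0, 3, 0, 1, -3] from
# Advent of Code 2017 day 5 (part two), Jumper([0, 3, 0, 1, -3]).solve()
# returns 10, since the pointer escapes the list after ten jumps.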
jump = Jumper(instructions)
print(jump.solve())
| 3.53125 | 4 |
domainparsers/imgur.py | petarGitNik/reddit-image-downloader | 3 | 12792990 | #!/usr/bin/python3
"""
This module contains classes for parsing the imgur.com site. It consists of three
classes:
~ ImgurException
~ ImgurFileFormats
~ Imgur
Imgur is the main class and it obtains list of direct image urls that could be
used to download images. Example usage:
```python3
imgur = Imgur('http://imgur.com/gallery/vTTHZ')
imgur.prepare_images()
images = imgur.images
```
imgur.images is a deque of two keyed dictionaries. Example usage:
```python3
for image in images:
print(image['url'], image['filename'])
```
If images need to be downloaded in order they appear in an album, their filenames
have to be numerated. Full examples:
```python3
imgur = Imgur('http://imgur.com/gallery/vTTHZ')
imgur.prepare_images()
imgur.images # These are not guaranteed to appear in order when downloaded
imgur.numerate_images()
images = imgur.images
```
Note: For up to date version of this class visit:
https://github.com/petarGitNik/imgur-downloader
"""
import re
from collections import deque
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
__version__ = 'v0.2'
__status__ = 'Development'
class ImgurException(Exception):
"""
This exception is raised if supplied link is invalid.
"""
pass
class ImgurFileFormats(object):
"""
Contains extensions for file formats that are allowed on imgur. Source:
https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-
Archived:
http://archive.is/89Uky
https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-
"""
JPG = '.jpg'
JPEG = '.jpeg'
PNG = '.png'
GIF = '.gif'
APNG = '.apng'
TIFF = '.tiff'
PDF = '.pdf'
XCF = '.xcf'
#WEBM = '.webm'
#MP4 = '.mp4'
@classmethod
def formats(cls):
"""
Return a set consisting of all class attributes. Class attributes must
not be callable.
"""
formats = set()
for attribute in ImgurFileFormats.__dict__.keys():
if attribute[:2] != '__':
value = getattr(ImgurFileFormats, attribute)
if not callable(value):
formats.add(value)
return formats
class Imgur(object):
"""
Imgur contains all necessary methods to extract image or album images from
imgur link.
"""
def __init__(self, url):
"""
Initiate Imgur object.
"""
self.url = self.sanitize(url)
self.images = deque()
def sanitize(self, url):
"""
Check if the supplied link is valid. If not, raise ImgurException. This
method checks only if the domain is valid.
"""
if re.match('https?\:\/\/(i\.)?imgur\.com\/', url):
if self.is_it_gifv(url):
return self.sanitize_gifv(url)
return url
raise ImgurException('Invalid link.')
def sanitize_gifv(self, url):
"""
Remove 'v' from .gifv
"""
pattern = 'https?\:\/\/i\.imgur\.com\/[a-zA-Z0-9]+\.gif'
return re.match(pattern, url).group(0)
def is_it_gifv(self, url):
"""
Check if the supplied link points to .gifv page.
"""
if '.gifv' in url:
return True
return False
def is_it_image(self):
"""
        Check if the url points to an image. Examples:
http(s)://i.imgur.com/[image_hash].[extension]
http(s)://i.imgur.com/[image_hash]
http(s)://imgur.com/[image_hash]
"""
# https*\:\/\/(i\.)?imgur\.com\/[a-zA-Z0-9]*(\.[a-zA-Z]{1,4})?
return not self.is_it_album()
def is_it_album(self):
"""
Check if the url points to an album. Examples:
http(s)://imgur.com/a/[album_hash]
http(s)://imgur.com/gallery/[album_hash]
"""
return ('/a/' in self.url) or ('/gallery/' in self.url)
def is_it_grid(self):
"""
Check if the url points to a grid view. Example:
http(s)://imgur.com/a/[album_hash]?grid
"""
return self.url.endswith('?grid')
def change_gallery(self):
"""
Change /gallery/ to /a/ in url.
"""
return self.url.replace('/gallery/', '/a/')
def turn_into_grid(self):
"""
Append ?grid to url.
"""
if self.is_it_album():
if not self.is_it_grid():
return ''.join([self.change_gallery(), '?grid'])
else:
return self.url
raise ImgurException('Cannot convert single image into album grid.')
def prepare_images(self):
"""
        Parses HTML from the provided url to obtain direct link(s) to image(s).
        If the url already points straight to an image file, it is stored as-is
        without fetching any HTML.
"""
if self.is_it_image():
if self.contains_extension(self.url):
self.images.append(
self.pack_image(self.url, self.get_image_filename(self.url))
)
return
else:
self.parse_and_prepare_images(self.url)
return
grid = self.turn_into_grid()
self.parse_and_prepare_images(grid)
return
def parse_and_prepare_images(self, url):
"""
Obtain and parse html, and append image dictionaries to image deque.
"""
pattern = '\{"hash":"([a-zA-Z0-9]+)".*?"ext":"([\.a-zA-Z0-9\?\#]+)".*?\}'
try:
html = urlopen(url).read().decode('utf-8')
filenames_with_duplicates = re.findall(pattern, html)
filenames_clean = self.remove_duplicates(filenames_with_duplicates)
urls = self.build_image_url_list(filenames_clean)
for url in urls:
self.images.append(
self.pack_image(url, self.get_image_filename(url))
)
except HTTPError as e:
print(e.status)
except URLError as e:
print(e.reason)
def build_image_url_list(self, filenames):
"""
Build list of direct links to images. Input filenames list is a list of
tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks
like:
['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg']
"""
urls = []
for filename, extension in filenames:
urls.append(''.join(['https://i.imgur.com/', filename, extension]))
return urls
def remove_duplicates(self, filenames):
"""
Remove duplicates from a list of tuples containing filenames with
extensions.
"""
clean = []
for filename in filenames:
if filename not in clean:
clean.append(filename)
return clean
def contains_extension(self, url):
"""
Check if the image url contains extension. If there is an extension it
is returned. Otherwise, None is returned.
"""
for extension in ImgurFileFormats.formats():
if extension in url:
return extension
return None
def get_image_filename(self, url):
"""
Get image file name from its url. Examples:
https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg
https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg
"""
candidate = url.split('/')[-1]
extension = self.contains_extension(url)
pattern = ''.join(['.+\\', extension])
return re.match(pattern, candidate).group(0)
def pack_image(self, url, filename):
"""
Returns a dictionary with image url and corresponding filename.
"""
return {'url' : url, 'filename' : filename}
def number_of_images(self):
"""
Get the number of images from the images attribute.
"""
return len(self.images)
def numerate_images(self):
"""
Append ordinal number to image filename.
"""
total = self.digits_in_a_number(len(self.images))
ordinal = '{0:0%dd}' % total
for index, image in enumerate(self.images, start=1):
image['filename'] = ''.join([
ordinal.format(index), '-', image['filename']
])
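        # Added illustrative note: with 12 queued images the first entry's
        # filename 'jedEzFL.jpg' would become '01-jedEzFL.jpg', so downloaded
        # files keep the original album order.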
def digits_in_a_number(self, number):
"""
Return how many digits are there in a number.
"""
return len(str(number))
| 2.984375 | 3 |
pyfibre_analysis_tools/__init__.py | franklongford/pyfibre_analysis_scripts | 0 | 12792991 | from .analysis_tools import load_databases # noqa: E501
from .plotting import confidence_ellipse, scatter, plot_roc_curve, plot_lda_analysis # noqa: E501
| 1.085938 | 1 |
sa.py | Crismaria11/Lab03_SecDS | 0 | 12792992 | import pefile
import numpy as np
# import os
execs = [
"1F2EB7B090018D975E6D9B40868C94CA",
"33DE5067A433A6EC5C328067DC18EC37",
"65018CD542145A3792BA09985734C12A",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"A316D5AECA269CA865077E7FFF356E7D",
"<KEY>",
"AL65_DB05DF0498B59B42A8E493CF3C10C578",
"B07322743778B5868475DBE66EEDAC4F",
"B98hX8E8622C393D7E832D39E620EAD5D3B49",
"BVJ2D9FBF759F527AF373E34673DC3ACA462",
"DS22_A670D13D4D014169C4080328B8FEB86",
"EEE99EC8AA67B05407C01094184C33D2B5A44",
"F6655E39465C2FF5B016980D918EA028",
"F8437E44748D2C3FCF84019766F4E6DC",
"<KEY>",
"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690",
"<KEY>",
"FTTR9EA3C16194CE354C244C1B74C46CD92E",
"<KEY>",
"GFT4_7DDD3D72EAD03C7518F5D47650C8572",
"<KEY>",
"<KEY>",
"JKK8CA6FE7A1315AF5AFEAC2961460A80569",
"<KEY>",
"<KEY>",
"L11_1415EB8519D13328091CC5C76A624E3D",
"NBV_8B75BCBFF174C25A0161F30758509A44",
"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4",
"PL98_BD8B082B7711BC980252F988BB0CA936",
"POL55_A4F1ECC4D25B33395196B5D51A06790",
"QW2_4C6BDDCCA2695D6202DF38708E14FC7E",
"RTC_7F85D7F628CE62D1D8F7B39D8940472",
"SAM_B659D71AE168E774FAAF38DB30F4A84",
"TG78Z__727A6800991EEAD454E53E8AF164A99C",
"VBMM9_149B7BD7218AAB4E257D28469FDDB0D",
"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E",
]
prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
"sectionVS": [], "sectionSR": [], "kernel32": [], "msvcrt": [], "shell32": [],
"user32": [], "ws232": [], "ADVAPI32": [], "GDI32": [], "KERNEL32": [],
"NETAPI32": [], "PSAPI": [], "WININET": [], "ntdll": [], "TimeStamp": None}
# pe = pefile.PE("65018CD542145A3792BA09985734C12A")
# algo = [10, 20, 30, 40, 50]
granPrueba = []
entrysList = []
for a in execs:
sectionNames = []
sectionVA = []
sectionVS = []
sectionSR = []
kernel32 = []
msvcrt = []
shell32 = []
user32 = []
ws232 = []
ADVAPI32 = []
GDI32 = []
KERNEL32 = []
NETAPI32 = []
PSAPI = []
WININET = []
ntdll = []
# print(execs.index(a) + 1)
print("a")
print(a)
c = execs.index(a) + 1
pe = pefile.PE(a)
prueba["correlativo"] = c
prueba["nameExec"] = a
print(c)
print("Secciones")
for section in pe.sections:
print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData)
b = section.Name
sectionNames.append(b.decode('utf-8'))
sectionVA.append(section.VirtualAddress)
sectionVS.append(section.Misc_VirtualSize)
sectionSR.append(section.SizeOfRawData)
prueba["sectionName"] = sectionNames
prueba["sectionVA"] = sectionVA
prueba["sectionVS"] = sectionVS
prueba["sectionSR"] = sectionSR
print()
print()
print("Entradas")
for entry in pe.DIRECTORY_ENTRY_IMPORT:
print('Llamadas DLL:')
print (entry.dll)
l = entry.dll
print('Llamadas a funciones:')
entrysList.append(str(l.decode('utf-8')))
if str(entry.dll) == "b'KERNEL32.DLL'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
kernel32.append(x.decode('utf-8'))
prueba["kernel32"] = kernel32
elif str(entry.dll) == "b'ADVAPI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ADVAPI32.append(x.decode('utf-8'))
prueba["ADVAPI32"] = ADVAPI32
elif str(entry.dll) == "b'GDI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
GDI32.append(x.decode('utf-8'))
prueba["GDI32"] = GDI32
elif str(entry.dll) == "b'KERNEL32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
KERNEL32.append(x.decode('utf-8'))
prueba["KERNEL32"] = KERNEL32
elif str(entry.dll) == "b'NETAPI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
NETAPI32.append(x.decode('utf-8'))
prueba["NETAPI32"] = NETAPI32
elif str(entry.dll) == "b'PSAPI.DLL'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
PSAPI.append(x.decode('utf-8'))
prueba["PSAPI"] = PSAPI
elif str(entry.dll) == "b'WININET.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
WININET.append(x.decode('utf-8'))
prueba["WININET"] = WININET
elif str(entry.dll) == "b'ntdll.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ntdll.append(x.decode('utf-8'))
prueba["ntdll"] = ntdll
elif str(entry.dll) == "b'MSVCRT.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
msvcrt.append(x.decode('utf-8'))
prueba["msvcrt"] = msvcrt
elif str(entry.dll) == "b'SHELL32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
shell32.append(x.decode('utf-8'))
prueba["shell32"] = shell32
elif str(entry.dll) == "b'USER32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
user32.append(x.decode('utf-8'))
prueba["user32"] = user32
elif str(entry.dll) == "b'WS2_32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ws232.append(x.decode('utf-8'))
prueba["ws232"] = ws232
# listamalware = os.listdir(path)
print()
print()
print("TimeStamp")
print("TimeDateStamp : " + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1])
z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]
print(z)
prueba["TimeStamp"] = z
print(c)
# print()
# print()
# print(pe.FILE_HEADER.NumberOfSections)
granPrueba.append(prueba)
prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
"sectionVS": [], "sectionSR": None, "kernel32": None, "msvcrt": None, "shell32": None,
"user32": None, "ws232": None, "TimeStamp": None}
# print(granPrueba)
import pandas as pd
df = pd.DataFrame(granPrueba)
print(df)
# print(entrysList)
def unique(list1):
x = np.array(list1)
print(np.unique(x))
unique(entrysList)
df.to_csv("dataset.csv")
| 1.335938 | 1 |
main.py | roesel/growth | 1 | 12792993 | # -*- coding: utf-8 -*-
''' The main script to run, enables profiling. '''
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from crystal import *
from plot_init_tools import *
def main(num_of_growths):
''' Main method to start simulation. Uncomment specific simulation or write a new one. '''
# Main simulation crystal, x is dimensions (m=n=x)
# Stairs
x, init = make_init("stairs", 200)
c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
border_policy="loop", use_height=False)
c.grow(num_of_growths)
plot_crystal(c)
# # Step
# x, init = make_init("step", 200)
# c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
# border_policy="loop")
# #c.print_grid()
# c.grow(num_of_growths)
# #c.print_grid()
# plot_crystal(c)
#
# # Screw
# x, init = make_init("screw", 200)
# c = Crystal(x, x, initial_grid=init.copy(), mode="spin", hist_int=int(num_of_growths/8), \
# border_policy="flex")
# #c.print_grid()
# c.grow(num_of_growths)
# #c.print_grid()
# plot_crystal(c)
# # A crystal object serving to visualize only "what grew" without init state
# d = Crystal(x, x, initial_grid=(c.grid-init))
# plot_crystal(d, 2)
# Show history of simulation
plot_history(c)
# # Generate a publishable plot
# plot_out(c)
def profile():
''' Function used to profile code for speedups. '''
import cProfile
cProfile.run('main(50)', 'pstats')
from pstats import Stats
p = Stats('pstats')
p.strip_dirs().sort_stats('time').print_stats(10)
main(50)
#profile()
| 2.859375 | 3 |
setup.py | TomaLaPlazaConCabeza/web-app | 0 | 12792994 | <reponame>TomaLaPlazaConCabeza/web-app
from setuptools import find_packages, setup
with open("README.md") as handle:
LONG_DESCRIPTION = handle.read()
setup(
name="web_app",
description="Wep App for TomaLaPlazaConCabeza",
long_description=LONG_DESCRIPTION,
version="0.1.0-dev",
author="TomaLaPlazaConCabeza",
author_email="<EMAIL>",
url="https://github.com/TomaLaPlazaConCabeza/web-app",
license="BSD-3-clause",
packages=find_packages(),
python_requires=">=3.8",
zip_safe=False,
install_requires=[
"Flask>=1.1.2,<1.2.0",
"numpy>=1.18.4,<1.19.0",
"Shapely>=1.7.0,<1.8.0",
"descartes>=1.1.0,<1.2.0",
],
)
| 1.4375 | 1 |
src/basic/go_cache.py | invokerrrr/alphago_weak | 0 | 12792995 | # -*- coding: utf-8 -*-
from os import path, getcwd, makedirs, listdir, remove
from typing import *
import pickle
from abc import ABCMeta, abstractmethod
from sgfmill.sgf import Sgf_game
import numpy as np
from .go_types import *
__all__ = ["set_cache_dir", "get_cache_dir", "get_game_dir", "get_archive_dir",
"get_array_dir", "GameData", "GameArchive",
"GameDatabase", "ArrayDatabase"]
default_cache_dir = path.join(path.dirname(path.realpath(__file__)), "../..", ".data")
cache_dir = default_cache_dir
archive_folder = path.join(cache_dir, ".kgs")
game_folder = path.join(cache_dir, ".game")
array_folder = path.join(cache_dir, ".array")
def set_cache_dir(directory: Optional[str] = None) -> NoReturn:
global cache_dir, archive_folder, game_folder, array_folder
if directory is None:
directory = default_cache_dir
cache_dir = path.join(getcwd(), directory)
archive_folder = path.join(cache_dir, ".kgs")
game_folder = path.join(cache_dir, ".game")
array_folder = path.join(cache_dir, ".array")
makedirs(get_cache_dir(), exist_ok=True)
makedirs(get_archive_dir(), exist_ok=True)
makedirs(get_game_dir(), exist_ok=True)
makedirs(get_array_dir(), exist_ok=True)
def get_cache_dir() -> str:
return cache_dir
def get_archive_dir() -> str:
return archive_folder
def get_game_dir() -> str:
return game_folder
def get_array_dir() -> str:
return array_folder
class GameData(NamedTuple):
size: int
winner: GoPlayer
sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]]
komi: float
setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]]
@classmethod
def from_sgf(cls, sgf_game: Sgf_game):
size = sgf_game.get_size()
winner = GoPlayer.to_player(sgf_game.get_winner())
sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]),
(node.get_move() for node in sgf_game.get_main_sequence())))
komi = sgf_game.get_komi()
setup_stones = sgf_game.get_root().get_setup_stones()
return cls(size, winner, sequence, komi, setup_stones)
@staticmethod
def from_pickle(name: str, size: Union[int, str] = 19):
with open(path.join(get_game_dir(), str(size), name), "rb") as f:
return pickle.load(f)
@staticmethod
def pickle_exists(name: str, size: Union[int, str] = 19):
return path.exists(path.join(get_game_dir(), str(size), name))
def to_pickle(self, name: str):
makedirs(self.root(), exist_ok=True)
dest = self.path(name)
with open(dest, "wb") as f:
pickle.dump(self, f)
def root(self):
return path.join(get_game_dir(), str(self.size))
def path(self, name: str):
return path.join(self.root(), name)
class GameArchive(metaclass=ABCMeta):
name = "none"
@classmethod
def archive_map(cls):
_dict = {_cls.name: _cls for _cls in cls.__subclasses__()}
for v in cls.__subclasses__():
_dict.update(v.archive_map())
return _dict
@abstractmethod
def retrieve(self, force=False) -> NoReturn:
"""
        Retrieve all archives available from the Internet.
        :param force: whether to force downloading an archive even if it already exists
"""
pass
@abstractmethod
def extract(self, force=False) -> NoReturn:
"""
        Extract all game archives to the game cache folder; every single file should
        end with `.game.pkl` and start with the size of the board.
"""
pass
@abstractmethod
def unpack(self, force=False) -> NoReturn:
"""
        Unpack all downloaded game archives.
        :param force: whether to force unpacking an archive even if it has already been unpacked
"""
pass
def download(self, force=False):
self.retrieve(force=force)
self.unpack(force=force)
self.extract(force=force)
class GameDatabase:
def __init__(self, size=19):
self.size = size
def __len__(self):
return len(self.keys())
def __getitem__(self, name: str) -> GameData:
return GameData.from_pickle(name, self.size)
def __setitem__(self, name: str, data: GameData):
data.to_pickle(name)
def __delitem__(self, name: str):
remove(path.join(get_game_dir(), str(self.size), name))
def __contains__(self, name: str):
return path.exists(path.join(get_game_dir(), str(self.size), name))
def __eq__(self, other):
if isinstance(other, GameDatabase):
return self.size == other.size
return NotImplemented
def root(self):
return path.join(get_game_dir(), str(self.size))
def keys(self) -> List[str]:
return listdir(self.root())
def values(self) -> Iterable[GameData]:
for key in self.keys():
yield self[key]
def items(self) -> Iterable[Tuple[str, GameData]]:
for key in self.keys():
yield key, self[key]
class ArrayDatabase:
def __init__(self, method: str, size=19):
self.size = size
self.method = method
makedirs(self.root(), exist_ok=True)
def __len__(self):
return len(self.keys())
def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]:
file = path.join(self.root(), key)
with open(file, "rb") as f:
return pickle.load(f)
def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]):
file = path.join(self.root(), key)
with open(file, "wb") as f:
pickle.dump(value, f)
def __delitem__(self, key: str):
file = path.join(self.root(), key)
remove(file)
def __contains__(self, key: str):
return path.exists(path.join(self.root(), key))
def root(self):
return path.join(get_array_dir(), str(self.size), self.method)
def keys(self) -> List[str]:
return listdir(self.root())
def values(self) -> Iterable[np.ndarray]:
for key in self.keys():
yield self[key]
def items(self) -> Iterable[Tuple[str, np.ndarray]]:
for key in self.keys():
yield key, self[key]
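# Hedged usage sketch (added comment, not original code), assuming games have
# already been downloaded and extracted by a GameArchive subclass:
#     set_cache_dir()                  # defaults to a .data folder next to the package
#     db = GameDatabase(size=19)
#     for name, game in db.items():    # game is a GameData named tuple
#         print(name, game.winner, len(game.sequence))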
| 2.484375 | 2 |
test/integration/targets/script/files/no_shebang.py | Container-Projects/ansible-provider-docs | 37 | 12792996 | <reponame>Container-Projects/ansible-provider-docs
import sys
sys.stdout.write("Script with shebang omitted")
| 1.328125 | 1 |
god_zip.py | mbmccoy/voice_of_god | 2 | 12792997 | from collections import defaultdict
import gzip
import os
import random
import textwrap
class Heresy(Exception):
"""You have defiled the word of God!"""
pass
def bits(byte_string):
"""Generates a sequence of bits from a byte stream"""
for byte in byte_string:
for bit_num in range(8):
# Extract bit from byte
byte, bit = byte >> 1, byte % 2
yield bit
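# Added illustrative note: bits() is little-endian within each byte, i.e. the
# least significant bit comes out first, so list(bits(b"\x01")) yields
# [1, 0, 0, 0, 0, 0, 0, 0] and list(bits(b"\x80")) yields [0]*7 + [1].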
def generate_ngram_dict(filename, tuple_length):
"""Generate a dict with ngrams as key following words as value
:param filename: Filename to read from.
:param tuple_length: The length of the ngram keys
:return: Dict of the form {ngram: [next_words], ... }
"""
def file_words(file_pointer):
"""Generator for words in a file"""
for line in file_pointer:
for word in line.split():
yield word
ngrams = defaultdict(lambda: set())
with open(filename, 'r') as fp:
word_list = []
for word in file_words(fp):
if len(word_list) < tuple_length:
word_list.append(word)
continue
ngrams[tuple(word_list)].add(word)
word_list = word_list[1:] + [word]
return {key: tuple(val) for key, val in ngrams.items()}
class GodZip(object):
"""Turn unholy bits into holy words!"""
hallelujah = "Sayeth the Lord:\n\n"
amen = "\n\nAmen."
def __init__(self, tuple_length=3, line_width=70, compress=True):
self.compress = compress
self.line_width = line_width
self.tuple_length = tuple_length
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt')
self.god_grams = generate_ngram_dict(data_path, tuple_length)
self.capital_tuples = [key for key, value in self.god_grams.items()
if key[0][0].isupper()]
def praise(self, unholy_bytes):
"""Encode unholy bytes or unholy unicode into Holy text"""
if not unholy_bytes:
raise Heresy("Thou shalt not be silent in the face of the Lord!")
if not isinstance(unholy_bytes, bytes):
unholy_bytes = unholy_bytes.encode()
if self.compress:
unholy_bytes = gzip.compress(unholy_bytes)
# Start with a capitalized tuple
speech_of_god = list(random.choice(self.capital_tuples))
for bit in bits(unholy_bytes):
holy_tuple = tuple(speech_of_god[-self.tuple_length:])
holy_words = self.god_grams[holy_tuple]
# Make sure that we have some words to choose from
while len(holy_words) <= 1:
chosen_word = holy_words[0]
speech_of_god.append(chosen_word)
holy_tuple = tuple(speech_of_god[-self.tuple_length:])
holy_words = self.god_grams[holy_tuple]
# Select from even indices if bit == 0, odd if bit == 1
chosen_word = random.choice(holy_words[bit::2])
speech_of_god.append(chosen_word)
holy_sentences = ' '.join(speech_of_god).split('. ')
annotated_speech_of_god = '.\n\n'.join(
[
'\n'.join(textwrap.wrap("[{}] ".format(idx + 1) + holy_phrase, width=self.line_width))
for idx, holy_phrase in enumerate(holy_sentences)
]
)
return self.hallelujah + annotated_speech_of_god + self.amen
def reveal_from_words(self, holy_words):
"""Decode a list of holy words into unholy bytes."""
try:
holy_tuple = tuple(holy_words[:self.tuple_length])
except:
raise Heresy("You mock the word of God!")
unholy_bytes = b''
unholy_num = 0
bit_counter = 0
for holy_word in holy_words[self.tuple_length:]:
try:
holy_ngram_list = self.god_grams[holy_tuple]
except:
raise Heresy("Thou shalt not modify the word of God!")
holy_tuple = tuple(holy_tuple[1:] + (holy_word,))
if len(holy_ngram_list) <= 1:
continue
try:
unholy_bit = holy_ngram_list.index(holy_word) % 2
except:
raise Heresy("Not one word of God shall be changed!")
unholy_num |= unholy_bit << bit_counter
bit_counter += 1
if bit_counter % 8 == 0:
unholy_bytes += bytes([unholy_num])
unholy_num = 0
bit_counter = 0
if self.compress:
unholy_bytes = gzip.decompress(unholy_bytes)
return unholy_bytes
def reveal(self, annotated_speech):
"""Decode holy speech into bytes"""
split_annotated_speech = annotated_speech.split('\n\n')
# Check for hallelujah and amen
if split_annotated_speech[0] != self.hallelujah.strip() \
or split_annotated_speech[-1] != self.amen.strip():
raise Heresy("Your praise is insufficient!")
# Remove hallelujah and amen
try:
holy_annotated_sentences = split_annotated_speech[1:-1]
except:
raise Heresy("The word of God will not be silenced!")
# Remove line annotations
try:
holy_words = ' '.join([sentence.split('] ')[1]
for sentence in holy_annotated_sentences]).split()
except:
raise Heresy("How dare you imitate the word of God!")
return self.reveal_from_words(holy_words)
def hex_expand(byte_str):
return ':'.join('{:02x}'.format(byte) for byte in byte_str)
if __name__ == '__main__':
god = GodZip(compress=False)
hello_world = "Hello world!"
print("I praise unto God: %s\n\n" % hello_world)
holy_hello_world = god.praise(hello_world)
print(holy_hello_world)
assert(hello_world == god.reveal(holy_hello_world).decode())
| 3.421875 | 3 |
backend/apps/users/urls.py | tomoya-kwansei/emonotateV2 | 0 | 12792998 | <reponame>tomoya-kwansei/emonotateV2
from rest_framework.routers import DefaultRouter
from django.conf.urls import url, include
from .views import *
router = DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'curves', CurveViewSet, basename='curves')
router.register(r'contents', ContentViewSet, basename='contents')
router.register(r'youtube', YouTubeContentViewSet, basename='youtube')
router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes')
router.register(r'requests', RequestViewSet, basename='requests')
urlpatterns = router.urls
urlpatterns += [url(r'sign_s3/$', sign_s3)]
| 2.3125 | 2 |
tests/conftest.py | JackGuyver/pyansys | 0 | 12792999 | import socket
import os
import pytest
import pyvista
from pyansys.misc import get_ansys_bin
import pyansys
from pyansys.errors import MapdlExitedError
pyvista.OFF_SCREEN = True
# check for a valid MAPDL install with CORBA
valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201']
EXEC_FILE = None
for rver in valid_rver:
if os.path.isfile(get_ansys_bin(rver)):
EXEC_FILE = get_ansys_bin(rver)
if 'PYANSYS_IGNORE_ANSYS' in os.environ:
HAS_ANSYS = False
else:
HAS_ANSYS = EXEC_FILE is not None
skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason="Requires ANSYS installed")
modes = ['corba']
# if os.name == 'posix': # console only for linux
# modes.append('console')
collect_ignore = []
if not HAS_ANSYS:
collect_ignore.append("test_post.py")
@pytest.fixture(scope="session", params=modes)
def mapdl():
# launch in shared memory parallel for Windows VM
# configure shared memory parallel for VM
additional_switches = ''
if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB':
additional_switches = '-smp'
elif os.name == 'posix':
os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp
mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba',
additional_switches=additional_switches)
mapdl._show_matplotlib_figures = False # don't show matplotlib figures
yield mapdl
### test exit ###
# must be after yield as this uses a module scoped fixture
mapdl.exit()
assert mapdl._exited
with pytest.raises(RuntimeError):
mapdl.prep7()
assert not os.path.isfile(mapdl._lockfile)
assert 'MAPDL exited' in str(mapdl)
with pytest.raises(MapdlExitedError):
mapdl.prep7()
| 1.820313 | 2 |
bm25.py | ChosenOne2241/BM25 | 0 | 12793000 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Description: Build a structural data from orginial Cranfield collection and
# implement the BM25 alogrithm information retrieval;
# also 5 evaluation methods (precision, recall, MAP, P at N and
# NDCG at N) are applied.
# Tested under Python 3.5 on Ubuntu 16.04.
# Author: '(<NAME>.)
# Date created: 2018-05-07
# Here are some Python standard modules used in the script.
import argparse
# Used to parse program arguments.
# More details are here: https://docs.python.org/3/library/argparse.html
import readline
# Used to create a typing history buffer for `manual` mode.
# More details are here: https://docs.python.org/3/library/readline.html
import json
# Used to create a human-readable JSON file for index information and the like.
import string # Used to do some regex operations.
import math
import os
# Here are some Python libraries that places locally.
import porter
STOP_WORDS_PATH = "stopwords.txt"
DOCUMENT_PATH = "./cran/cran.all.1400"
QUERY_PATH = "./cran/cran.qry"
RELEVANCE_PATH = "./cran/cranqrel"
INDEX_PATH = "index.json"
EVALUATION_PATH = "evaluation_output.txt"
# Labels in `cran.all.1400` and `cranqrel` text files.
ID = ".I"
TITLE = ".T"
AUTHORS = ".A"
BIBLIOGRAPHY = ".B"
WORDS = ".W"
LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS]
CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS]
DELIMITER_SYMBOL = "*"
BOUNDARY_LENGTH = 80
# It decides the length of the boundary between two `manual` queries.
MOST_RELEVANT = 15
# At most top `MOST_RELEVANT` results are returned for each query.
USER_STOP_WORD = "QUIT"
# When user types `USER_STOP_WORD`, the program ends; it is case-sensitive.
RELEVANCE_SCORE_THRESHOLD = 4
# Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD`
# from `QUERY_PATH`. The default value is 4 (-1, 1, 2, 3, 4), which means all
# documents in it will be reserved.
RELEVANCE_SCORE_FIX = 5
# It is a number used as minuend to convert original relevance scores to
# NDCG-friendly ones.
# Constants used in BM25 model.
K = 1.0
B = 0.75
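# Added note: K and B play the role of k1 and b in the standard Okapi BM25
# weight, which for a term occurring f times in a document of normalised
# length L (document length divided by the average length) is roughly
#     idf * f * (K + 1) / (f + K * (1 - B + B * L)).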
# A constant used in Precision at N and NDCG at N.
# If `MOST_RELEVANT` is equal to `N`, precision will be the same as P at N for Cranfield collection.
# N.B.: `N` cannot be larger than `MOST_RELEVANT`.
N = 10
def is_number(word):
""" A helper function to check if a string can be converted to an integer.
Used to process documents and queries.
"""
try:
int(word)
return True
except ValueError:
return False
def is_valid(word):
""" A helper function to check if a string is valid.
Used to process documents and queries.
"""
if word != "" and word not in stop_words and not is_number(word):
return True
else:
return False
def get_arguments():
parser = argparse.ArgumentParser(description = "A script used to build BM25 model and relative evaluation methods. If the index JSON file is not available, just type `python3 bm25.py` to generate one in the working directory and extra arguments will be ignored in this case")
parser.add_argument("-m", required = False, choices = ["manual", "evaluation"], default = "manual", help = "mode selection; `manual` mode is chosen by default if it is not specified")
parser.add_argument("-o", required = False, nargs = "?", const = EVALUATION_PATH, metavar = "FILE NAME", help = "BM25 evaluation result output in lines of 3-tuples (query ID, document ID, and its rank [1 - 15]) form; if `FILE NAME` is not given, the default output file name is `evaluation_output.txt`")
return parser.parse_args()
def load_stop_words():
stop_words = set()
with open(STOP_WORDS_PATH, "r") as fp:
for line in fp:
stop_words.add(line.rstrip())
return stop_words
def process_documents():
""" Build vectors of each term and calculate lengths of each documents.
Also a dictionary containing pairs of original words and stemmed words
are returned.
"""
def add_new_word(word):
# A helper function to add a new word in `term_vectors`.
if word not in stemming:
stemming[word] = stemmer.stem(word)
stemmed_word = stemming[word]
if stemmed_word not in term_vectors:
term_vectors[stemmed_word] = {}
if document_ID in term_vectors[stemmed_word]:
(term_vectors[stemmed_word])[document_ID] += 1
else:
term_vectors[stemmed_word].update({document_ID : 1})
stemming = {}
term_vectors = {}
# `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID : [Value] Appearance Times}}.
document_lengths = {}
average_length = 0.0
num_of_documents = 0
with open(DOCUMENT_PATH, "r") as fp:
document_ID = 0
length = 0.0
for line in fp:
current_section = line[0 : 2]
if current_section in LABELS:
if current_section == ID:
document_lengths[document_ID] = math.sqrt(length)
# Calculate the previous document length and start a new one.
# The empty entry for document 0 is also created although
# in Cranfield collection, document ID begins from 001.
average_length += document_lengths[document_ID]
document_ID += 1
# Ignore original document IDs, which is the numbers followed by ".I",
# since they may not be consecutive.
num_of_documents += 1
length = 0.0
section = current_section
continue # Update and go to next line immediately.
elif section in CONTENTS:
line = line.translate(removing_punctuation_map)
line = line.replace("--", " ")
# Also, treat two consecutive hyphens as a space.
for term in line.split():
# Split according to whitespace characters and deal with two special cases:
# abbreviations with "." and hyphenated compounds.
term = term.replace(".", "")
# Remove full stops in one term, used to convert abbreviations
# like "m.i.t." (line 1222) / "u.s.a." (line 32542) into "mit" / "usa".
# In the meantime, something like "..e.g.at" (line 17393),
# "i.e.it" (line 17287), "trans.amer.math.soc.33" (line 31509),
# or "studies.dash" (line 516) will not be handled as expected.
                    # All floating-point numbers like "3.2x10" (line 18799) and "79.5degree"
                    # (line 20026) will simply have their full stops removed.
# And similarly, phrases like "m. i. t." (line 36527) and
# "i. e." (line 11820) will be ignored.
# "r.m.s." (line 20241) will become "rm" stored in the dictionary after stemming.
compound = term.replace("-", "")
if is_valid(compound):
add_new_word(compound)
if section == WORDS:
length += 1.0
# Treat a compound word as one word; words in `AUTHORS`
# and `BIBLIOGRAPHY` section will not be counted.
term_split = term.split("-")
if len(term_split) > 1:
                        # If there is only one item in `term_split`, the word contains no hyphen.
                        # A term may also end with a hyphen, as in "sub- and" (line 14632),
                        # which creates an extra empty string and makes term_split
                        # look like ["sub", ""].
for element in term_split:
# Deal with each part of compound words like "two-step" (line 38037) or
# type names like "75s-t6" (line 28459) or "a52b06" (line 25717).
if is_valid(element):
add_new_word(element)
# Filter out all pure integers; for example, for "f8u-3" (line 35373),
# both "f8u" and "f8u3" will be saved, but not "3".
# Calculate the last length since Cranfield collection does not have ending symbols.
document_lengths[document_ID] = math.sqrt(length)
# Skip the document with index 0 from document length vector.
del document_lengths[0]
average_length = (document_lengths[document_ID] + average_length) / num_of_documents
for document in document_lengths.keys():
document_lengths[document] = document_lengths[document] / average_length
# Now document_lengths stores a normalised length for each document.
return stemming, term_vectors, document_lengths
def process_single_query(query):
""" Process single line text.
Used by `process_queries` function and `manual` mode.
"""
def add_new_word(word):
# A helper function to add a new word in `query_terms`.
if word not in stemming:
stemming[word] = stemmer.stem(word)
stemmed_word = stemming[word]
if stemmed_word not in query_terms:
query_terms.append(stemmed_word)
query_terms = []
query = query.strip()
query = query.translate(removing_punctuation_map)
query = query.replace("--", " ")
for term in query.split():
term = term.replace(".", "").lower()
compound = term.replace("-", "")
if is_valid(compound):
add_new_word(compound)
term_split = term.split("-")
if len(term_split) > 1:
for element in term_split:
if is_valid(element):
add_new_word(element)
return query_terms
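# Illustrative example (the exact output depends on the stop word list and the
# Porter stemmer): a query such as "heat transfer in two-step flow" would be
# tokenised into stemmed terms roughly like
# ["heat", "transfer", "twostep", "two", "step", "flow"],
# with stop words, pure numbers, and punctuation removed.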
def process_queries():
with open(QUERY_PATH, "r") as fp:
query_list = {}
query = []
query_ID = 0
for line in fp:
current_section = line[0 : 2]
if current_section in LABELS:
if current_section == ID:
query_list[query_ID] = query
query = []
query_ID += 1
                    # Ignore the original query IDs (the numbers following ".I"),
                    # since they are not consecutive.
if current_section == WORDS:
section = current_section
continue
elif section in CONTENTS:
if query == []:
query = process_single_query(line)
else:
query += process_single_query(line)
query_list[query_ID] = query # Add the last entry.
del query_list[0] # Skip the first one.
return query_list
def bm25_similarities(query):
""" It returns a descending list with at most top `MOST_RELEVANT` pairs
(Document ID, Similarity) based on BM25 to calculate similarities.
"""
similarities = []
for document_ID in range(1, nums_of_documents + 1):
# Document ID begins from 1.
similarity = 0.0
for term in query:
if term in term_vectors and document_ID in term_vectors[term]:
frequency = (term_vectors[term])[document_ID]
n_i = len(term_vectors[term])
idf = math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2)
similarity += frequency * (1.0 + K) / (frequency + K * ((1.0 - B) + B * document_lengths[document_ID])) * idf
if similarity > 0.0: # Ignore the one with similarity score 0.
pair = (document_ID, similarity)
similarities.append(pair)
    # Sort results in descending order.
similarities = sorted(similarities, key = lambda x : x[1], reverse = True)
if len(similarities) > MOST_RELEVANT:
return similarities[0 : MOST_RELEVANT]
else:
return similarities
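# A small worked example of the BM25 weight for a single query term
# (illustrative numbers only): with K = 1.0, B = 0.75, 1400 documents in the
# collection, a term appearing in n_i = 50 of them, a within-document frequency
# of 3, and a normalised document length of 1.2:
#   idf    = log2((1400 - 50 + 0.5) / (50 + 0.5)) ≈ 4.74
#   weight = 3 * (1 + 1.0) / (3 + 1.0 * ((1 - 0.75) + 0.75 * 1.2)) * idf ≈ 6.85
# The similarity score of a document is the sum of such weights over all query terms.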
def manual_mode():
""" When in `manual` mode, the function will not end until user types "QUIT".
"""
while True:
print(DELIMITER_SYMBOL * BOUNDARY_LENGTH)
# Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default
# width of terminal window.
user_query = input("Enter query (type \"QUIT\" to terminate): ")
if user_query == USER_STOP_WORD:
break
query_terms = process_single_query(user_query)
print("Results for query " + str(query_terms))
print("Rank\tID\tScore")
rank = 1
for result in bm25_similarities(query_terms):
print("{0}\t{1}\t{2}".format(str(rank), result[0], str(result[1])), end = "\n")
rank += 1
def load_relevance_scores():
relevance_scores = {}
# `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}
with open(RELEVANCE_PATH, "r") as fp:
for line in fp:
fields = line.split()
query_ID = int(fields[0])
pair = (int(fields[1]), int(fields[2]))
if query_ID in relevance_scores:
relevance_scores[query_ID].append(pair)
# It assumes no repetition of document IDs for each query.
else:
relevance_scores[query_ID] = [pair]
for query_ID in relevance_scores:
        # Sort pairs in ascending order for each query; the smaller the relevance
        # score, the more relevant the document.
relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1])
return relevance_scores
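# Each line of the relevance file is assumed to hold a whitespace-separated triple:
# query ID, document ID, relevance score. For example, a (hypothetical) line
# "1 184 2" would mean document 184 was judged with relevance score 2 for query 1.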
def make_query_results():
""" It returns possible relevant documents for each query based on BM25 model.
"""
query_list = process_queries()
query_results = {}
# `query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which is exactly the same structure and length as `relevance_scores`.
for query_ID in query_list:
rank = 1
query_results[query_ID] = []
for pair in bm25_similarities(query_list[query_ID]):
query_results[query_ID].append((pair[0], rank))
rank += 1
return query_results
def make_relevance_set(query_ID): # Relevant documents (Rel).
relevance_set = set()
for pair in relevance_scores[query_ID]:
if pair[1] <= RELEVANCE_SCORE_THRESHOLD:
            # We only include documents whose relevance scores are less than or equal
            # to `RELEVANCE_SCORE_THRESHOLD` here.
relevance_set.add(pair[0])
return relevance_set
def make_retrieval_set(query_ID): # Retrieval documents (Ret).
retrieval_set = set()
for pair in query_results[query_ID]:
retrieval_set.add(pair[0])
return retrieval_set
def precision():
""" It calculates arithmetic mean of precisions for all queries.
"""
precision = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
retrieval_set = make_retrieval_set(query_ID)
appearance_times = 0
for document_ID in retrieval_set:
if document_ID in relevance_set:
appearance_times += 1
precision += appearance_times / len(retrieval_set)
precision = precision / len(query_results)
return precision
def recall():
""" It calculates arithmetic mean of recalls for all queries.
"""
recall = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
retrieval_set = make_retrieval_set(query_ID)
appearance_times = 0
for document_ID in relevance_set:
if document_ID in retrieval_set:
appearance_times += 1
recall += appearance_times / len(relevance_set)
recall = recall / len(query_results)
return recall
def p_at_n(n):
""" It calculates arithmetic mean of precisions at N for all queries.
"""
p_at_n = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
appearance_times = 0
for pair in query_results[query_ID]:
if pair[0] in relevance_set and pair[1] <= n:
appearance_times += 1
p_at_n += appearance_times / n
p_at_n = p_at_n / len(query_results)
return p_at_n
def mean_average_precision():
""" It calculates mean average precision for all queries.
"""
mean_average_precision = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
appearance_times = 0
current_map = 0.0
for pair in query_results[query_ID]:
if pair[0] in relevance_set:
appearance_times += 1
current_map += appearance_times / pair[1]
mean_average_precision += current_map / len(relevance_set)
mean_average_precision = mean_average_precision / len(query_results)
return mean_average_precision
def ndcg_at_n(n):
""" It yields a list of NDCGs at up to N of each query separately.
"""
for query_ID, score_list in relevance_scores.items():
relevance_set = make_relevance_set(query_ID)
score_list_dict = dict(score_list)
        # Convert the list of pairs to a dictionary for convenience.
# Step one: gain vector.
gain_vector = []
for pair in query_results[query_ID]:
if pair[0] in relevance_set:
gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])
# Convert original ranking scores to NDCG-usable scores.
else:
gain_vector.append(0)
# Step two: DCG (Discounted Cumulated Gain).
dcg = [gain_vector[0]]
# Put the first item in `dcg`.
for i in range(1, len(gain_vector)):
dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1])
# Step three: IDCG (Ideal Discounted Cumulated Gain).
ideal_gain_vector = []
for pair in score_list:
ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])
idcg = [ideal_gain_vector[0]]
for i in range(1, len(ideal_gain_vector)):
idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1])
# Step four: NDCG (Normalised Discounted Cumulated Gain) at N.
ndcg_at_n = []
for pair in zip(dcg, idcg):
ndcg_at_n.append(pair[0] / pair[1])
if len(ndcg_at_n) > n:
# And finally, yield at most `n` results for each query.
yield query_ID, ndcg_at_n[0 : n]
else:
yield query_ID, ndcg_at_n
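# A small worked example of the calculation above (illustrative numbers only):
# if the converted gain vector for a query is [3, 0, 2], then
#   DCG  = [3, 3 + 0/log2(2), 3 + 2/log2(3)] ≈ [3.00, 3.00, 4.26]
# and if the ideal gain vector is [4, 3, 2], then
#   IDCG = [4, 4 + 3/log2(2), 7 + 2/log2(3)] ≈ [4.00, 7.00, 8.26]
# giving NDCG ≈ [0.75, 0.43, 0.52] element-wise.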
def print_evaluation_results():
print("Evaluation Results:")
print("Precision: {0}".format(precision()), end = "\n")
print("Recall: {0}".format(recall()), end = "\n")
print("P@{0}: {1}".format(N, p_at_n(N)), end = "\n")
print("Mean Average Precision: {0}".format(mean_average_precision()), end = "\n")
for query_ID, ndcg in ndcg_at_n(N):
print("NDCG@{0} <Query {1}>: {2}".format(N, query_ID, ndcg), end = "\n")
if __name__ == "__main__":
stemmer = porter.PorterStemmer()
stop_words = load_stop_words()
punctuation = string.punctuation[0 : 12] + string.punctuation[14:]
removing_punctuation_map = dict((ord(character), " ") for character in punctuation)
# Remove all punctuations except full stops and hyphens.
args = get_arguments()
if os.path.exists(INDEX_PATH):
print("[Loading BM25 index from file.]")
with open(INDEX_PATH, "r") as fp:
stemming, term_vectors, document_lengths = json.load(fp)
            # Warning: unlike Python, the `dict` type in JSON cannot have `int` keys,
            # therefore a conversion is necessary.
document_lengths = {int(ID) : length for ID, length in document_lengths.items()}
for term, vector in term_vectors.items():
term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()}
nums_of_documents = len(document_lengths)
# It is used in `bm25_similarities()` function.
if args.m == "manual":
manual_mode()
elif args.m == "evaluation":
relevance_scores = load_relevance_scores()
query_results = make_query_results()
print_evaluation_results()
if args.o is not None: # If `-o` option is available.
with open(args.o, "w") as fp:
for query_ID, pair_list in query_results.items():
for pair in pair_list:
fp.write("{0} {1} {2}\n".format(query_ID, pair[0], pair[1]))
else:
        # On the first run, it creates an index JSON file and exits.
print("[Generating the index file.]")
with open(INDEX_PATH, "w") as fp:
json.dump(process_documents(), fp)
| 2.3125 | 2 |
variants/migrations/0013_smallvariantflags_flag_summary.py | brand-fabian/varfish-server | 14 | 12793001 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-14 19:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("variants", "0012_auto_20181114_1914")]
operations = [
migrations.AddField(
model_name="smallvariantflags",
name="flag_summary",
field=models.CharField(
choices=[
("positive", "positive"),
("uncertain", "uncertain"),
("negative", "negative"),
("empty", "empty"),
],
default="empty",
max_length=32,
),
)
]
| 1.664063 | 2 |
exchangelib/services/get_attachment.py | RossK1/exchangelib | 1,006 | 12793002 | from itertools import chain
from .common import EWSAccountService, create_attachment_ids_element
from ..util import create_element, add_xml_child, set_xml_value, DummyResponse, StreamingBase64Parser,\
StreamingContentHandler, ElementNotFound, MNS
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/bodytype
BODY_TYPE_CHOICES = ('Best', 'HTML', 'Text')
class GetAttachment(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getattachment-operation"""
SERVICE_NAME = 'GetAttachment'
element_container_name = '{%s}Attachments' % MNS
def call(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
if body_type and body_type not in BODY_TYPE_CHOICES:
raise ValueError("'body_type' %s must be one of %s" % (body_type, BODY_TYPE_CHOICES))
return self._elems_to_objs(self._chunked_get_elements(
self.get_payload, items=items, include_mime_content=include_mime_content,
body_type=body_type, filter_html_content=filter_html_content, additional_fields=additional_fields,
))
def _elems_to_objs(self, elems):
from ..attachments import FileAttachment, ItemAttachment
cls_map = {cls.response_tag(): cls for cls in (FileAttachment, ItemAttachment)}
for elem in elems:
if isinstance(elem, Exception):
yield elem
continue
yield cls_map[elem.tag].from_xml(elem=elem, account=self.account)
def get_payload(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
payload = create_element('m:%s' % self.SERVICE_NAME)
shape_elem = create_element('m:AttachmentShape')
if include_mime_content:
add_xml_child(shape_elem, 't:IncludeMimeContent', 'true')
if body_type:
add_xml_child(shape_elem, 't:BodyType', body_type)
if filter_html_content is not None:
add_xml_child(shape_elem, 't:FilterHtmlContent', 'true' if filter_html_content else 'false')
if additional_fields:
additional_properties = create_element('t:AdditionalProperties')
expanded_fields = chain(*(f.expand(version=self.account.version) for f in additional_fields))
set_xml_value(additional_properties, sorted(
expanded_fields,
key=lambda f: (getattr(f.field, 'field_uri', ''), f.path)
), version=self.account.version)
shape_elem.append(additional_properties)
if len(shape_elem):
payload.append(shape_elem)
attachment_ids = create_attachment_ids_element(items=items, version=self.account.version)
payload.append(attachment_ids)
return payload
def _update_api_version(self, api_version, header, **parse_opts):
if not parse_opts.get('stream_file_content', False):
super()._update_api_version(api_version, header, **parse_opts)
# TODO: We're skipping this part in streaming mode because StreamingBase64Parser cannot parse the SOAP header
@classmethod
def _get_soap_parts(cls, response, **parse_opts):
if not parse_opts.get('stream_file_content', False):
return super()._get_soap_parts(response, **parse_opts)
# Pass the response unaltered. We want to use our custom streaming parser
return None, response
def _get_soap_messages(self, body, **parse_opts):
if not parse_opts.get('stream_file_content', False):
return super()._get_soap_messages(body, **parse_opts)
from ..attachments import FileAttachment
# 'body' is actually the raw response passed on by '_get_soap_parts'
r = body
parser = StreamingBase64Parser()
field = FileAttachment.get_field_by_fieldname('_content')
handler = StreamingContentHandler(parser=parser, ns=field.namespace, element_name=field.field_uri)
parser.setContentHandler(handler)
return parser.parse(r)
def stream_file_content(self, attachment_id):
# The streaming XML parser can only stream content of one attachment
payload = self.get_payload(
items=[attachment_id], include_mime_content=False, body_type=None, filter_html_content=None,
additional_fields=None,
)
self.streaming = True
try:
yield from self._get_response_xml(payload=payload, stream_file_content=True)
except ElementNotFound as enf:
# When the returned XML does not contain a Content element, ElementNotFound is thrown by parser.parse().
# Let the non-streaming SOAP parser parse the response and hook into the normal exception handling.
# Wrap in DummyResponse because _get_soap_parts() expects an iter_content() method.
response = DummyResponse(url=None, headers=None, request_headers=None, content=enf.data)
_, body = super()._get_soap_parts(response=response)
res = super()._get_soap_messages(body=body)
for e in self._get_elements_in_response(response=res):
if isinstance(e, Exception):
raise e
# The returned content did not contain any EWS exceptions. Give up and re-raise the original exception.
raise enf
finally:
self.streaming = False
self.stop_streaming()
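    # A minimal usage sketch (not part of the service itself): streaming an
    # attachment's decoded content to disk in chunks, assuming `account` and
    # `attachment` are the usual exchangelib Account and FileAttachment objects.
    #
    #   with open("attachment.bin", "wb") as f:
    #       for chunk in GetAttachment(account=account).stream_file_content(
    #               attachment_id=attachment.attachment_id):
    #           f.write(chunk)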
| 2.109375 | 2 |
spinsim/__init__.py | rpanderson/spinsim | 0 | 12793003 | <filename>spinsim/__init__.py
"""
"""
# from . import utilities
from enum import Enum
import numpy as np
import numba as nb
from numba import cuda
from numba import roc
import math
sqrt2 = math.sqrt(2)
sqrt3 = math.sqrt(3)
class SpinQuantumNumber(Enum):
"""
Options for the spin quantum number of a system.
Parameters
----------
value : :obj:`float`
The numerical value of the spin quantum number.
dimension : :obj:`int`
Dimension of the hilbert space the states with this spin belong to.
label : :obj:`str`
A text label that can be used for archiving.
"""
def __init__(self, value, dimension, label):
super().__init__()
self._value_ = value
self.dimension = dimension
self.label = label
HALF = (1/2, 2, "half")
"""
For two level systems.
"""
ONE = (1, 3, "one")
"""
For three level systems.
"""
class IntegrationMethod(Enum):
"""
Options for describing which method is used during the integration.
Parameters
----------
value : :obj:`str`
A text label that can be used for archiving.
"""
MAGNUS_CF4 = "magnus_cf4"
"""
Commutator free, fourth order Magnus based integrator.
"""
MIDPOINT_SAMPLE = "midpoint_sample"
"""
Euler integration method.
"""
HALF_STEP = "half_step"
"""
Integration method from AtomicPy. Makes two Euler integration steps, one sampling the field from the start of the time step, one sampling the field from the end of the time step. The equivalent of the trapezoidal method.
"""
class ExponentiationMethod(Enum):
"""
The implementation to use for matrix exponentiation within the integrator.
Parameters
----------
value : :obj:`str`
A text label that can be used for archiving.
index : :obj:`int`
A reference number, used when compiling the integrator, where higher level objects like enums cannot be interpreted.
"""
def __init__(self, value, index):
super().__init__()
self._value_ = value
self.index = index
ANALYTIC = ("analytic", 0)
"""
Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only.
"""
LIE_TROTTER = ("lie_trotter", 1)
"""
Approximation using the Lie Trotter theorem.
"""
class Device(Enum):
"""
The target device that the integrator is being compiled for.
.. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
.. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
.. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html
"""
def __init__(self, value, index):
super().__init__()
self._value_ = value
self.index = index
if value == "python":
def jit_host(template, max_registers):
def jit_host(func):
return func
return jit_host
self.jit_host = jit_host
def jit_device(func):
return func
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return func
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cpu_single":
def jit_host(template, max_registers):
def jit_host(func):
return nb.njit(template)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return nb.njit()(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return nb.njit(template)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cpu":
def jit_host(template, max_registers):
def jit_host(func):
return nb.njit(template, parallel = True)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return nb.njit()(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return nb.njit(template)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cuda":
def jit_host(template, max_registers):
def jit_host(func):
return cuda.jit(template, debug = False, max_registers = max_registers)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return cuda.jit(device = True, inline = True)(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return cuda.jit(template, device = True, inline = True)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "roc":
def jit_host(template, max_registers):
def jit_host(func):
return roc.jit(template)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return roc.jit(device = True)(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return roc.jit(template, device = True)(func)
return jit_device_template
self.jit_device_template = jit_device_template
PYTHON = ("python", 0)
"""
Use pure python interpreted code for the integrator, ie, don't compile the integrator.
"""
CPU_SINGLE = ("cpu_single", 0)
"""
Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on a single CPU core.
.. note ::
To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
"""
CPU = ("cpu", 0)
"""
Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on all CPU cores, in parallel.
.. note ::
To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
"""
CUDA = ("cuda", 1)
"""
Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an Nvidia cuda compatible GPU, in parallel.
.. note ::
To use this device option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features.
"""
ROC = ("roc", 2)
"""
Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an AMD ROCm compatible GPU, in parallel.
.. warning ::
Work in progress, not currently functional!
"""
class Results:
"""
The results of a an evaluation of the integrator.
Attributes
----------
time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
The times that `state` was evaluated at.
    time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The evaluated quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
The expected spin projection (Bloch vector) over time. This is calculated just in time using the JITed :obj:`callable` `spin_calculator`.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. Used to calculate `spin` the first time it is referenced by the user.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
def __init__(self, time, time_evolution, state, spin_calculator):
"""
Parameters
----------
time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
The times that `state` was evaluated at.
        time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The evaluated quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. Used to calculate `spin` the first time it is referenced by the user.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
self.time = time
self.time_evolution = time_evolution
self.state = state
self.spin_calculator = spin_calculator
def __getattr__(self, attr_name):
if attr_name == "spin":
spin = self.spin_calculator(self.state)
setattr(self, attr_name, spin)
return self.spin
raise AttributeError("{} has no attribute called {}.".format(self, attr_name))
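# A minimal usage sketch based on the signatures defined in this module (the
# field function and the numerical values below are purely illustrative):
#
#   def get_field(time_sample, sweep_parameter, field_sample):
#       field_sample[0] = 2000.0            # x component of the field
#       field_sample[1] = 0.0               # y component of the field
#       field_sample[2] = sweep_parameter   # z (bias) component of the field
#       field_sample[3] = 0.0               # quadratic shift (spin one systems only)
#
#   simulator = Simulator(get_field, SpinQuantumNumber.ONE)
#   results = simulator.evaluate(700e3, 0e-3, 100e-3, 100e-9, 500e-9,
#                                np.asarray([1, 0, 0], np.complex128))
#   results.spin  # expected spin projection, computed lazily via `spin_calculator`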
class Simulator:
"""
Attributes
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
threads_per_block : :obj:`int`
        The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed for different GPU models.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
get_time_evolution_raw : :obj:`callable`
        The internal function for evaluating the time evolution operator in parallel. Compiled for the chosen device on object construction.
Parameters:
* **sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied by the user. Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation. For example, if the `sweep_parameter` is used to define the bias field strength in `get_field`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameter`.
* **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at.
* **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is to start at, and the time that the experiment is to finish at. Measured in s.
* **time_step_integration** (:obj:`float`) - The integration time step. Measured in s.
* **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured in s.
            * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)) - The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
spin_calculator : :obj:`callable`
        Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in time if the `spin` property is needed. Compiled for the chosen device on object construction.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63):
"""
.. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under. It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the simulations that `spinsim` was designed for.
            * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) - the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for. See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction. This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
                The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics. This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state is given out of the rotating frame. One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rotating wave approximation: just define `get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details.
trotter_cutoff : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
            The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed for different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can be modified to increase the execution speed for a specific GPU model. Defaults to 63 (optimal for GTX1070, the device used for testing. Note that one extra register per thread is always added to the number specified for control, so really this number is 64).
            Raising this value allocates more registers (fast memory) to each thread, out of a maximum number for the whole GPU, for each specific GPU model. This means that if more registers are allocated than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, meaning some cores are inactive, and the GPU is said to have less occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer registers being available to each thread, meaning slower memory must be used. Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation.
"""
if not device:
if cuda.is_available():
device = Device.CUDA
else:
device = Device.CPU
self.threads_per_block = threads_per_block
self.spin_quantum_number = spin_quantum_number
self.device = device
self.get_time_evolution_raw = None
self.get_spin_raw = None
try:
self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers)
except:
print("\033[31mspinsim error: numba could not jit get_field function into a device function.\033[0m\n")
raise
def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63):
"""
Compiles the integrator and spin calculation functions of the simulator.
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under. It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the simulations that `spinsim` was designed for.
            * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) - the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for. See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction. This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
                The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics. This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state is given out of the rotating frame. One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rotating wave approximation: just define `get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details.
trotter_cutoff : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
            The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed for different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can be modified to increase the execution speed for a specific GPU model. Defaults to 63 (optimal for GTX1070, the device used for testing. Note that one extra register per thread is always added to the number specified for control, so really this number is 64).
            Raising this value allocates more registers (fast memory) to each thread, out of a maximum number for the whole GPU, for each specific GPU model. This means that if more registers are allocated than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, meaning some cores are inactive, and the GPU is said to have less occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer registers being available to each thread, meaning slower memory must be used. Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation.
"""
utilities = Utilities(spin_quantum_number, device, threads_per_block)
conj = utilities.conj
complex_abs = utilities.complex_abs
norm2 = utilities.norm2
inner = utilities.inner
set_to = utilities.set_to
set_to_one = utilities.set_to_one
set_to_zero = utilities.set_to_zero
matrix_multiply = utilities.matrix_multiply
adjoint = utilities.adjoint
matrix_exponential_analytic = utilities.matrix_exponential_analytic
matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter
jit_host = device.jit_host
jit_device = device.jit_device
jit_device_template = device.jit_device_template
device_index = device.index
dimension = spin_quantum_number.dimension
lie_dimension = dimension + 1
# utility_set = spin_quantum_number.utility_set
if not exponentiation_method:
if spin_quantum_number == SpinQuantumNumber.ONE:
exponentiation_method = ExponentiationMethod.LIE_TROTTER
elif spin_quantum_number == SpinQuantumNumber.HALF:
exponentiation_method = ExponentiationMethod.ANALYTIC
if integration_method == IntegrationMethod.MAGNUS_CF4:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.HALF_STEP:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:
sample_index_max = 1
sample_index_end = 1
exponentiation_method_index = exponentiation_method.index
if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF):
print("\033[31mspinsim warning!!!\n_attempting to use an analytic exponentiation method outside of spin half. Switching to a Lie Trotter method.\033[0m")
exponentiation_method = ExponentiationMethod.LIE_TROTTER
exponentiation_method_index = 1
@jit_device_template("(float64[:], complex128[:, :], complex128[:, :])")
def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse):
if device_index == 0:
time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128)
elif device_index == 1:
time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128)
elif device_index == 2:
time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :]
# Calculate the exponential
if exponentiation_method_index == 0:
matrix_exponential_analytic(field_sample, time_evolution_fine)
elif exponentiation_method_index == 1:
matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff)
            # Premultiply onto the existing time evolution operator
set_to(time_evolution_coarse, time_evolution_old)
matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse)
if use_rotating_frame:
if dimension == 3:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - rotating_wave
transform_frame = transform_frame_spin_one_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2)
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - 2*rotating_wave
transform_frame = transform_frame_spin_half_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding):
return
transform_frame = transform_frame_lab
get_field_jit = jit_device(get_field)
if integration_method == IntegrationMethod.MAGNUS_CF4:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse)
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse)
rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
w0 = (1.5 + sqrt3)/6
w1 = (1.5 - sqrt3)/6
field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0])
field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1])
field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0])
field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1])
field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_magnus_cf4
append_exponentiation_integration = append_exponentiation_integration_magnus_cf4
elif integration_method == IntegrationMethod.HALF_STEP:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine - time_coarse
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
time_sample = time_fine + time_step_integration - time_coarse
rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2
field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2
field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2
field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2
field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_half_step
append_exponentiation_integration = append_exponentiation_integration_half_step
elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine + 0.5*time_step_integration - time_coarse
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0]
field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1]
field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2]
if dimension > 2:
field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3]
append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_midpoint
append_exponentiation_integration = append_exponentiation_integration_midpoint
@jit_device_template("(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)")
def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter):
# Declare variables
if device_index == 0:
time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128)
field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128)
elif device_index == 1:
time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128)
field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128)
elif device_index == 2:
time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :]
field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64)
field_sample = field_sample_group[roc.get_local_id(1), :, :]
rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128)
rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :]
time_coarse[time_index] = time_end_points[0] + time_step_output*time_index
time_fine = time_coarse[time_index]
# Initialise time evolution operator to 1
set_to_one(time_evolution_coarse[time_index, :])
field_sample[0, 2] = 0
if use_rotating_frame:
time_sample = time_coarse[time_index] + time_step_output/2
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
rotating_wave = field_sample[0, 2]
if dimension == 2:
rotating_wave /= 2
# For every fine step
for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)):
get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding)
append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding)
time_fine += time_step_integration
if use_rotating_frame:
# Take out of rotating frame
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output)
time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0]
time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0]
if dimension > 2:
time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0]
else:
time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0]
@jit_host("(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])", max_registers)
def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse):
"""
Find the stepwise time evolution opperator.
Parameters
----------
sweep_parameter : :obj:`float`
time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index)
A coarse grained list of time samples that the time evolution operator is found for. In units of s. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1))
The time values for when the experiment is to start and finishes. In units of s.
time_step_integration : :obj:`float`
The time step used within the integration algorithm. In units of s.
time_step_output : :obj:`float`
The time difference between each element of `time_coarse`. In units of s. Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`.
time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
Time evolution operator (matrix) between the current and next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(time_coarse.size):
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
elif device_index == 1:
# Run calculation for each coarse timestep in parallel
time_index = cuda.grid(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
elif device_index == 2:
# Run calculation for each coarse timestep in parallel
time_index = roc.get_global_id(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
return
@jit_host("(complex128[:, :], float64[:, :])", max_registers = max_registers)
def get_spin(state, spin):
"""
Calculate each expected spin value in parallel.
For spin half:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
\\Re(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
-\\Im(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
\\frac{1}{2}(|\\psi_{+\\frac{1}{2}}(t)|^2 - |\\psi_{-\\frac{1}{2}}(t)|^2)
\\end{pmatrix}
\\end{align*}
For spin one:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
            \\Re(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) + \\psi_{-1}(t)))\\\\
            -\\Im(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) - \\psi_{-1}(t)))\\\\
|\\psi_{+1}(t)|^2 - |\\psi_{-1}(t)|^2
\\end{pmatrix}
\\end{align*}
Parameters
----------
state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
The state (wavefunction) of the spin system in the lab frame, for each time sampled. See :math:`\\psi(t)` in :ref:`overview_of_simulation_method`.
spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index)
The expected value for hyperfine spin of the spin system in the lab frame, for each time sampled. Units of :math:`\\hbar`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(spin.shape[0]):
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
elif device_index > 0:
if device_index == 1:
time_index = cuda.grid(1)
                elif device_index == 2:
time_index = roc.get_global_id(1)
if time_index < spin.shape[0]:
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
return
def spin_calculator(state):
"""
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state.
Parameters
----------
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns
-------
spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
The expected spin projection (Bloch vector) over time.
"""
if device.index == 0:
spin = np.empty((state.shape[0], 3), np.float64)
get_spin(state, spin)
elif device == Device.CUDA:
spin = cuda.device_array((state.shape[0], 3), np.float64)
blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block
get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin)
spin = spin.copy_to_host()
elif device == Device.ROC:
spin = roc.device_array((state.shape[0], 3), np.float64)
blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block
get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin)
spin = spin.copy_to_host()
return spin
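        # Usage sketch (values illustrative): for a simulated state history of
        # shape (number_of_samples, dimension),
        #   spin = spin_calculator(state)
        # returns the Bloch vector samples with shape (number_of_samples, 3),
        # where spin[:, 2] is the z projection in units of hbar.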
self.get_time_evolution_raw = get_time_evolution
self.spin_calculator = spin_calculator
def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init):
"""
Integrates the time dependent Schroedinger equation and returns the quantum state of the spin system over time.
Parameters
----------
sweep_parameter : :obj:`float`
The input to the `get_field` function supplied by the user. Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation. For example, if the `sweep_parameter` is used to define the bias field strength in `get_field`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameter`.
time_start : :obj:`float`
The time offset that the experiment is to start at. Measured in s.
time_end : :obj:`float`
The time that the experiment is to finish at. Measured in s. The duration of the experiment is `time_end - time_start`.
time_step_integration : :obj:`float`
The integration time step. Measured in s.
time_step_output : :obj:`float`
The sample resolution of the output timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured in s.
state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number)
The initial quantum state of the spin system, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns
-------
results : :obj:`Results`
An object containing the results of the simulation.
"""
if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6:
print(f"\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\033[0m\n")
time_step_integration = time_step_output/round(time_step_output/time_step_integration)
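        # For example (hypothetical values): time_step_output = 1e-4 s with
        # time_step_integration = 3e-7 s gives a ratio of about 333.3, so the
        # integration step is snapped to 1e-4/333 (about 3.003e-7 s) and a whole
        # number of integration steps fits in each output sample.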
time_end_points = np.asarray([time_start, time_end], np.float64)
state_init = np.asarray(state_init, np.complex128)
time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output)
if self.device.index == 0:
time = np.empty(time_index_max, np.float64)
time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
elif self.device == Device.CUDA:
time = cuda.device_array(time_index_max, np.float64)
time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
try:
self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
except:
print("\033[31mspinsim error: numba.cuda could not jit get_field function into a cuda device function.\033[0m\n")
raise
time_evolution_coarse = time_evolution_coarse.copy_to_host()
time = time.copy_to_host()
elif self.device == Device.ROC:
time = roc.device_array(time_index_max, np.float64)
time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
try:
self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
except:
print("\033[31mspinsim error: numba.roc could not jit get_field function into a roc device function.\033[0m\n")
raise
time_evolution_coarse = time_evolution_coarse.copy_to_host()
time = time.copy_to_host()
state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128)
self.get_state(state_init, state, time_evolution_coarse)
results = Results(time, time_evolution_coarse, state, self.spin_calculator)
return results
@staticmethod
@nb.njit
def get_state(state_init, state, time_evolution):
"""
        Use the stepwise time evolution operators in succession to find the quantum state timeseries of the spin system.
Parameters
----------
state_init : :class:`numpy.ndarray` of :class:`numpy.complex128`
The state (spin wavefunction) of the system at the start of the simulation.
state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
The state (wavefunction) of the spin system in the lab frame, for each time sampled.
time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
"""
for time_index in range(state.shape[0]):
# State = time evolution * previous state
for x_index in nb.prange(state.shape[1]):
state[time_index, x_index] = 0
if time_index > 0:
for z_index in range(state.shape[1]):
state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index]
else:
state[time_index, x_index] += state_init[x_index]
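        # The loop above implements the recursion
        #   state[0]     = state_init
        #   state[k + 1] = time_evolution[k] @ state[k]
        # so state[k] holds the wavefunction at the k-th coarse time sample.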
sqrt2 = math.sqrt(2)
sqrt3 = math.sqrt(3)
machine_epsilon = np.finfo(np.float64).eps*1000
class Utilities:
"""
    An object that contains definitions of all of the device functions (functions compiled for use on the target device) used in the integrator. These device functions are compiled for the chosen target device on construction of the object.
Attributes
----------
conj(z) : :obj:`callable`
Conjugate of a complex number.
.. math::
\\begin{align*}
(a + ib)^* &= a - ib\\\\
a, b &\\in \\mathbb{R}
\\end{align*}
Parameters:
* **z** (:class:`numpy.complex128`) - The complex number to take the conjugate of.
Returns
* **cz** (:class:`numpy.complex128`) - The conjugate of z.
complex_abs(z) : :obj:`callable`
The absolute value of a complex number.
.. math::
\\begin{align*}
|a + ib| &= \\sqrt{a^2 + b^2}\\\\
a, b &\\in \\mathbb{R}
\\end{align*}
Parameters:
* **z** (:class:`numpy.complex128`) - The complex number to take the absolute value of.
Returns
* **az** (:class:`numpy.float64`) - The absolute value of z.
norm2(z) : :obj:`callable`
The 2 norm of a complex vector.
.. math::
\|a + ib\|_2 = \\sqrt {\\left(\\sum_i a_i^2 + b_i^2\\right)}
Parameters:
* **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the 2 norm of.
Returns
* **nz** (:class:`numpy.float64`) - The 2 norm of z.
inner(left, right) : :obj:`callable`
The inner (maths convention dot) product between two complex vectors.
.. note::
The mathematics definition is used here rather than the physics definition, so the left vector is conjugated. Thus the inner product of two orthogonal vectors is 0.
.. math::
\\begin{align*}
l \\cdot r &\\equiv \\langle l, r \\rangle\\\\
l \\cdot r &= \\sum_i (l_i)^* r_i
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply in the inner product.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply in the inner product.
Returns
* **d** (:class:`numpy.complex128`) - The inner product of l and r.
set_to(operator, result) : :obj:`callable`
Copy the contents of one matrix into another.
.. math::
(A)_{i, j} = (B)_{i, j}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to.
set_to_one(operator) : :obj:`callable`
Make a matrix the multiplicative identity, ie, :math:`1`.
.. math::
\\begin{align*}
(A)_{i, j} &= \\delta_{i, j}\\\\
&= \\begin{cases}
1,&i = j\\\\
0,&i\\neq j
\\end{cases}
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`.
set_to_zero(operator) : :obj:`callable`
Make a matrix the additive identity, ie, :math:`0`.
.. math::
\\begin{align*}
(A)_{i, j} = 0
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`.
matrix_multiply(left, right, result) : :obj:`callable`
Multiply matrices left and right together, to be returned in result.
.. math::
\\begin{align*}
(LR)_{i,k} = \\sum_j (L)_{i,j} (R)_{j,k}
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the result of the product.
adjoint(operator) : :obj:`callable`
Takes the hermitian adjoint of a matrix.
.. math::
\\begin{align*}
A^\\dagger &\\equiv A^H\\\\
(A^\\dagger)_{y,x} &= ((A)_{x,y})^*
\\end{align*}
Matrix can be in :math:`\\mathbb{C}^{2\\times2}` or :math:`\\mathbb{C}^{3\\times3}`.
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint of.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant adjoint to.
matrix_exponential_analytic(field_sample, result) : :obj:`callable`
Calculates a :math:`\\mathfrak{su}(2)` matrix exponential based on its analytic form.
.. warning::
Only available for use with spin half systems. Will not work with spin one systems.
Assumes the exponent is an imaginary linear combination of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be calculated as
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z)\\\\
&= \\begin{pmatrix}
\\cos(\\frac{r}{2}) - i\\frac{z}{r}\\sin(\\frac{r}{2}) & -\\frac{y + ix}{r}\\sin(\\frac{r}{2})\\\\
\\frac{y - ix}{r}\\sin(\\frac{r}{2}) & \\cos(\\frac{r}{2}) + i\\frac{z}{r}\\sin(\\frac{r}{2})
\\end{pmatrix}
\\end{align*}
with :math:`r = \\sqrt{x^2 + y^2 + z^2}`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable`
Calculates a matrix exponential based on the Lie Product Formula,
.. math::
\\exp(A + B) = \\lim_{c \\to \\infty} \\left(\\exp\\left(\\frac{1}{c}A\\right) \\exp\\left(\\frac{1}{c}B\\right)\\right)^c.
**For spin half systems:**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z)\\\\
&= \\exp(2^{-\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\tau}\\\\
            &\\approx (\\exp(-i(2^{-\\tau} x) J_x) \\exp(-i(2^{-\\tau} y) J_y) \\exp(-i(2^{-\\tau} z) J_z))^{2^\\tau}\\\\
&= \\begin{pmatrix}
(c_Xc_Y - is_Xs_Y) e^{-iZ} &
-(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\
(c_Xs_Y - is_Xc_Y) e^{-iZ} &
(c_Xc_Y + is_Xs_Y) e^{iZ}
\\end{pmatrix}^{2^\\tau}\\\\
&= T^{2^\\tau},
\\end{align*}
with
.. math::
\\begin{align*}
X &= \\frac{1}{2}2^{-\\tau}x,\\\\
Y &= \\frac{1}{2}2^{-\\tau}y,\\\\
Z &= \\frac{1}{2}2^{-\\tau}z,\\\\
c_{\\theta} &= \\cos(\\theta),\\\\
s_{\\theta} &= \\sin(\\theta).
\\end{align*}
**For spin one systems**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(3)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z + q J_q),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & 1 & 0 \\\\
1 & 0 & 1 \\\\
0 & 1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & -i & 0 \\\\
i & 0 & -i \\\\
0 & i & 0
\\end{pmatrix},\\\\
J_z &= \\begin{pmatrix}
1 & 0 & 0 \\\\
0 & 0 & 0 \\\\
0 & 0 & -1
\\end{pmatrix},&
J_q &= \\frac{1}{3}\\begin{pmatrix}
1 & 0 & 0 \\\\
0 & -2 & 0 \\\\
0 & 0 & 1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z - iq J_q)\\\\
&= \\exp(2^{-\\tau}(-ix J_x - iy J_y - iz J_z - iq J_q))^{2^\\tau}\\\\
&\\approx (\\exp(-i(2^{-\\tau} x) J_x) \\exp(-i(2^{-\\tau} y) J_y) \\exp(-i(2^{-\\tau} z J_z + (2^{-\\tau} q) J_q)))^{2^\\tau}\\\\
&= \\begin{pmatrix}
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\frac{e^{i\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\sqrt{2}} & \\frac{e^{-i\\left(-Z + \\frac{Q}{3}\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)} (-i s_X + c_X s_Y)}{\\sqrt{2}} & e^{i\\frac{2Q}{3}} c_X c_Y & \\frac{e^{-i(Z - \\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\sqrt{2}} \\\\
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\frac{e^{i\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\sqrt{2}} & \\frac{e^{-i\\left(-Z + \\frac{Q}{3}\\right)}(c_X + c_Y + i s_Xs_Y)}{2}
\\end{pmatrix}^{2^\\tau}\\\\
&= T^{2^\\tau},
\\end{align*}
with
.. math::
\\begin{align*}
X &= 2^{-\\tau}x,\\\\
Y &= 2^{-\\tau}y,\\\\
Z &= 2^{-\\tau}z,\\\\
Q &= 2^{-\\tau}q,\\\\
c_{\\theta} &= \\cos(\\theta),\\\\
s_{\\theta} &= \\sin(\\theta).
\\end{align*}
Once :math:`T` is calculated, it is then recursively squared :math:`\\tau` times to obtain :math:`\\exp(A)`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z (and q for spin one) respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
* **trotter_cutoff** (:obj:`int`) - The number of squares to make to the approximate matrix (:math:`\\tau` above).
"""
def __init__(self, spin_quantum_number, device, threads_per_block):
"""
Parameters
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
threads_per_block : :obj:`int`
            The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value might improve performance on different GPU models.
"""
jit_device = device.jit_device
device_index = device.index
@jit_device
def conj(z):
return (z.real - 1j*z.imag)
@jit_device
def complex_abs(z):
return math.sqrt(z.real**2 + z.imag**2)
if spin_quantum_number == SpinQuantumNumber.HALF:
@jit_device
def norm2(z):
return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2)
@jit_device
def inner(left, right):
return conj(left[0])*right[0] + conj(left[1])*right[1]
@jit_device
def set_to(operator, result):
result[0, 0] = operator[0, 0]
result[1, 0] = operator[1, 0]
result[0, 1] = operator[0, 1]
result[1, 1] = operator[1, 1]
@jit_device
def set_to_one(operator):
operator[0, 0] = 1
operator[1, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 1
@jit_device
def set_to_zero(operator):
operator[0, 0] = 0
operator[1, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 0
@jit_device
def matrix_multiply(left, right, result):
result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0]
result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0]
result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1]
result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1]
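            # matrix_square_residual below squares a matrix stored in residual form:
            # if the full matrix is (1 + R), then (1 + R)^2 = 1 + (2R + R^2), so the
            # function maps R to 2R + R^2. Keeping only the residual R preserves
            # precision when R is small after the exponent has been repeatedly halved.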
@jit_device
def matrix_square_residual(operator, result):
result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0]
result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0]
result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1]
result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1]
@jit_device
def adjoint(operator, result):
result[0, 0] = conj(operator[0, 0])
result[1, 0] = conj(operator[0, 1])
result[0, 1] = conj(operator[1, 0])
result[1, 1] = conj(operator[1, 1])
@jit_device
def matrix_exponential_analytic(field_sample, result):
x = field_sample[0]
y = field_sample[1]
z = field_sample[2]
r = math.sqrt(x**2 + y**2 + z**2)
if r > 0:
x /= r
y /= r
z /= r
c = math.cos(r/2)
s = math.sin(r/2)
result[0, 0] = c - 1j*z*s
result[1, 0] = (y - 1j*x)*s
result[0, 1] = -(y + 1j*x)*s
result[1, 1] = c + 1j*z*s
else:
result[0, 0] = 1
result[1, 0] = 0
result[0, 1] = 0
result[1, 1] = 1
@jit_device
def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
hyper_cube_amount = math.ceil(trotter_cutoff/2)
if hyper_cube_amount < 0:
hyper_cube_amount = 0
precision = 4**hyper_cube_amount
a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
if a > 0:
ep = (field_sample[0] + 1j*field_sample[1])/a
else:
ep = 1
a = a/precision
Ca = math.cos(a/2)
Sa = -1j*math.sin(a/2)
ez = field_sample[2]/(2*precision)
ez = math.cos(ez) + 1j*math.sin(ez)
# eq = field_sample[3]/(6*precision)
# eq = math.cos(eq) + 1j*math.sin(eq)
result[0, 0] = Ca/ez - 1
result[1, 0] = Sa*ep
result[0, 1] = Sa/ep
result[1, 1] = Ca*ez - 1
if device_index == 0:
temporary = np.empty((2, 2), dtype = np.complex128)
elif device_index == 1:
temporary = cuda.local.array((2, 2), dtype = np.complex128)
elif device_index == 2:
temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)
temporary = temporary_group[roc.get_local_id(1), :, :]
for power_index in range(hyper_cube_amount):
matrix_square_residual(result, temporary)
matrix_square_residual(temporary, result)
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
result[0, 0] += 1
result[1, 1] += 1
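            # Flow of the scaling-and-squaring above: the exponent is pre-divided by
            # precision = 4**hyper_cube_amount, the residual T - 1 is built directly
            # (note the "- 1" on the diagonal), then squared twice per loop pass
            # (2*hyper_cube_amount squarings in total) before the identity is added back.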
# @jit_device
# def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
# hyper_cube_amount = math.ceil(trotter_cutoff/2)
# if hyper_cube_amount < 0:
# hyper_cube_amount = 0
# precision = 4**hyper_cube_amount
# x = field_sample[0]/(2*precision)
# y = field_sample[1]/(2*precision)
# z = field_sample[2]/(2*precision)
# cx = math.cos(x)
# sx = math.sin(x)
# cy = math.cos(y)
# sy = math.sin(y)
# cisz = math.cos(z) + 1j*math.sin(z)
# result[0, 0] = (cx*cy - 1j*sx*sy)/cisz
# result[1, 0] = (cx*sy -1j*sx*cy)/cisz
# result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz
# result[1, 1] = (cx*cy + 1j*sx*sy)*cisz
# if device_index == 0:
# temporary = np.empty((2, 2), dtype = np.complex128)
# elif device_index == 1:
# temporary = cuda.local.array((2, 2), dtype = np.complex128)
# elif device_index == 2:
# temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)
# temporary = temporary_group[roc.get_local_id(1), :, :]
# for power_index in range(hyper_cube_amount):
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
else:
@jit_device
def norm2(z):
return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2)
@jit_device
def cross(left, right, result):
result[0] = conj(left[1]*right[2] - left[2]*right[1])
result[1] = conj(left[2]*right[0] - left[0]*right[2])
result[2] = conj(left[0]*right[1] - left[1]*right[0])
@jit_device
def inner(left, right):
return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2]
@jit_device
def set_to(operator, result):
result[0, 0] = operator[0, 0]
result[1, 0] = operator[1, 0]
result[2, 0] = operator[2, 0]
result[0, 1] = operator[0, 1]
result[1, 1] = operator[1, 1]
result[2, 1] = operator[2, 1]
result[0, 2] = operator[0, 2]
result[1, 2] = operator[1, 2]
result[2, 2] = operator[2, 2]
@jit_device
def set_to_one(operator):
operator[0, 0] = 1
operator[1, 0] = 0
operator[2, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 1
operator[2, 1] = 0
operator[0, 2] = 0
operator[1, 2] = 0
operator[2, 2] = 1
@jit_device
def set_to_zero(operator):
operator[0, 0] = 0
operator[1, 0] = 0
operator[2, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 0
operator[2, 1] = 0
operator[0, 2] = 0
operator[1, 2] = 0
operator[2, 2] = 0
@jit_device
def matrix_multiply(left, right, result):
result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0]
result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0]
result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0]
result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1]
result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1]
result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1]
result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2]
result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2]
result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2]
@jit_device
def matrix_square_residual(operator, result):
result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0]
result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0]
result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0]
result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1]
result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1]
result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1]
result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2]
result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2]
result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2]
@jit_device
def adjoint(operator, result):
result[0, 0] = conj(operator[0, 0])
result[1, 0] = conj(operator[0, 1])
result[2, 0] = conj(operator[0, 2])
result[0, 1] = conj(operator[1, 0])
result[1, 1] = conj(operator[1, 1])
result[2, 1] = conj(operator[1, 2])
result[0, 2] = conj(operator[2, 0])
result[1, 2] = conj(operator[2, 1])
result[2, 2] = conj(operator[2, 2])
@jit_device
def matrix_exponential_analytic(field_sample, result, trotter_cutoff):
pass
@jit_device
def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
hyper_cube_amount = math.ceil(trotter_cutoff/2)
if hyper_cube_amount < 0:
hyper_cube_amount = 0
precision = 4**hyper_cube_amount
a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
if a > 0:
ep = (field_sample[0] + 1j*field_sample[1])/a
else:
ep = 1
a = a/precision
Ca = math.cos(a/2)
Sa = math.sin(a/2)
ca = math.cos(a)
sa = -1j*math.sin(a)/sqrt2
ez = field_sample[2]/(2*precision)
ez = math.cos(ez) + 1j*math.sin(ez)
eq = field_sample[3]/(6*precision)
eq = math.cos(eq) + 1j*math.sin(eq)
# Ca = 1
# Sa = a/2
# ca = 1
# sa = -1j*a/sqrt2
# ez = field_sample[2]/(2*precision)
# ez = 1 + 1j*ez
# eq = field_sample[3]/(6*precision)
# eq = 1 + 1j*eq
result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1
result[1, 0] = sa*eq*ep/ez
result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq))
result[0, 1] = sa*eq/(ez*ep)
result[1, 1] = ca*(eq*eq*eq*eq) - 1
result[2, 1] = sa*eq*ez*ep
result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep))
result[1, 2] = sa*eq*ez/ep
result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1
if device_index == 0:
temporary = np.empty((3, 3), dtype = np.complex128)
elif device_index == 1:
temporary = cuda.local.array((3, 3), dtype = np.complex128)
elif device_index == 2:
temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
temporary = temporary_group[roc.get_local_id(1), :, :]
for power_index in range(hyper_cube_amount):
matrix_square_residual(result, temporary)
matrix_square_residual(temporary, result)
result[0, 0] += 1
result[1, 1] += 1
result[2, 2] += 1
# @jit_device
# def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
# hyper_cube_amount = math.ceil(trotter_cutoff/2)
# if hyper_cube_amount < 0:
# hyper_cube_amount = 0
# precision = 4**hyper_cube_amount
# x = field_sample[0]/precision
# y = field_sample[1]/precision
# z = field_sample[2]/precision
# q = field_sample[3]/precision
# cx = math.cos(x)
# sx = math.sin(x)
# cy = math.cos(y)
# sy = math.sin(y)
# cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3)
# result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy)
# result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2
# result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy)
# cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3)
# result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2
# result[1, 1] = cisz*cx*cy
# result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2
# cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3)
# result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy)
# result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2
# result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy)
# if device_index == 0:
# temporary = np.empty((3, 3), dtype = np.complex128)
# elif device_index == 1:
# temporary = cuda.local.array((3, 3), dtype = np.complex128)
# elif device_index == 2:
# temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
# temporary = temporary_group[roc.get_local_id(1), :, :]
# for power_index in range(hyper_cube_amount):
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
self.conj = conj
self.complex_abs = complex_abs
self.norm2 = norm2
self.inner = inner
self.set_to = set_to
self.set_to_one = set_to_one
self.set_to_zero = set_to_zero
self.matrix_multiply = matrix_multiply
self.adjoint = adjoint
self.matrix_exponential_analytic = matrix_exponential_analytic
self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter
self.matrix_square_residual = matrix_square_residual | 2.96875 | 3 |
board/boot.py | vincent-l-j/micropython-stubber | 1 | 12793004 | <filename>board/boot.py<gh_stars>1-10
# This file is executed on every boot (including wake-boot from deepsleep)
import machine
import uos as os
try:
import esp
esp.osdebug(None)
except ImportError:
esp = None
try:
import pyb
pyb.country("US") # ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU
pyb.usb_mode("VCP+MSC") # act as a serial and a storage device
# pyb.main('main.py') # main script to run after this one
except ImportError:
pass
SD = False
if SD:
# Mount SD to /sd
try:
# Some boards have pulldown and/or LED on GPIO2, pullup avoids issues on TTGO 8 v1.8
# machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP)
# os.mount(machine.SDCard(slot=1, width=4), "/sd") # SD mode 4 bit
if esp:
# # SPI 1 bit M5Stack Core
os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), "/sd") # SPI 1 bit M5Stack Core
print("SD Card mounted")
except OSError as e:
if e.args[0] == 16:
print("No SD Card found")
| 2.53125 | 3 |
atm_controller.py | chulpyo/simple-atm-controller | 0 | 12793005 | from __future__ import annotations
from typing import List, Dict, Tuple, Optional
from abc import ABCMeta, abstractmethod
from enum import Enum
from bank import Bank
from cash_bin import CashBin
from card_reader import CardReader
class ControlType(Enum):
    SeeBalance = (0, "Check balance")
    Deposit = (1, "Deposit")
    Withdraw = (2, "Withdraw")
def __init__(self, code: int, desc: str) -> None:
self.code = code
self.desc = desc
class AbstractAtmController(metaclass=ABCMeta):
def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None:
self.bank = bank
self.cash_bin = cash_bin
self.card_reader = card_reader
self.accounts = None
self.pin = None
self.card_number = None
self.account = None
@abstractmethod
def input_pin(self, pin: str) -> None:
pass
@abstractmethod
def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]:
pass
@abstractmethod
def select_account(self, account: str) -> Tuple[bool, str]:
pass
@abstractmethod
def control_account(
self, control: ControlType, delta: int = 0
) -> Tuple[bool, str, int]:
pass
def finish(self) -> None:
self.accounts = None
self.pin = None
self.card_number = None
self.account = None
class AtmController(AbstractAtmController):
def input_pin(self, pin: str) -> None:
self.pin = pin
def authentication(self) -> Tuple[bool, str]:
self.card_number = self.card_reader.get_card_number()
self.accounts = self.bank.check_pin(self.card_number, self.pin)
if self.accounts is None:
self.finish()
return (False, "인증 실패.")
else:
return (True, "성공")
def select_account(self, account: str) -> Tuple[bool, str]:
if account in self.accounts:
self.account = account
return (True, "성공")
else:
self.finish()
return (False, "존재하지 않는 계좌 입니다.")
def control_account(
self, control: ControlType, delta: int = 0
) -> Tuple[bool, str, int]:
        # Duplicate exception handling exists here; needs improvement.
        # A missing card number or account is checked both in AtmController.control_account and in Bank.control_balance.
if self.card_number is None or self.account is None:
self.finish()
return (False, "카드번호 혹은 계좌가 존재하지 않습니다.", -1)
if control == ControlType.SeeBalance:
result = (True, "성공", self.bank.get_balance(self.card_number, self.account))
elif control == ControlType.Deposit or control == ControlType.Withdraw:
            # Assumes delta is always given as a positive integer
if control == ControlType.Withdraw:
delta = delta * -1
if self.cash_bin.get_balance() + delta < 0:
self.finish()
return (False, "현금통에 현금이 부족합니다.", -1)
if self.bank.get_balance(self.card_number, self.account) + delta < 0:
self.finish()
return (False, "계좌에 잔고가 부족합니다.", -1)
balance = self.bank.control_balance(self.card_number, self.account, delta)
if balance < 0:
result = (False, "실패(예외처리용)", balance)
else:
result = (True, "성공", balance)
else:
result = (False, "잘못된 제어구문입니다.", -1)
self.finish()
return result
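# Hypothetical usage sketch (the Bank, CashBin and CardReader instances come from
# their own modules and the argument values below are assumptions):
#   atm = AtmController(bank, cash_bin, card_reader)
#   atm.input_pin("1234")
#   ok, message = atm.authentication()
#   ok, message = atm.select_account("my_account")
#   ok, message, balance = atm.control_account(ControlType.Withdraw, 100)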
| 3.3125 | 3 |
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/project/viewmodels/project_fortesting_list_view.py | zhangyin2088/Teamcat | 6 | 12793006 | #coding=utf-8
'''
Created on 2015-10-10
@author: Devuser
'''
class ProjectFortestingList(object):
def __init__(self,fullpart,isversion,fortestings):
self.fullpart=fullpart
self.isversion=isversion
self.fortestings=fortestings
| 2.046875 | 2 |
cloudrunner_server/api/controllers/groups.py | ttrifonov/cloudrunner-server | 2 | 12793007 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
import logging
from pecan import expose, request # noqa
from sqlalchemy.exc import IntegrityError
from cloudrunner_server.api.decorators import wrap_command
from cloudrunner_server.api.model import Group, Org, Role
from cloudrunner_server.api.policy.decorators import check_policy
from cloudrunner_server.api.util import JsonOutput as O
LOG = logging.getLogger()
class Groups(object):
@expose('json', generic=True)
@check_policy('is_admin')
@wrap_command(Group)
def groups(self, name=None, *args):
def modifier(roles):
return [dict(as_user=role.as_user, servers=role.servers)
for role in roles]
if name:
group = Group.visible(request).filter(Group.name == name).first()
return O.group(group.serialize(
skip=['id', 'org_id'],
rel=[('roles', 'roles', modifier)]))
else:
groups = [u.serialize(
skip=['id', 'org_id'],
rel=[('roles', 'roles', modifier)])
for u in Group.visible(request).all()]
return O._anon(groups=groups,
quota=dict(allowed=request.user.tier.groups))
@groups.when(method='POST', template='json')
@check_policy('is_admin')
@groups.wrap_create()
def add_group(self, name, *args, **kwargs):
name = name or kwargs['name']
org = request.db.query(Org).filter(
Org.name == request.user.org).one()
group = Group(name=name, org=org)
request.db.add(group)
request.db.commit()
@groups.when(method='PUT', template='json')
@check_policy('is_admin')
@groups.wrap_modify()
def modify_group_roles(self, name, *args, **kwargs):
name = name or kwargs['name']
add_roles = request.POST.getall('add')
rm_roles = request.POST.getall('remove')
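        # Each role string has the form "<as_user>@<servers>" (e.g. "root@web01",
        # values illustrative); "*" as the user part is stored internally as "@".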
group = Group.visible(request).filter(Group.name == name).first()
if not group:
return O.error(msg="Group is not available")
for role in rm_roles:
as_user, _, servers = role.rpartition("@")
if not as_user or not servers:
continue
if as_user == "*":
as_user = "@"
roles = [r for r in group.roles if r.as_user == as_user and
r.servers == servers]
for r in roles:
request.db.delete(r)
request.db.commit()
errs = []
for role in add_roles:
as_user, _, servers = role.rpartition("@")
if not Role.is_valid(as_user):
errs.append(as_user)
if errs:
if len(errs) == 1:
return O.error(msg="The role '%s' is not valid" % errs[0])
else:
return O.error(msg="The following roles are not valid: %s" %
", ".join(errs))
for role in add_roles:
as_user, _, servers = role.rpartition("@")
if not as_user or not servers:
continue
if as_user == "*":
as_user = "@"
r = Role(as_user=as_user, servers=servers, group=group)
try:
request.db.add(r)
request.db.commit()
except IntegrityError:
request.db.rollback()
@groups.when(method='DELETE', template='json')
@check_policy('is_admin')
@groups.wrap_delete()
def rm_group(self, name, *args):
group = Group.visible(request).filter(Group.name == name).first()
if not group:
return O.error(msg="Group not found")
request.db.delete(group)
request.db.commit()
| 1.984375 | 2 |
couchfs/api.py | thanos/couchfs | 1 | 12793008 | """
A client api for couchdb attachments
"""
"""Main module."""
import logging
import fnmatch
import io
import mimetypes
import os
import pathlib
import re
import tempfile
from contextlib import contextmanager
import requests
logger = logging.getLogger(__file__)
echo = logger.info
class CouchDBClientException(Exception):
def __init__(self, *args, **kwargs):
super(CouchDBClientException, self).__init__(*args, **kwargs)
class URLRequired(CouchDBClientException):
"""A valid URL is required."""
class BadConnectionURI(CouchDBClientException):
"""A valid URL is required."""
class CouchDBClient:
URI_ENVIRON_KEY = 'COUCHDB_URI'
CONNECTION_RE = 'couchdb(s)?://((\w+)\:(.+)@)?([\w\.]+)(:(\d+))?/(\w+)'
URI_RE = re.compile(CONNECTION_RE)
def __init__(self, uri=None):
if uri is None:
uri = os.environ.get(self.URI_ENVIRON_KEY)
if not uri:
key = self.URI_ENVIRON_KEY
            raise URLRequired(f'You can set the environment variable {key}')
scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri)
if userid and psswd:
self.auth = (userid, psswd)
else:
self.auth = None
self.db = db
self.db_uri = f'{scheme}://{host}{port}/{self.db}'
def check_db(self):
response = requests.head(f"{self.db_uri}", auth=self.auth)
return response.status_code == 200
def create_db(self):
response = requests.put(f"{self.db_uri}", auth=self.auth)
response.raise_for_status()
def save_doc(self, doc):
_id = doc['_id']
doc_uri = f'{self.db_uri}/{_id}'
response = requests.head(doc_uri, auth=self.auth)
if response.status_code == 200:
rev = response.headers['ETag']
headers = {'If-Match': rev[1:-1]}
response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth)
elif response.status_code == 404:
response = requests.post(self.db_uri, json=doc, auth=self.auth)
response.raise_for_status()
def parse_connection_uri(self, uri):
"""
Given:
'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/
:param uri:
:return {host, db, auth, passwd}:
"""
if match := self.URI_RE.match(uri):
(ssl, _, userid, psswd, host, _, port, db) = match.groups()
scheme = 'http' + ('s' if ssl else '')
port = f':{port}' if port else ''
return scheme, userid, psswd, host, port, db
else:
raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}')
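    # Example (sketch): parse_connection_uri('couchdb://admin:secret@127.0.0.1:5984/test')
    # returns ('http', 'admin', 'secret', '127.0.0.1', ':5984', 'test'); a
    # "couchdbs://" URI maps to the https scheme.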
def list_attachments(self, *patterns):
regexs = []
for pattern in patterns:
if self.WILDCARD_RE.search(pattern):
regex = re.compile(fnmatch.translate(pattern))
else:
regex = re.compile(fnmatch.translate(pattern)[:-2])
regexs.append(regex)
for file_path, file_size in self.run_view():
if not regexs or any([regex.search(file_path) for regex in regexs]):
yield file_path, file_size
def run_view(self, **args):
params = {'reduce': False, 'include_docs': False}
if 'depth' in args:
params['group_level'] = args['depth']
params['reduce'] = True
response = requests.get(f"{self.db_uri}/_design/couchfs_views/_view/attachment_list", params=params, auth=self.auth)
response.raise_for_status()
for doc in response.json()['rows']:
yield '/'.join(doc['key']), doc['value']
def download(self, src, dst, dry_run=False):
for src, dst in self.download_srcdst(src, dst):
if dry_run:
                yield src, dst, 'DRY RUN', ''
else:
uri = f'{self.db_uri}/{src}'
response = requests.get(uri, auth=self.auth)
yield uri, dst, response.status_code, response.reason
WILDCARD_RE = re.compile('[\*\?\[\]]+')
def download_srcdst(self, src, dst, dry_run=False):
if match := self.WILDCARD_RE.search(src):
regex = re.compile(fnmatch.translate(src))
is_copying_files = True
else:
regex = re.compile(fnmatch.translate(src)[:-2])
sub_regex = re.compile(src)
is_copying_files = False
        for file_path, _ in self.run_view():
if regex.search(file_path):
if is_copying_files:
match = self.WILDCARD_RE.search(src)
dst_file_path = file_path[match.span()[0]:]
if dst_file_path.startswith('/'):
dst_file_path = file_path[1:]
dest_path = os.path.join(dst, dst_file_path)
else:
dst_file_path = file_path[len(src):]
if file_path.startswith('/'):
dst_file_path = dst_file_path[1:]
dest_path = os.path.join(dst, dst_file_path[1:])
if not dest_path.startswith('dump'):
print('NO DUMP', is_copying_files, dst, file_path[len(src):])
# break
yield file_path, dest_path
def download_file(self, url, dest):
with open(dest, 'wb') as f:
return self.download_to_file(url, f)
def download_to_file(self, url, file_obj):
with requests.get(url, stream=True, auth=self.auth) as r:
r.raise_for_status()
for chunk in r.iter_content(chunk_size=8192):
if chunk:
file_obj.write(chunk)
@contextmanager
def get_attachment(self, url, in_memory=False):
try:
if in_memory:
bytes_fp = io.BytesIO()
self.download_to_file(url, bytes_fp)
yield bytes_fp.getvalue()
else:
fp = tempfile.NamedTemporaryFile(delete=False)
self.download_to_file(url, fp)
fp.close()
yield open(fp.name, 'rb')
finally:
if in_memory:
bytes_fp.close()
else:
os.unlink(fp.name)
def get_attachment_as_bytes(self, url):
return requests.get(url, stream=True, auth=self.auth).content
def upload(self, src, dst, dry_run=False):
src = os.path.abspath(src)
if os.path.isfile(src):
if dry_run:
yield src, dst, 'DRY RUN', ''
else:
with open(src, 'rb') as src_fp:
yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src)))
elif os.path.isdir(src):
p = pathlib.Path(src).resolve()
for (dirpath, dirs, files) in os.walk(src):
for filename in files:
file_path = os.path.join(dirpath, filename)
pp = file_path[len(p.parent.as_posix()) + 1:]
dest_path = os.path.join(dst, pp)
if dry_run:
yield file_path, dest_path, 'DRY RUN', ''
else:
                        with open(file_path, 'rb') as src_fp:
                            yield self.upload_file(src_fp, dest_path)
def upload_bytes_file(self, src_bytes, dst):
with tempfile.NamedTemporaryFile() as src_fp:
src_fp.name = os.path.basename(dst)
src_fp.write(src_bytes)
            src_fp.seek(0)
            return self.upload_file(src_fp, dst)
def upload_file(self, src, dst):
"""
Uploads a file using dst as the doc/bucket id
:param src: path to file to upload
:param dst: id
:return: file_name, file_url, upload status, upload message
"""
doc_id = [segment for segment in dst.split('/') if segment][0]
file_name = '/'.join(dst.split('/')[1:])
doc_uri = f'{self.db_uri}/{doc_id}'
file_uri = f'{doc_uri}/{file_name}'
response = requests.head(f'{doc_uri}', auth=self.auth)
if response.status_code == 404:
response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth)
if response.status_code != 201:
return file_name, f'{file_uri}', response.status_code, response.reason
rev = response.json()['rev']
else:
rev = response.headers['ETag']
major, _ = mimetypes.guess_type(src.name)
headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]}
response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth)
response.raise_for_status()
return file_name, f'{file_uri}', response.status_code, response.reason
@classmethod
def init_db(cls, logger=echo):
echo('connecting to couchdb')
client = cls()
logger('checking the db')
if not client.check_db():
logger('creating the db')
client.create_db()
_id = client.COUCHFS_VIEWS['_id']
logger(f'creating or updating the db {_id}')
client.save_doc(client.COUCHFS_VIEWS)
logger(f'db is now setup')
COUCHFS_VIEWS={
"_id": "_design/couchfs_views",
"views": {
"attachment_list": {
"map": "function (doc) {\n if (doc._attachments) {\n for (const file_name in doc._attachments) {\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\n }\n} else {\n emit(doc._id.split('/'), 0)\n}\n}",
"reduce": "_stats"
}
},
"language": "javascript"
} | 2.90625 | 3 |
0x02-python-import_modules/5-variable_load.py | darkares23/holbertonschool-higher_level_programming | 0 | 12793009 | <gh_stars>0
#!/usr/bin/python3
if __name__ == "__main__":
import variable_load_5
print("{:d}".format(variable_load_5.a))
| 1.84375 | 2 |
7kyu/jaden_casing_strings.py | nhsz/codewars | 1 | 12793010 | # http://www.codewars.com/kata/5390bac347d09b7da40006f6/
import string
def to_jaden_case(s):
return string.capwords(s)
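# Example: to_jaden_case("How can mirrors be real if our eyes aren't real")
# returns "How Can Mirrors Be Real If Our Eyes Aren't Real".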
| 2 | 2 |
03/solve.py | jkowalleck/AoC2020 | 0 | 12793011 | <reponame>jkowalleck/AoC2020<filename>03/solve.py
from collections import namedtuple
from os import path
from typing import List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return [line.rstrip('\n') for line in fh.readlines()]
class Tile:
@classmethod
def from_raw_lines(cls, lines: List[str]) -> "Tile":
trees = [
[char == '#' for char in line]
for line in lines
]
return cls(trees)
def __init__(self, trees: List[List[bool]]) -> None:
self.trees = trees
self.height = len(self.trees)
self.width = len(self.trees[0])
Position = namedtuple('Position', ['x', 'y'])
class Map:
def __init__(self, tile: Tile) -> None:
self.tile = tile
self.pos = Position(0, 0)
def step(self, x, y) -> None:
new_pos = Position(self.pos.x + x, self.pos.y + y)
if new_pos.y >= self.tile.height:
raise ValueError('stepped out')
self.pos = new_pos
def pos_has_tree(self) -> bool:
tile_pos = Position(self.pos.x % self.tile.width, self.pos.y)
return self.tile.trees[tile_pos.y][tile_pos.x]
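# solve() below walks the slope until Map.step() raises ValueError (stepping past
# the bottom of the tile); the horizontal position wraps via the modulo in
# Map.pos_has_tree() because the tree pattern repeats to the right.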
def solve(step_x, step_y) -> int:
map_ = Map(Tile.from_raw_lines(get_input()))
tree_count = 0
while True:
try:
map_.step(step_x, step_y)
tree_count += 1 if map_.pos_has_tree() else 0
except ValueError:
return tree_count
if __name__ == '__main__':
solution_part1 = solve(3, 1)
print(f'solution part 1: {solution_part1}')
solution_part2 = solve(1, 1) * solve(3, 1) * solve(5, 1) * solve(7, 1) * solve(1, 2)
print(f'solution part 2: {solution_part2}')
| 3.421875 | 3 |
scripts/annotate_variants.py | gwaygenomics/pdx_exomeseq | 15 | 12793012 | """
<NAME> 2017
scripts/annotate_variants.py
Use ANNOVAR to first convert a sample into annovar format and then annotate
"""
import os
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--merged', action='store_true',
help='use directory for merged VCFs')
parser.add_argument('-g', '--humanonly', action='store_true',
help='use humanonly directory for merged VCFs')
args = parser.parse_args()
merged = args.merged
humanonly = args.humanonly
if merged:
vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf')
annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs')
annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs')
else:
vcf_file_dir = os.path.join('results', 'gatk_vcfs')
annovar_file_dir = os.path.join('results', 'annovar_vcfs')
annotated_file_dir = os.path.join('results', 'annotated_vcfs')
if humanonly:
human_string = 'humanonly'
vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string)
annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string)
annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string)
annovar_dir = os.path.join('modules', 'annovar')
humandb_dir = os.path.join(annovar_dir, 'humandb/')
convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl')
table_annovar = os.path.join(annovar_dir, 'table_annovar.pl')
conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar)
anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir)
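# Two passes follow: (1) convert each PASS-filtered VCF into ANNOVAR input format,
# then (2) annotate the converted files against refGene, cosmic70, gnomad_exome
# and dbnsfp30a, writing CSV output.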
# Convert to annovar format
for vcf_file in os.listdir(vcf_file_dir):
if '.idx' not in vcf_file:
base_name = vcf_file.split('.')[0]
full_vcf_file = os.path.join(vcf_file_dir, vcf_file)
output_vcf_file = os.path.join(annovar_file_dir,
'{}.annovar.vcf'.format(base_name))
if not os.path.isfile(output_vcf_file):
file_command = '{} {} > {}'.format(conv_com, full_vcf_file,
output_vcf_file)
subprocess.call(file_command, shell=True)
# Annotate annovar formatted files with given databases
for annovar_file in os.listdir(annovar_file_dir):
base_name = annovar_file.split('.')[0]
full_annov_file = os.path.join(annovar_file_dir, annovar_file)
annotated_vcf_file = os.path.join(annotated_file_dir,
'{}.annotated'.format(base_name))
if not os.path.isfile(annotated_vcf_file):
file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \
'-out {} -verbose -otherinfo -remove -protocol ' \
'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \
'-operation g,f,f,f -nastring . -csvout ' \
'-polish'.format(table_annovar, full_annov_file,
annotated_vcf_file)
subprocess.call(file_command, shell=True)
| 2.46875 | 2 |
Week 3/Actual answer.py | JustCodeIt729/Coding-Challenges | 3 | 12793013 | def get_max_profit(stock_prices):
if len(stock_prices) < 2:
raise ValueError('Getting a profit requires at least 2 prices')
# We'll greedily update min_price and max_profit, so we initialize
# them to the first price and the first possible profit
min_price = stock_prices[0]
max_profit = stock_prices[1] - stock_prices[0]
# Start at the second (index 1) time
# We can't sell at the first time, since we must buy first,
# and we can't buy and sell at the same time!
# If we started at index 0, we'd try to buy *and* sell at time 0.
# This would give a profit of 0, which is a problem if our
# max_profit is supposed to be *negative*--we'd return 0.
for current_time in range(1, len(stock_prices)):
current_price = stock_prices[current_time]
# See what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# Update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
# Update min_price so it's always
# the lowest price we've seen so far
min_price = min(min_price, current_price)
return max_profit
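# Example: get_max_profit([10, 7, 5, 8, 11, 9]) == 6  (buy at 5, sell at 11).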
| 4.15625 | 4 |
halfpipe/io/__init__.py | fossabot/Halfpipe-1 | 0 | 12793014 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from .file import (
DictListFile,
AdaptiveLock,
loadpicklelzma,
dumppicklelzma,
make_cachefilepath,
cacheobj,
uncacheobj,
)
from .parse import (
parse_condition_file,
parse_design,
loadspreadsheet,
loadmatrix,
)
from .index import BidsDatabase, ExcludeDatabase, Database
from .metadata import (
canonicalize_direction_code,
direction_code_str,
MetadataLoader,
SidecarMetadataLoader,
slice_timing_str,
str_slice_timing,
)
from .signals import meansignals
__all__ = [
"DictListFile",
"AdaptiveLock",
"IndexedFile",
"parse_condition_file",
"parse_design",
"loadspreadsheet",
"loadmatrix",
"loadpicklelzma",
"dumppicklelzma",
"make_cachefilepath",
"cacheobj",
"uncacheobj",
"BidsDatabase",
"ExcludeDatabase",
"Database",
"canonicalize_direction_code",
"direction_code_str",
"MetadataLoader",
"SidecarMetadataLoader",
"slice_timing_str",
"str_slice_timing",
"meansignals",
]
| 1.671875 | 2 |
generator_utils.py | sciatti/mazesolving | 0 | 12793015 | import numpy as np
#TODO:
#1. create a streamlined and replicable gif creation set of functions in this file.
#2. implement these functions into the generation algorithms available.
def convert_2d(index, cols):
return (index // cols, index % cols)
def bounds_check(index, rows, cols):
if index[0] < 0 or index[0] > rows - 1:
return True
if index[1] < 0 or index[1] > cols - 1:
return True
return False
def neighborCheck(grid, curr, rows, cols):
#order: Left, Right, Top, Down
ops = [(0,-1), (0,1), (-1,0), (1,0)]
#short for operations
ret = []
for i in range(4):
#bounds checking
x = curr.index[1] + ops[i][1]
y = curr.index[0] + ops[i][0]
if bounds_check((y,x), rows, cols):
continue
if grid[y][x].visited == False:
if curr.walls[i] != 'X':
ret.append(i)
return ret
def nbr_index(index, dir):
if dir == 'L':
return (index[0], index[1] - 1)
elif dir == 'R':
return (index[0], index[1] + 1)
elif dir == 'T':
return (index[0] - 1, index[1])
return (index[0] + 1, index[1])
def conv_nbr_wall(dir):
if dir == 'L':
return 1
elif dir == 'R':
return 0
elif dir == 'T':
return 2
return 3
def conv_idx_dir(index, nbr_index):
y = index[0] - nbr_index[0]
x = index[1] - nbr_index[1]
if x == 1:
return 'R'
if x == -1:
return 'L'
if y == 1:
return 'T'
if y == -1:
return 'D'
def print_grid(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
print(grid[i][j].walls, end=", ")
print("]")
def print_index(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
print(grid[i][j].index, end=", ")
print("]")
def print_visited(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
if grid[i][j].visited == True:
print('X', end=", ")
else:
print('O', end=", ")
print("]")
def maze_index(index, dir):
if dir == 0:
return (index[0], index[1] - 1)
elif dir == 1:
return (index[0], index[1] + 1)
elif dir == 2:
return (index[0] - 1, index[1])
return (index[0] + 1, index[1])
def create_snapshot(new_image, index, direction, color=None):
# set marking color to 255 (white) if none provided
if color == None:
color = 255
# assign the given color to the cell to mark it as active
new_image[index[0], index[1]] = color
if direction < 0:
return new_image
# find the index of the wall to break remove
mark_as_white = maze_index(index, direction)
# remove the wall (set it to the provided color)
new_image[mark_as_white[0], mark_as_white[1]] = color
return new_image
def grid_to_image(index):
return (index[0] * 2 + 1, index[1] * 2 + 1)
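# Image layout: cell (r, c) maps to pixel (2*r + 1, 2*c + 1); the even rows and
# columns in between hold the wall pixels that maze_index() addresses.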
def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color = None):
# mark one or two changes, algorithm specific
if secondIdx == None:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
else:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
newIMG = create_snapshot(newIMG, secondIdx, -1, color)
if not np.array_equal(newIMG, gif_arr[-1]):
gif_arr.append(newIMG)
def mark_node(idx, gif_arr, secondIdx = None, color = None):
if secondIdx == None:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
else:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
newIMG = create_snapshot(newIMG, secondIdx, -1, color)
if not np.array_equal(newIMG, gif_arr[-1]):
gif_arr.append(newIMG)
def getNeighbor(grid, curr, rows, cols, previous):
#order: Left, Right, Top, Down
ops = [(0,-1), (0,1), (-1,0), (1,0)]
#short for operations
ret = []
for i in range(4):
#bounds checking
x = curr.index[1] + ops[i][1]
y = curr.index[0] + ops[i][0]
if bounds_check((y,x), rows, cols) or (y,x) == previous.index:
continue
ret.append(grid[y][x])
return ret
def print_maze(grid):
maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1))
maze[:,:] = '@'
for i in range(len(grid)):
for j in range(len(grid[i])):
for k in range(4):
idx = maze_index((i * 2 + 1,j * 2 + 1), k)
maze[i * 2 + 1, j * 2 + 1] = '+'
if grid[i][j].walls[k] == 'X':
if k == 0 or k == 1:
maze[idx[0], idx[1]] = '-'
else:
maze[idx[0], idx[1]] = '|'
for i in range(maze.shape[0]):
for j in range(maze.shape[1]):
print(maze[i,j].decode('utf-8'), end=" ")
print()
def countNeighbors(grid, index, rows, cols):
#order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
#short for operations
count = 0
for i in range(8):
#bounds checking
x = index[1] + ops[i][1]
y = index[0] + ops[i][0]
if bounds_check((y,x), rows, cols):
continue
if grid[y,x] == 255:
count += 1
return count
def checkRules(grid, index, rule):
c = countNeighbors(grid, index, grid.shape[0], grid.shape[1])
for character in rule:
if c == int(character):
return True
return False
def start_cells(grid, y, x, random, visited, unvisited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
dirs = random.sample(ops, k=len(ops))
count = 0
for index in dirs:
if count == len(dirs):
break
if not bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]):
if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
continue
grid[y + index[0], x + index[1]] = 255
visited.add((y + index[0], x + index[1]))
update_set(y + index[0], x + index[1], visited, grid, unvisited)
count += 1
if count == 0:
return False
return True
def check_visited(y, x, visited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
for index in ops:
if (y + index[0], x + index[1]) in visited:
return True
return False
def update_set(y, x, all_nodes, grid, unvisited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
for index in ops:
if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
continue
all_nodes.add((y,x))
if (y,x) in unvisited:
unvisited.remove((y,x))
| 3.171875 | 3 |
main.py | Hsuirad/Hemorrhagic-Volume-Assessment | 0 | 12793016 | <reponame>Hsuirad/Hemorrhagic-Volume-Assessment
import cv2 as c
import numpy as np
import tkinter as tk
from sklearn.neighbors import KNeighborsClassifier
from tkinter import filedialog
from PIL import ImageTk, Image
l = tk.Tk()
l.geometry('300x300')
def select():
global name
name = filedialog.askopenfilename(initialdir = "/Users/Dariush/Desktop/python_code/mri_machine_learning",title = "Select file",filetypes = (("jpeg files","*.jpg"),("all files","*.*")))
img = c.imread(name, 0)
icon = ImageTk.PhotoImage(Image.open(name))
	label = tk.Label(l, image = icon)
	label.image = icon  # keep a reference so the PhotoImage is not garbage collected
	label.pack()
analyze(name)
def analyze(image):
values_xy = [5*3, 2*1, 9*5, 9*9]
values_g = [1, 0, 5, 10]
	# placeholder: the actual analysis is not implemented yet (see the notes at the bottom of the file)
l.title('MRI Analysis tool')
lbl = tk.Label(l, text = "Please select an image to be analyzed").pack()
btn = tk.Button(l, text = "import", width = 10, command=select).pack()
l.mainloop()
#TODO: ALL THIS HERE
# # # # # # # # # # # # # # # # # # # # # #
# #
# #
# USE KNN FOR A THREE DIMENSIONAL #
# PLANE IN WHICH YOU USE DISTANCE #
# AND GRAYSCALE AS CLASSIFICATION #
# VALUES, AND THEN RECEIVE A POINT #
# WITH ITS GRAYSCALE AND CLASSIFY #
# OR DO SECTIONS LIKE 10x10 GRIDS #
# #
# #
# # # # # # # # # # # # # # # # # # # # # #
'''
Notes:
- maybe use old algorithm to define exact points
- have user select roi again
- use x, y, and grayscale as three point in knn classification
''' | 3.296875 | 3 |
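# A minimal, self-contained sketch of the KNN idea outlined in the notes above: each
# pixel becomes a feature vector (x, y, grayscale) and a classifier trained on labelled
# pixels predicts a class for the rest. The labelled mask and the two-class labelling
# are assumptions for illustration only, not part of the original tool.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
def classify_pixels_sketch(gray_image, labelled_mask):
    """gray_image: 2D uint8 array; labelled_mask: same shape, -1 = unlabelled, 0/1 = class."""
    ys, xs = np.indices(gray_image.shape)
    # one row per pixel: column index, row index, grayscale intensity
    features = np.column_stack([xs.ravel(), ys.ravel(), gray_image.ravel()])
    labels = labelled_mask.ravel()
    known = labels >= 0
    knn = KNeighborsClassifier(n_neighbors=5)
    knn.fit(features[known], labels[known])
    predicted = labels.copy()
    predicted[~known] = knn.predict(features[~known])
    return predicted.reshape(gray_image.shape)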
pasta para baixar/netcha/exr47 contagem de pares.py | vany-oss/python | 0 | 12793017 | <reponame>vany-oss/python<filename>pasta para baixar/netcha/exr47 contagem de pares.py
for c in range(2, 51, 2):
print(c, end=' ')
print('Acabou')
# program that counts from 2 to 50 in steps of 2
src/datafinder/core/configuration/properties/property_type.py | schlauch/DataFinder | 9 | 12793018 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Provides the supported property types. A property type allows
validation of property values against defined restrictions and
performs transformation of values for the persistence layer.
"""
from copy import deepcopy
from datetime import datetime
from decimal import Decimal
import logging
from datafinder.core.configuration.properties import constants
from datafinder.core.configuration.properties import domain
from datafinder.core.configuration.properties.validators import base_validators
from datafinder.core.configuration.properties.validators import type_validators
from datafinder.core.error import ConfigurationError
__version__ = "$Revision-Id:$"
_log = logging.getLogger()
class BasePropertyType(object):
""" Base class for all property types. """
name = ""
def __init__(self, notNull):
"""
@param notNull: Indicates if a values may be C{None} or not.
@type notNull: C{bool}
"""
self.restrictions = dict()
self.notNull = notNull
def validate(self, value):
""" Performs validation of the value against
the defined restrictions. Calls C{_validate}
to perform concrete validation.
@raise ValueError: indicates validation errors.
"""
if not value is None:
self._validate(value)
else:
if self.notNull:
raise ValueError("Value must not be None.")
def _validate(self, value):
""" Template method for concrete validation within
a sub class. """
pass
def fromPersistenceFormat(self, persistedValue):
""" Restores the value from the persistence layer format.
@raise ValueError: Indicates problems during value transformation.
"""
self = self # silent pylint
return persistedValue
def toPersistenceFormat(self, value):
""" Transforms the value to the persistence layer format.
@raise ValueError: Indicates problems during value transformation.
"""
self = self # silent pylint
return value
class StringType(BasePropertyType):
""" Represents string values. """
name = constants.STRING_TYPE
def __init__(self, minimum=None, maximum=None, pattern=None,
options=None, optionsMandatory=None, notNull=False):
"""
@see L{StringValidator.__init__<datafinder.core.configuration.
properties.validators.type_validators.StringValidator.__init__>}
for details on restriction parameters.
"""
BasePropertyType.__init__(self, notNull)
self.restrictions[constants.MINIMUM_LENGTH] = minimum
self.restrictions[constants.MAXIMUM_LENGTH] = maximum
self.restrictions[constants.PATTERN] = pattern
self.restrictions[constants.OPTIONS] = options
self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory
self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory)
class BooleanType(BasePropertyType):
""" Represents a boolean values. """
name = constants.BOOLEAN_TYPE
def __init__(self, notNull=False):
BasePropertyType.__init__(self, notNull)
self._validate = type_validators.BooleanValidator()
class NumberType(BasePropertyType):
""" Represents numeric values. """
name = constants.NUMBER_TYPE
def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None,
maxDecimalPlaces=None, options=None, optionsMandatory=None,
notNull=False):
"""
        @see L{NumberValidator.__init__<datafinder.core.configuration.
        properties.validators.type_validators.NumberValidator.__init__>}
for details on restriction parameters.
"""
BasePropertyType.__init__(self, notNull)
self.restrictions[constants.MINIMUM_VALUE] = minimum
self.restrictions[constants.MAXIMUM_VALUE] = maximum
self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces
self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces
self.restrictions[constants.OPTIONS] = options
self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory
self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces,
maxDecimalPlaces, options, optionsMandatory)
class DatetimeType(BasePropertyType):
""" Represents date and time values. """
name = constants.DATETIME_TYPE
def __init__(self, minimum=None, maximum=None, options=None,
optionsMandatory=None, notNull=False):
"""
        @see L{DatetimeValidator.__init__<datafinder.core.configuration.
        properties.validators.type_validators.DatetimeValidator.__init__>}
for details on restriction parameters.
"""
BasePropertyType.__init__(self, notNull)
self.restrictions[constants.MINIMUM_VALUE] = minimum
self.restrictions[constants.MAXIMUM_VALUE] = maximum
self.restrictions[constants.OPTIONS] = options
self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory
self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory)
class ListType(BasePropertyType):
""" Represents list of primitive values. """
name = constants.LIST_TYPE
def __init__(self, allowedSubtypes=None, minimum=None,
maximum=None, notNull=False):
"""
        @see L{ListValidator.__init__<datafinder.core.configuration.
        properties.validators.type_validators.ListValidator.__init__>}
for details on restriction parameters.
"""
BasePropertyType.__init__(self, notNull)
self.restrictions[constants.MINIMUM_LENGTH] = minimum
self.restrictions[constants.MAXIMUM_LENGTH] = maximum
self.restrictions[constants.ALLOWED_SUB_TYPES] = list()
if allowedSubtypes is None:
self._allowedSubtypes = list()
self._allowedSubtypes.append(StringType())
self._allowedSubtypes.append(NumberType())
self._allowedSubtypes.append(BooleanType())
self._allowedSubtypes.append(DatetimeType())
self._allowedSubtypes.append(DomainObjectType())
else:
self._allowedSubtypes = allowedSubtypes
subValidators = list()
for subtype in self._allowedSubtypes:
subValidators.append(subtype.validate)
self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name)
self._validate = type_validators.ListValidator(minimum, maximum, subValidators)
def toPersistenceFormat(self, value):
""" Ensures that the transformation for every
list item is performed. """
if not value is None:
result = list()
for item in value:
transformationSucceeded = False
for subType in self._allowedSubtypes:
try:
subType.validate(item)
result.append(subType.toPersistenceFormat(item))
transformationSucceeded = True
break
except ValueError:
continue
if not transformationSucceeded:
raise ValueError("Cannot transform value '%s' to persistence format."
% repr(item))
return result
def fromPersistenceFormat(self, persistedValue):
""" Ensures that the transformation for every
list item is performed. """
if not persistedValue is None:
result = list()
for item in persistedValue:
transformationSucceeded = False
for subType in self._allowedSubtypes:
try:
value = subType.fromPersistenceFormat(item)
subType.validate(value)
result.append(value)
transformationSucceeded = True
break
except ValueError:
continue
if not transformationSucceeded:
raise ValueError("Cannot restore value '%s' from persistence format."
% repr(item))
return result
def __deepcopy__(self, _):
return ListType(deepcopy(self._allowedSubtypes),
self.restrictions[constants.MINIMUM_LENGTH],
self.restrictions[constants.MAXIMUM_LENGTH],
self.notNull)
class AnyType(BasePropertyType):
""" Represents an unspecific property type. """
name = constants.ANY_TYPE
def __init__(self, allowedTypes=None, notNull=False):
""" Constructor. """
BasePropertyType.__init__(self, notNull)
if allowedTypes is None:
self._allowedTypes = list()
self._allowedTypes.append(BooleanType())
self._allowedTypes.append(NumberType())
self._allowedTypes.append(DatetimeType())
self._allowedTypes.append(StringType())
self._allowedTypes.append(DomainObjectType())
self._allowedTypes.append(ListType())
else:
self._allowedTypes = allowedTypes
self.restrictions[constants.ALLOWED_SUB_TYPES] = list()
subValidators = list()
for subtype in self._allowedTypes:
subValidators.append(subtype.validate)
self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name)
self.validate = base_validators.OrValidator(subValidators)
def toPersistenceFormat(self, value):
""" Ensures that the transformation for every
supported type is tried. """
if not value is None:
result = None
transformationSucceeded = False
for subType in self._allowedTypes:
try:
subType.validate(value)
result = subType.toPersistenceFormat(value)
transformationSucceeded = True
break
except ValueError:
continue
if not transformationSucceeded:
raise ValueError("Cannot transform value '%s' to persistence format."
% repr(value))
return result
def fromPersistenceFormat(self, persistedValue):
""" Ensures that the transformation for every
supported type is tried. """
if not persistedValue is None:
result = None
transformationSucceeded = False
for subType in self._allowedTypes:
try:
value = subType.fromPersistenceFormat(persistedValue)
subType.validate(value)
result = value
transformationSucceeded = True
break
except ValueError:
continue
if not transformationSucceeded:
raise ValueError("Cannot restore value '%s' from persistence format."
% repr(persistedValue))
return result
def __deepcopy__(self, _):
return AnyType(deepcopy(self._allowedTypes), self.notNull)
class UnknownDomainObject(domain.DomainObject):
""" Used to represent values of domain object types whose
class could not be loaded. """
# Used to have a nice representation of the dictionary
representation = domain.DomainProperty(StringType())
def __init__(self, theDict):
domain.DomainObject.__init__(self)
self.theDict = theDict # Used to allow access to the properties
self.representation = str(theDict)
class DomainObjectType(BasePropertyType):
""" Represents a object values. """
name = "" # Here you find the concrete class identifier after initialization
def __init__(self, cls=None, notNull=False):
"""
Constructor.
@param cls: Full dotted class name (consists of package, module, and class name)
or a class object.
@type cls: C{unicode} or class object
"""
BasePropertyType.__init__(self, notNull)
if cls is None:
cls = UnknownDomainObject
if isinstance(cls, basestring):
self.name = cls
self._cls = self._importClass(cls)
else:
self.name = "%s.%s" % (cls.__module__, cls.__name__)
self._cls = cls
@property
def _isValid(self):
""" Indicates whether the domain class has been correctly loaded or not. """
return self._cls != UnknownDomainObject
def _importClass(self, fullDottedClassName):
""" Tries to import the associated class and raises a configuration
error if something goes wrong. """
fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(".")]
className = fullDottedClassName[fullDottedClassName.rfind(".") + 1:]
try:
moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [""])
cls = getattr(moduleInstance, className)
except (ImportError, AttributeError, ValueError), error:
return self._handleImportError(str(error.args))
if cls.__name__ != className:
cls = self._handleImportError("Failed to import class '%s'! Got '%s' instead!" \
% (fullDottedClassName, cls.__name__))
return cls
def _handleImportError(self, reason):
""" Common procedure to handle failed domain object imports. """
message = "Cannot import '%s'. Reason: '%s'" % (self.name, reason)
_log.warning(message)
return UnknownDomainObject
def _validate(self, value):
""" Delegates the validation to the actual instance. """
if self._cls != value.__class__:
raise ValueError("The value '%s' has not the required type '%s'." \
% (str(value), str(self._cls)))
try:
value.validate()
except AttributeError, error:
raise ValueError("Cannot validate property value. Reason '%s'" % str(error.args))
except ValueError, error:
raise ValueError("Invalid property value found: '%s'" % str(error.args))
def toPersistenceFormat(self, value):
""" Transform the domain object into a dictionary. """
if not self._isValid:
raise ValueError("The domain class could not be found. Please " \
+ "correct the configuration.")
if not value is None:
if self._cls != value.__class__:
raise ValueError("The value '%s' has not the required type '%s'." \
% (str(value), str(self._cls)))
result = dict()
try:
for _, name, descriptor, subValue in value.walk():
result[name] = descriptor.type.toPersistenceFormat(subValue)
except AttributeError:
raise ValueError("The value '%s' is no valid domain object." % str(value))
return result
def fromPersistenceFormat(self, persistedValue):
""" Restores the domain object from the given dictionary. """
if not persistedValue is None:
if not isinstance(persistedValue, dict):
raise ValueError("The persisted value '%s' is no dictionary."
% str(persistedValue))
if not self._isValid:
return UnknownDomainObject(persistedValue)
try:
instance = self._cls()
except TypeError:
raise ValueError("Cannot create domain object '%s' using empty constructor."
% self.name)
else:
for instance, name, descriptor, value in instance.walk():
try:
value = descriptor.type.fromPersistenceFormat(persistedValue[name])
except KeyError:
raise ValueError(
"Persisted domain object '%s' does not fit defined domain class '%s'."
% (self.name, str(persistedValue)))
else:
setattr(instance, name, value)
return instance
_propertyNameClassMap = {StringType.name: StringType,
BooleanType.name: BooleanType,
NumberType.name: NumberType,
DatetimeType.name: DatetimeType,
ListType.name: ListType,
AnyType.name: AnyType}
PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:]
def createPropertyType(propertyTypeName, restrictions=dict()):
"""
Factory method for property type creation.
@param propertyTypeName: Name of the property type.
@type propertyTypeName: C{unicode}
@param restrictions: Map of restriction parameters and corresponding values.
@type restrictions: C{dict} keys: C{unicode}, C{object}
W0142: Here the */** magic is useful to simplify the property type
creation. Other approaches would "blow up" the code here.
""" # pylint: disable=W0142
if propertyTypeName in _propertyNameClassMap:
try:
return _propertyNameClassMap[propertyTypeName](**restrictions)
except TypeError:
raise ConfigurationError("Restrictions for property type '%s' are invalid." % propertyTypeName)
else:
return DomainObjectType(propertyTypeName)
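# Illustrative usage of the factory above (added for clarity; not part of the original
# module). The restriction keys must match the keyword arguments of the corresponding
# property type constructor, otherwise a ConfigurationError is raised. The concrete
# values below are assumptions for demonstration only.
def _createPropertyTypeExample():
    """ Builds a bounded string type and validates a short value (expected to pass). """
    stringType = createPropertyType(constants.STRING_TYPE,
                                    {"maximum": 10, "notNull": True})
    stringType.validate(u"ok")
    return stringType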
_typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool],
constants.DATETIME_TYPE: [datetime],
constants.LIST_TYPE: [list],
constants.NUMBER_TYPE: [int, float, Decimal],
constants.STRING_TYPE: [str, unicode]}
def determinePropertyTypeConstant(value):
"""
Helper function to determine the property type constant of the given value.
If the no constant matches the full dotted class name is returned.
@see: L{constants<datafinder.core.configuration.properties.constants>}
for property type constants.
@param value: Python object.
@type value: C{object}
@return: Property type constant.
@rtype: C{string}
"""
typeDisplayName = None
valueType = type(value)
for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems():
if valueType in availableTypes:
typeDisplayName = typeName
break
if typeDisplayName is None:
typeDisplayName = \
"%s.%s" % (value.__class__.__module__, value.__class__.__name__)
return typeDisplayName
| 1.554688 | 2 |
pysnmp-with-texts/LAN.py | agustinhenze/mibs.snmplabs.com | 8 | 12793019 | #
# PySNMP MIB module LAN (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN
# Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "TimeTicks", "Counter64", "MibIdentifier", "NotificationType", "Bits", "Integer32", "iso", "ObjectIdentity", "IpAddress", "Unsigned32", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32")
MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "DisplayString", "TextualConvention", "RowStatus", "TruthValue")
pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662))
productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200))
generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1))
lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3))
lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1))
if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z')
if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE')
if mibBuilder.loadTexts: lanInfo.setContactInfo('')
if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.')
class PortSpeedType(TextualConvention, Integer32):
description = 'Describe the port speed and type.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("unknown", 0), ("auto", 1), ("fullDulplex10", 2), ("halfDulplex10", 3), ("fullDulplex100", 4), ("halfDulplex100", 5), ("fullDulplex1000", 6), ("halfDulplex1000", 7))
lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1))
lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lanIp.setStatus('current')
if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.')
lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lanSubnetMask.setStatus('current')
if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.')
lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lanSpeed.setStatus('current')
if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.')
mibBuilder.exportSymbols("LAN", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib, PortSpeedType=PortSpeedType, generalMib=generalMib, productMib=productMib, lanSubnetMask=lanSubnetMask)
| 1.585938 | 2 |
faravdms/account/views.py | samcodesio/faravdms_active | 0 | 12793020 | <reponame>samcodesio/faravdms_active
from django.shortcuts import redirect, render
from django.http import HttpResponse
from django.contrib.auth.models import User, auth
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from .forms import CreateUserForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
# User is authenticated
auth.login(request,user)
return redirect("/")
else:
messages.info(request, "invalid credentials")
print("Error")
return redirect("login")
else:
return render(request,"login.html")
def logout(request):
auth.logout(request)
return redirect('login')
# ADD USERS
def add_users(request):
if request.method == "POST":
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
return redirect('add_users')
else:
form = CreateUserForm()
return render(request,'add_users.html',{'form':form}) | 2.28125 | 2 |
171_excel_sheet_column_number.py | jsingh41/cs_jatin | 1 | 12793021 | """
171 Excel Sheet Column Number
https://leetcode.com/problems/excel-sheet-column-number/description/
Related to question Excel Sheet Column Title
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
        column_num = 0
        for letter in s:
            # ord('A') == 65, so ord(letter) - 64 maps 'A'..'Z' to 1..26 (base-26 with no zero digit)
            column_num = 26 * column_num + ord(letter) - 64
return column_num
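# Quick illustrative check of the conversion above (added for illustration; not part of
# the original solution): "AB" -> 26 * (26 * 0 + 1) + 2 = 28, matching the docstring.
if __name__ == "__main__":
    solution = Solution()
    assert solution.titleToNumber("A") == 1
    assert solution.titleToNumber("Z") == 26
    assert solution.titleToNumber("AA") == 27
    assert solution.titleToNumber("AB") == 28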
| 3.65625 | 4 |
musiclist_v2.py | tolgaerdonmez/music-list-lastfm | 1 | 12793022 | <reponame>tolgaerdonmez/music-list-lastfm
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
import sys
import requests
import os
import urllib.request
class song():
def __init__(self,name,artist,album):
self.name = name
self.artist = artist
self.album = album
def __str__(self):
return "{} by {} | Album: {}".format(self.name,self.artist,self.album)
class App_Musiclist(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setupUi()
self.connectdb()
self.countbtn = 0
def connectdb(self):
try:
os.remove("musiclist.db")
except FileNotFoundError:
pass
self.connection = sqlite3.connect("musiclist.db")
self.cursor = self.connection.cursor()
self.cursor.execute("CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)")
self.connection.commit()
def setupUi(self):
#CREATING WIDGETS
self.save_list_txt = QtWidgets.QPushButton("Save List to TXT")
self.delete_list = QtWidgets.QPushButton("Delete Current List")
self.add_song = QtWidgets.QPushButton("Add Song")
self.add_song.setObjectName('add_song_btn')
self.add_song_field = QtWidgets.QLineEdit()
#STYLE
self.save_list_txt.setStyleSheet("color: rgb(162, 16, 26);")
self.delete_list.setStyleSheet("color: rgb(162, 16, 26);")
self.add_song.setStyleSheet("color: rgb(162, 16, 26);")
#SETTING LAYOUT
h_box = QtWidgets.QHBoxLayout()
h_box.addWidget(self.add_song)
h_box.addWidget(self.add_song_field)
h_box.addWidget(self.delete_list)
h_box.addWidget(self.save_list_txt)
self.v_box_2 = QtWidgets.QVBoxLayout()
self.v_box = QtWidgets.QVBoxLayout()
self.v_box.addLayout(h_box)
self.v_box.addLayout(self.v_box_2)
self.v_box.addStretch()
#EVENTS
self.add_song.clicked.connect(self.event_add_song)
self.delete_list.clicked.connect(self.event_delete_list)
self.save_list_txt.clicked.connect(self.event_save_list_to_txt)
#WINDOW PROPERTIES
self.setLayout(self.v_box)
#self.setGeometry(700,100,750,500)
#icon = QtGui.QIcon()
#icon.addPixmap(QtGui.QPixmap("music.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
#self.setWindowTitle("Music List")
def get_song_from_lastfm(self,song_name):
try:
# GETTING TRACK & ARTIST
track_url = "http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json".format(
song_name)
track_val = requests.get(track_url).json()
# GETTING ALBUM FROM TRACK
album_url = "http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json".format(track_val["results"]["trackmatches"]["track"][0]["artist"],track_val["results"]["trackmatches"]["track"][0]["name"])
album_val = requests.get(album_url).json()
song_info = [album_val["track"]["name"], album_val["track"]["artist"]["name"],album_val["track"]["album"]["title"]]
return song_info
except:
return False
def event_add_song(self):
from_who = self.sender().objectName()
if from_who == 'add_song_btn':
get_input = self.add_song_field.text()
song_info = self.get_song_from_lastfm(get_input)
elif from_who == 'file_add_song':
text, okPressed = QtWidgets.QInputDialog.getText(self, "Add Song","Song Name: ", QtWidgets.QLineEdit.Normal, "")
song_info = self.get_song_from_lastfm(text)
if song_info == False:
pass
else:
track = song(song_info[0],song_info[1],song_info[2])
self.cursor.execute("INSERT INTO musiclist Values(?,?,?)",(song_info[0],song_info[1],song_info[2]))
self.connection.commit()
self.add_label = QtWidgets.QLabel(str(track))
self.add_label.setObjectName(song_info[0])
#Create Widgets & Layout
self.new_h_box = QtWidgets.QHBoxLayout()
self.new_h_box.setObjectName("delhbox|{}".format(self.countbtn))
new_del_btn = QtWidgets.QPushButton("X")
new_del_btn.setObjectName("delbtn|{}".format(self.countbtn))
new_img = QtWidgets.QLabel()
#Getting img and create img
#getting url
track_url = "http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json".format(song_info[1],song_info[0])
track_val = requests.get(track_url).json()
url = track_val['track']['album']['image'][1]['#text']
if url == '':
new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio))
else:
data = urllib.request.urlopen(url).read()
image = QtGui.QImage()
image.loadFromData(data)
new_img.setPixmap(QtGui.QPixmap(image))
#Add items to layout & setting to v_box_2
self.new_h_box.addWidget(new_img)
self.new_h_box.addWidget(self.add_label)
self.new_h_box.addStretch()
self.new_h_box.addWidget(new_del_btn)
self.v_box_2.addLayout(self.new_h_box)
self.countbtn += 1
new_del_btn.clicked.connect(self.del_selected_item)
self.add_song_field.clear()
def del_selected_item(self):
btn = self.sender().objectName()
number = btn.split("|")[1]
layout = self.findChild(QtCore.QObject, "delhbox|{}".format(number))
song_name = layout.itemAt(1).widget().objectName()
self.cursor.execute("DELETE FROM musiclist WHERE song_name = ?",(song_name,))
self.connection.commit()
self.clearLayout(layout)
def event_save_list_to_txt(self):
self.cursor.execute("SELECT * FROM musiclist")
current_songs = self.cursor.fetchall()
if len(current_songs) == 0:
msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', "Empty List!", QtWidgets.QMessageBox.Ok)
else:
save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, "Save Your List to ?", os.getenv("HOME"),"Text Files (*.txt)")
try:
with open(save_to_where[0],"w",encoding = "utf-8") as file:
for i in current_songs:
add_this = song(i[0],i[1],i[2])
file.write(str(add_this) + '\n')
msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', "List Saved !", QtWidgets.QMessageBox.Ok)
except FileNotFoundError:
pass
def event_delete_list(self):
self.clearLayout(self.v_box_2)
self.countbtn = 0
self.cursor.execute("DELETE FROM musiclist")
def clearLayout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
# app = QtWidgets.QApplication(sys.argv)
# ui = App_Musiclist()
# sys.exit(app.exec_())
| 2.96875 | 3 |
cfg.py | CharlesPikachu/DeepDream | 15 | 12793023 | '''config file'''
MEANS = [0.485, 0.456, 0.406]
STDS = [0.229, 0.224, 0.225]
MAX_JITTER = 32
MAX_ITERS = 50
LEARNING_RATE = 2e-2
NUM_OCTAVES = 6
OCTAVE_SCALE = 1.4
SAVE_INTERVAL = 10
SAVEDIR = 'results'
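# Illustrative sketch (not part of the original config): how NUM_OCTAVES and
# OCTAVE_SCALE are typically combined — the image is processed at a pyramid of sizes,
# each octave OCTAVE_SCALE times smaller than the previous one. The 512 px base size
# below is an assumption for demonstration only.
def octave_sizes(base_size=512, num_octaves=NUM_OCTAVES, octave_scale=OCTAVE_SCALE):
    """Return the pyramid of image sizes, from smallest to largest."""
    return [int(round(base_size / octave_scale ** i)) for i in range(num_octaves)][::-1]
# octave_sizes() -> [95, 133, 187, 261, 366, 512] with the settings above (512 / 1.4**i, rounded)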
| 1.164063 | 1 |
tensorbay/dataset/dataset.py | YiweiLi4/tensorbay-python-sdk | 0 | 12793024 | <reponame>YiweiLi4/tensorbay-python-sdk
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
"""Notes, DatasetBase, Dataset and FusionDataset.
:class:`Notes` contains the basic information of a :class:`DatasetBase`.
:class:`DatasetBase` defines the basic concept of a dataset,
which is the top-level structure to handle your data files, labels and other additional information.
It represents a whole dataset contains several segments
and is the base class of :class:`Dataset` and :class:`FusionDataset`.
:class:`Dataset` is made up of data collected from only one sensor
or data without sensor information.
It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.
:class:`FusionDataset` is made up of data collected from multiple sensors.
It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.
"""
import json
from typing import Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload
from ..label import Catalog
from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads
from .segment import FusionSegment, Segment
_T = TypeVar("_T", FusionSegment, Segment)
class Notes(ReprMixin, EqMixin):
"""This is a class stores the basic information of :class:`DatasetBase`.
Arguments:
is_continuous: Whether the data inside the dataset is time-continuous.
bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.
"""
_T = TypeVar("_T", bound="Notes")
_repr_attrs = ("is_continuous", "bin_point_cloud_fields")
def __init__(
self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None
) -> None:
self.is_continuous = is_continuous
self.bin_point_cloud_fields = bin_point_cloud_fields
def __getitem__(self, key: str) -> Any:
try:
return getattr(self, key)
except AttributeError as error:
raise KeyError(key) from error
def _loads(self, contents: Dict[str, Any]) -> None:
self.is_continuous = contents["isContinuous"]
self.bin_point_cloud_fields = contents.get("binPointCloudFields")
@classmethod
def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
"""Loads a :class:`Notes` instance from the given contents.
Arguments:
contents: The given dict containing the dataset notes::
{
"isContinuous": <boolean>
"binPointCloudFields": [ <array> or null
<field_name>, <str>
...
]
}
Returns:
The loaded :class:`Notes` instance.
"""
return common_loads(cls, contents)
def keys(self) -> KeysView[str]:
"""Return the valid keys within the notes.
Returns:
The valid keys within the notes.
"""
return KeysView(self._repr_attrs) # type: ignore[arg-type]
def dumps(self) -> Dict[str, Any]:
"""Dumps the notes into a dict.
Returns:
A dict containing all the information of the Notes::
{
"isContinuous": <boolean>
"binPointCloudFields": [ <array> or null
<field_name>, <str>
...
]
}
"""
contents: Dict[str, Any] = {"isContinuous": self.is_continuous}
if self.bin_point_cloud_fields:
contents["binPointCloudFields"] = self.bin_point_cloud_fields
return contents
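# Illustrative round trip for the class above (added for clarity; not part of the
# original module), assuming the documented behaviour of ``loads``/``dumps``. The
# field names are example values only.
def _notes_round_trip_example() -> None:
    contents = {"isContinuous": True, "binPointCloudFields": ["X", "Y", "Z", "Intensity"]}
    notes = Notes.loads(contents)
    assert notes.is_continuous is True
    assert notes.dumps() == contents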
class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors
"""This class defines the concept of a basic dataset.
DatasetBase represents a whole dataset contains several segments
and is the base class of :class:`Dataset` and :class:`FusionDataset`.
A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog`
indicating all the possible values of the labels.
Arguments:
name: The name of the dataset.
Attributes:
catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset.
notes: The :class:`Notes` of the dataset.
"""
_repr_type = ReprType.SEQUENCE
def __init__(self, name: str) -> None:
super().__init__(name)
self._segments: NameSortedList[_T] = NameSortedList()
self._catalog: Catalog = Catalog()
self._notes = Notes()
def __len__(self) -> int:
return self._segments.__len__()
@overload
def __getitem__(self, index: int) -> _T:
...
@overload
def __getitem__(self, index: slice) -> Sequence[_T]:
...
def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]:
return self._segments.__getitem__(index)
@property
def catalog(self) -> Catalog:
"""Return the catalog of the dataset.
Returns:
The :class:`~tensorbay.label.catalog.Catalog` of the dataset.
"""
return self._catalog
@property
def notes(self) -> Notes:
"""Return the notes of the dataset.
Returns:
The class:`Notes` of the dataset.
"""
return self._notes
def load_catalog(self, filepath: str) -> None:
"""Load catalog from a json file.
Arguments:
filepath: The path of the json file which contains the catalog information.
"""
with open(filepath, "r") as fp:
contents = json.load(fp)
self._catalog = Catalog.loads(contents)
def get_segment_by_name(self, name: str) -> _T:
"""Return the segment corresponding to the given name.
Arguments:
name: The name of the request segment.
Returns:
The segment which matches the input name.
"""
return self._segments.get_from_name(name)
def add_segment(self, segment: _T) -> None:
"""Add a segment to the dataset.
Arguments:
segment: The segment to be added.
"""
self._segments.add(segment)
class Dataset(DatasetBase[Segment]):
"""This class defines the concept of dataset.
Dataset is made up of data collected from only one sensor or data without sensor information.
It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.
"""
def create_segment(self, segment_name: str = "") -> Segment:
"""Create a segment with the given name.
Arguments:
segment_name: The name of the segment to create, which default value is an empty string.
Returns:
The created :class:`~tensorbay.dataset.segment.Segment`.
"""
segment = Segment(segment_name)
self._segments.add(segment)
return segment
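# Illustrative usage of the class above (added for clarity; not part of the original
# module): build a dataset, add a segment, and look it up by name. The names used are
# example values only.
def _dataset_usage_example() -> None:
    dataset = Dataset("example_dataset")
    segment = dataset.create_segment("train")
    assert dataset.get_segment_by_name("train") is segment
    assert len(dataset) == 1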
class FusionDataset(DatasetBase[FusionSegment]):
"""This class defines the concept of fusion dataset.
FusionDataset is made up of data collected from multiple sensors.
It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.
"""
def create_segment(self, segment_name: str = "") -> FusionSegment:
"""Create a fusion segment with the given name.
Arguments:
segment_name: The name of the fusion segment to create,
which default value is an empty string.
Returns:
The created :class:`~tensorbay.dataset.segment.FusionSegment`.
"""
segment = FusionSegment(segment_name)
self._segments.add(segment)
return segment
| 2.3125 | 2 |
osdu_commons/services/delivery_service.py | binderjoe/sdu-commons | 0 | 12793025 | <gh_stars>0
import logging
import time
from functools import partial
from itertools import islice
from typing import List, Optional, Iterable
import attr
from attr.validators import instance_of, optional
from pampy import match
from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \
GetResourcesResponseNotFound, GetResourcesResultItem
from osdu_commons.model.aws import S3Location
from osdu_commons.utils import convert
from osdu_commons.utils.srn import SRN
from osdu_commons.utils.validators import list_of
logger = logging.getLogger(__name__)
MAX_RESOURCES_FETCHING_ATTEMPTS = 5
@attr.s(frozen=True)
class DeliveredResource:
srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn)
exists: bool = attr.ib(validator=instance_of(bool))
data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None)
s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None)
temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None)
@classmethod
def from_json(cls, json_object, credentials, exists=True):
return cls(
srn=json_object['SRN'],
data=json_object.get('Data'),
s3_location=json_object.get('S3Location'),
temporary_credentials=credentials,
exists=exists
)
@classmethod
def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True):
return cls(
srn=item.srn,
data=item.data,
s3_location=item.s3_location,
temporary_credentials=credentials,
exists=exists,
)
@attr.s(frozen=True)
class DeliveredResponse:
delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource)))
not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource)))
unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN)))
class DeliveryServiceException(Exception):
    pass
class DeliveryService:
MAX_GET_RESOURCES_BATCH_SIZE = 100
def __init__(self, delivery_client: DeliveryClient):
self._delivery_client = delivery_client
def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]:
resource_ids = iter(resource_ids)
srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE))
while len(srns_to_fetch) > 0:
yield from self.get_resources_batch_unordered(srns_to_fetch)
srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE))
def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]:
srns_to_fetch = set(resource_ids)
for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS):
delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch)
yield from delivered_response.delivery_resources
yield from delivered_response.not_found_resources
srns_to_fetch = delivered_response.unprocessed_srn
if len(srns_to_fetch) == 0:
break
logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt')
if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1:
time.sleep(2 ** i)
if len(srns_to_fetch) > 0:
raise Exception(f'Cannot fetch srns: {srns_to_fetch}')
def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse:
srns_to_fetch = list(resource_ids)
get_resources_response = self._delivery_client.get_resources(srns_to_fetch)
return match(
get_resources_response,
GetResourcesResponseSuccess, self.handle_get_resources_success,
GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch),
)
@staticmethod
def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse:
result = get_resources_response.result
credentials = get_resources_response.temporary_credentials
delivery_resources = [
DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result]
return DeliveredResponse(
delivery_resources=delivery_resources,
not_found_resources=[],
unprocessed_srn=get_resources_response.unprocessed_srn
)
@staticmethod
def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound,
srn_to_fetch: List[SRN]) -> DeliveredResponse:
not_found_srns = get_resources_response.not_found_resource_ids
not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns]
unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns))
return DeliveredResponse(
delivery_resources=[],
not_found_resources=not_found_delivery_resources,
unprocessed_srn=unprocessed_srn
)
def get_resource(self, resource_id: SRN) -> DeliveredResource:
get_resources_result = list(self.get_resources([resource_id]))
assert len(get_resources_result) == 1
return get_resources_result[0]
def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool:
resources = self.get_resources(resource_ids)
return all(resource.exists for resource in resources)
def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]:
resource = self.get_resource(resource_id)
components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']]
components_ids_with_requested_type = [
component_id for component_id in components_ids if component_id.type == component_type
]
return self.get_resources(components_ids_with_requested_type)
def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource:
get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type))
assert len(get_components_of_type_result) == 1
return get_components_of_type_result[0]
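# A small standalone sketch of the batching pattern used in ``get_resources`` above
# (added for illustration; not part of the original module): ``islice`` repeatedly
# drains a fixed-size chunk from a single iterator, so an arbitrarily long stream of
# SRNs can be fetched 100 at a time.
def _batched(iterable, batch_size):
    iterator = iter(iterable)
    chunk = list(islice(iterator, batch_size))
    while chunk:
        yield chunk
        chunk = list(islice(iterator, batch_size))
# list(_batched(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]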
| 1.953125 | 2 |
src/bxgateway/messages/ont/ont_message_factory.py | doubleukay/bxgateway | 21 | 12793026 | <filename>src/bxgateway/messages/ont/ont_message_factory.py
from typing import Type
from bxcommon.messages.abstract_message import AbstractMessage
from bxcommon.messages.abstract_message_factory import AbstractMessageFactory
from bxgateway.messages.ont.addr_ont_message import AddrOntMessage
from bxgateway.messages.ont.block_ont_message import BlockOntMessage
from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage
from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage
from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage
from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage
from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage
from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage
from bxgateway.messages.ont.inventory_ont_message import InvOntMessage
from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage
from bxgateway.messages.ont.ont_message import OntMessage
from bxgateway.messages.ont.ont_message_type import OntMessageType
from bxgateway.messages.ont.ping_ont_message import PingOntMessage
from bxgateway.messages.ont.pong_ont_message import PongOntMessage
from bxgateway.messages.ont.tx_ont_message import TxOntMessage
from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage
from bxgateway.messages.ont.version_ont_message import VersionOntMessage
class _OntMessageFactory(AbstractMessageFactory):
_MESSAGE_TYPE_MAPPING = {
OntMessageType.VERSION: VersionOntMessage,
OntMessageType.VERACK: VerAckOntMessage,
OntMessageType.GET_ADDRESS: GetAddrOntMessage,
OntMessageType.ADDRESS: AddrOntMessage,
OntMessageType.PING: PingOntMessage,
OntMessageType.PONG: PongOntMessage,
OntMessageType.CONSENSUS: OntConsensusMessage,
OntMessageType.INVENTORY: InvOntMessage,
OntMessageType.GET_DATA: GetDataOntMessage,
OntMessageType.GET_HEADERS: GetHeadersOntMessage,
OntMessageType.GET_BLOCKS: GetBlocksOntMessage,
OntMessageType.BLOCK: BlockOntMessage,
OntMessageType.HEADERS: HeadersOntMessage,
OntMessageType.TRANSACTIONS: TxOntMessage,
OntMessageType.NOT_FOUND: NotFoundOntMessage
}
def __init__(self):
super(_OntMessageFactory, self).__init__()
self.message_type_mapping = self._MESSAGE_TYPE_MAPPING
def get_base_message_type(self) -> Type[AbstractMessage]:
return OntMessage
ont_message_factory = _OntMessageFactory()
| 1.421875 | 1 |
src/modules/resnet18.py | lkm2835/ELimNet | 6 | 12793027 | <gh_stars>1-10
import torch
from torch import nn as nn
from src.modules.base_generator import GeneratorAbstract
from torchvision import models
class Resnet18(nn.Module):
def __init__(self, in_channel: int, out_channel: int, pretrained: bool):
"""
Args:
in_channel: input channels.
out_channel: output channels.
"""
super().__init__()
self.out_channel = out_channel
self.model = models.resnet18(pretrained=pretrained)
del self.model.fc
del self.model.avgpool
if self.out_channel == 512:
pass
elif self.out_channel == 256:
del self.model.layer4
elif self.out_channel == 128:
del self.model.layer4
del self.model.layer3
elif self.out_channel == 64:
del self.model.layer4
del self.model.layer3
del self.model.layer2
else:
raise Exception("out_channel: 512, 256, 128 or 64")
def forward(self,x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
if self.out_channel >= 128:
x = self.model.layer2(x)
if self.out_channel >= 256:
x = self.model.layer3(x)
if self.out_channel >= 512:
x = self.model.layer4(x)
return x
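# Illustrative usage of the truncated backbone above (added for clarity; not part of
# the original module). ``in_channel`` is accepted but not used, so the stock ``conv1``
# still expects 3-channel input; the dummy batch below therefore uses 3 channels.
def _resnet18_backbone_demo() -> torch.Tensor:
    backbone = Resnet18(in_channel=3, out_channel=256, pretrained=False)
    dummy = torch.randn(1, 3, 224, 224)
    features = backbone(dummy)          # layer4 was deleted, so the output has 256 channels
    assert features.shape[1] == 256     # spatial size is 224 / 16 = 14 at this depth
    return features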
class Resnet18Generator(GeneratorAbstract):
""" Resnet18 (torchvision.models) module generator for parsing."""
def __init__(self, *args, **kwargs):
"""Initailize."""
super().__init__(*args, **kwargs)
@property
def out_channel(self) -> int:
"""Get out channel size."""
return self.args[0]
def __call__(self, repeat: int = 1):
# TODO: Apply repeat
pretrained = self.args[1] if len(self.args) > 1 else True
return self._get_module(
Resnet18(self.in_channel, self.out_channel, pretrained=pretrained)
) | 2.328125 | 2 |
src/adorn/data/parameter.py | pyadorn/adorn | 3 | 12793028 | <gh_stars>1-10
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""State from the constructor for a parameter from the constructor."""
from typing import Any
from typing import Dict
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from adorn.unit.complex import _UnitT
class Parameter:
"""State for a parameter from a given :class:`~adorn.data.constructor.Constructor`
``Parameter`` allows for information outside of its ``obj``, to be utilized,
to perform some action. Examples of constructs that utilize the information
contained in ``Parameter`` include:
- :class:`~adorn.unit.parameter_value.DependentTypeCheck`
- :class:`~adorn.unit.parameter_value.DependentFromObj`
- :class:`~adorn.unit.parameter_value.DependentUnion`
Attributes:
cls (_UnitT): the type of the underlying parameter
parent (_UnitT): The type of object whose constructor requires
an object of type ``cls``
local_state (Dict[str, Any]): Information about other arguments
provided to the given constructor
parameter_name (str): name of the parameter in the constructor
"""
def __init__(
self,
cls: "_UnitT",
parent: "_UnitT",
local_state: Dict[str, Any],
parameter_name: str,
):
self.cls = cls
self.parent = parent
self.local_state = local_state
self.parameter_name = parameter_name
self.origin = getattr(self.cls, "__origin__", None)
self.args = getattr(self.cls, "__args__", None)
def __eq__(self, o: object) -> bool: # noqa: C901
# Literal.__eq__ doesn't support dict's so
# we force the check
# prevent circular import
from adorn.unit.parameter_value import Dependent
if not isinstance(o, Parameter):
return False
normal_args = all(
i
for i in [
self.parent == o.parent,
self.local_state == o.local_state,
self.parameter_name == o.parameter_name,
]
)
if not normal_args:
return False
eq_cls = False
if issubclass(getattr(self.cls, "__origin__", int), Dependent):
eq_cls = str(self.cls) == str(o.cls)
else:
eq_cls = self.cls == o.cls
return eq_cls
def __str__(self):
return str(self.cls)
| 2.578125 | 3 |
fibonacci.py | shiny13/algorithm-problems-python | 1 | 12793029 | #use python3
import sys
def getNthFib(n):
if n == 2:
return 1
elif n == 1:
return 0
previous = 0
current = 1
for _ in range(n-2):
previous, current = current, previous + current
return current
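# Illustrative sanity check (not part of the original script): the sequence produced is
# 0, 1, 1, 2, 3, 5, 8, 13, ..., so under this 1-based indexing getNthFib(8) returns 13.
def _check_examples():
    assert getNthFib(1) == 0
    assert getNthFib(2) == 1
    assert getNthFib(8) == 13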
if __name__ == '__main__':
# Expected only one integer as input
    data = sys.stdin.read()  # avoid shadowing the built-in input()
    n = int(data)
print(getNthFib(n))
| 3.953125 | 4 |
examples/diffraction/hole_crossed_grating.py | benvial/gyptis | 0 | 12793030 | <filename>examples/diffraction/hole_crossed_grating.py
# -*- coding: utf-8 -*-
"""
3D Checkerboard Grating
=======================
Example of a dielectric bi-periodic diffraction grating.
"""
# sphinx_gallery_thumbnail_number = 2
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import gyptis as gy
gy.dolfin.parameters["form_compiler"]["quadrature_degree"] = 5
# gy.dolfin.parameters["ghost_mode"] = "shared_facet"
gy.dolfin.set_log_level(7)
##############################################################################
# Structure is the same as in :cite:p:`Demesy2010`.
#
# The units of lengths are in nanometers here, and we first define some
# geometrical and optical parameters:
lambda0 = 0.5
dx = dy = 1 # 5 * lambda0 * 2 ** 0.5 / 4 # periods of the grating
h = 0.05
theta0 = 0
phi0 = 0
psi0 = gy.pi / 4
eps_diel = 2.25
eps_layer = 0.8125 - 5.2500j
##############################################################################
# The thicknesses of the different layers are specified with an
# ``OrderedDict`` object **from bottom to top**:
thicknesses = OrderedDict(
{
"pml_bottom": lambda0,
"substrate": lambda0 / 1,
"groove": 1 * h,
"superstrate": lambda0 / 1,
"pml_top": lambda0,
}
)
##############################################################################
# Here we set the mesh refinement parameters, in order to be able to have
# ``parmesh`` cells per wavelength of the field inside each subdomain
degree = 2
pmesh = 3
pmesh_hole = pmesh * 1
mesh_param = dict(
{
"pml_bottom": 1 * pmesh * eps_diel ** 0.5,
"substrate": pmesh * eps_diel ** 0.5,
"groove": pmesh * abs(eps_layer) ** 0.5,
"hole": pmesh_hole,
"superstrate": pmesh,
"pml_top": 1 * pmesh,
}
)
##############################################################################
# Let's create the geometry using the :class:`~gyptis.Layered`
# class:
geom = gy.Layered(3, (dx, dy), thicknesses)
z0 = geom.z_position["groove"] # + h/10
# l_pillar = 0.9 * dx * 2 ** 0.5 / 2
R_hole = 0.25
hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole)
# pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar, h)
# geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi / 4)
groove = geom.layers["groove"]
sub = geom.layers["substrate"]
sup = geom.layers["superstrate"]
sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole)
geom.add_physical(hole, "hole")
geom.add_physical(groove, "groove")
geom.add_physical(sub, "substrate")
geom.add_physical(sup, "superstrate")
mesh_size = {d: lambda0 / param for d, param in mesh_param.items()}
geom.set_mesh_size(mesh_size)
# geom.remove_all_duplicates()
geom.build(interactive=0)
# geom.build(interactive=1)
######################################################################
# Set the permittivity and permeabilities for the various domains
# using a dictionary:
epsilon = {d: 1 for d in geom.domains}
mu = {d: 1 for d in geom.domains}
epsilon["groove"] = eps_layer
# epsilon["groove"] = eps_diel
epsilon["hole"] = 1
epsilon["substrate"] = eps_diel
######################################################################
# Now we can create an instance of the simulation class
# :class:`~gyptis.Grating`,
pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree)
grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8)
# pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space)
# gy.dolfin.File("test.pvd") << pp.real
# us = grating.formulation.annex_field["as_subdomain"]["stack"]
# pp = gy.utils.project_iterative(us,grating.formulation.real_function_space)
#
# gy.dolfin.File("test.pvd") << pp.real
#
# import os
# os.system("paraview test.pvd")
# xsx
grating.solve()
effs = grating.diffraction_efficiencies(2, orders=True)
print(effs)
xssx  # deliberate undefined name that halts the script here; the TE/TM reporting code below appears to be leftover from a 2D variant and references names that are never defined
E = grating.solution["total"]
# E = grating.solution["diffracted"]
# E = grating.solution["periodic"]
# E = grating.formulation.annex_field["as_subdomain"]["stack"]
pp = gy.utils.project_iterative(E, grating.formulation.real_function_space)
# Vplot = gy.dolfin.FunctionSpace(geom.mesh,"CG",degree)
#
# E = grating.formulation.phasor
# pp = gy.utils.project_iterative(E, Vplot)
gy.dolfin.File("test.pvd") << pp.real
import os
os.system("paraview test.pvd")
xs
### reference
T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001])
T = 0.04308
T = 0.12860
T = 0.06196
T = 0.12860
T = 0.17486
T = 0.12860
T = 0.06196
T = 0.12860
T = 0.04308
fmm = {}
print("Transmission coefficient")
print(" order ref calc")
print("--------------------------------")
print(f" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} ")
print(f" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} ")
######################################################################
# We switch to TE polarization
gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization="TE", degree=2)
gratingTM.solve()
effs_TM = gratingTM.diffraction_efficiencies(1, orders=True)
H = gratingTM.solution["total"]
######################################################################
# Let's visualize the fields
fig, ax = plt.subplots(1, 2)
ylim = geom.y_position["substrate"], geom.y_position["pml_top"]
gratingTE.plot_field(ax=ax[0])
gratingTE.plot_geometry(ax=ax[0])
ax[0].set_ylim(ylim)
ax[0].set_axis_off()
ax[0].set_title("$E_z$ (TM)")
gratingTM.plot_field(ax=ax[1])
gratingTM.plot_geometry(ax=ax[1])
ax[1].set_ylim(ylim)
ax[1].set_axis_off()
ax[1].set_title("$H_z$ (TE)")
fig.tight_layout()
fig.show()
######################################################################
# Results are in good agreement with the reference
print("Transmission coefficient")
print(" order ref calc")
print("--------------------------------")
print(f" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} ")
print(f" sum {T_ref['TE'][1]:.4f} {effs_TM['B']:.4f} ")
| 2.53125 | 3 |
pyteslaapi/__main__.py | tyamell/pytesla | 2 | 12793031 | <filename>pyteslaapi/__main__.py
#!/usr/bin/python
"""
Tesla CLI
"""
from client import TeslaApiClient
from exceptions import TeslaException
import logging
_LOGGER = logging.getLogger('pyteslaapi_cli')
def setup_logging(log_level=logging.INFO):
"""Set up the logging."""
logging.basicConfig(level=log_level)
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
datefmt = '%Y-%m-%d %H:%M:%S'
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
logger = logging.getLogger('')
logger.setLevel(log_level)
def call():
"""Execute command line helper."""
log_level = logging.DEBUG
setup_logging(log_level)
try:
        api = TeslaApiClient("<EMAIL>", "<PASSWORD>")
vehicles = api.vehicles
for v in vehicles:
if not v.wake_up():
_LOGGER.error("Unable to wake up vehicle")
continue
v.update()
_LOGGER.info(v.drive.attributes)
_LOGGER.info(v.climate.attributes)
_LOGGER.info(v.charge.attributes)
_LOGGER.info(v.gui_settings.attributes)
_LOGGER.info(v.locked)
except TeslaException as exc:
_LOGGER.error(exc.message)
def main():
"""Execute from command line."""
call()
if __name__ == '__main__':
main()
| 2.359375 | 2 |
Organization/player.py | kkuba91/turnament_organizer | 1 | 12793032 | """player.py
Chess player class. All data related to Player,
which can b set at the begining and during the game.
"""
# Global package imports:
from datetime import date
# Local package imports:
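# Default rating points assigned to each chess category (by sex); used by
# Player.calculate_rank() when no Elo rating has been set.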
CATEGORY = {
"male":{
"m": 2400,
"im": 2400,
"k++": 2300,
"k+": 2300,
"k": 2200,
"I++": 2100,
"I+": 2100,
"I": 2000,
"II+": 1900,
"II": 1800,
"III": 1600,
"IV": 1400,
"V": 1200,
"bk": 1000,
"wc": 1000
},
"female":{
"m": 2200,
"iwm": 2200,
"k++": 2100,
"k+": 2100,
"k": 2000,
"I++": 1900,
"I+": 1900,
"I": 1800,
"II+": 1700,
"II": 1600,
"III": 1400,
"IV": 1250,
"V": 1100,
"bk": 1000,
"wc": 1000
}
}
class Player(object):
def __init__(self) -> None:
# Static Player data:
self._name = ""
self._surname = ""
self._sex = ""
self._birth_date: date
self._city = ""
self._category = "bk"
self._elo = 0
self._rank = 1000
self._club = ""
# Dynamic Player data:
self._place = 0
self._idnt = 0
self._paused = False
self._points = 0.0
self._progress = 0.0
self._bucholz = 0.0
self._achieved_rank = 0
self._last_played_white = False
self._rounds = None
self._oponents = list()
self._results = list()
self._set = False # For setting round flag
self._round_done = False
def __repr__(self):
ret = f'\n#({self._idnt})'
ret += self.dump()
return ret
def set_name(self, name):
self._name = name
return self
def set_surname(self, surname):
self._surname = surname
return self
def set_sex(self, sex):
self._sex = sex
return self
def set_birthdate(self, birthdate):
self._birth_date = birthdate
return self
def set_city(self, city):
self._city = city
return self
def set_category(self, category):
self._category = category
return self
def set_elo(self, elo):
self._elo = elo
return self
def set_club(self, club):
self._club = club
        return self
def get_name(self):
return self._name
def get_surname(self):
return self._surname
def get_by_ident(self, ident):
if ident == self._idnt:
return f'{self._surname} {self._name}'
else:
return None
def calculate_rank(self):
if self._elo > 0:
self._rank = self._elo
else:
self._rank = CATEGORY[self._sex][self._category]
return self
def exist(self, name, surname):
_exist = False
if self.get_name() == name and self.get_surname() == surname:
_exist = True
else:
_exist = False
return _exist
def dump(self):
_dump = f"\nPLAYER (#{self._idnt}): {self._name} {self._surname}\n"
_dump += f"Sex: {self._sex}\n"
# _dump += f"Birth: {self._birth_date}\n"
_dump += f"City: {self._city}\n"
_dump += f"Category: {self._category}\n"
_dump += f"Elo rating: {self._elo}\n"
_dump += f"Turnament rating: {self._rank}\n"
return _dump
| 3.125 | 3 |
chainer/utils/argument.py | takeratta/chainer | 2 | 12793033 | <filename>chainer/utils/argument.py
def check_unexpected_kwargs(kwargs, **unexpected):
for key, message in unexpected.items():
if key in kwargs:
raise ValueError(message)
def parse_kwargs(kwargs, *name_and_values):
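    # Pop each named argument from ``kwargs`` (falling back to its default) and
    # raise TypeError if anything unexpected is left over, e.g.:
    #   x, y = parse_kwargs(kwargs, ('x', 1), ('y', 2))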
values = [kwargs.pop(name, default_value)
for name, default_value in name_and_values]
if kwargs:
args = ', '.join(["'%s'" % arg for arg in kwargs.keys()])
raise TypeError('got unexpected keyword argument(s) %s' % args)
return tuple(values)
def assert_kwargs_empty(kwargs):
# It only checks if kwargs is empty.
parse_kwargs(kwargs)
| 3.0625 | 3 |
hfs_creation.py | karamarielynch/hfs-sim | 0 | 12793034 | <reponame>karamarielynch/hfs-sim
from math import sqrt
from math import factorial
from operator import *
from numpy import *
### Defining the DeltaJ function that will be used in Wigner6J
def DeltaJ(a, b, c):
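    # Triangle coefficient Delta(a, b, c) = sqrt((a+b-c)! (a-b+c)! (-a+b+c)! / (a+b+c+1)!),
    # returned as 0 when the triangle condition on the angular momenta is not satisfied.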
Total = 0
while True:
if (a+b-c) < 0:
break
elif (a-b+c) < 0:
break
elif (-a+b+c) < 0:
break
elif (a+b+c+1) < 0:
break
Total = sqrt( float(factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c)) / float(factorial(a+b+c+1)) )
#print "Total: ", Total
break
return Total
### Defining Wigner 6J function
def Wigner6J(j1, j2, j3, j4, j5, j6):
# Wigner 6J angular momentum coupling
# {j1 j2 j3} = {J_upper F_upper I}
# {j4 j5 j6} {F_lower J_lower 1}
Delta_Total = DeltaJ(j1, j2, j3)*DeltaJ(j1, j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5, j3)
Wigner_Total = 0
z = 0
while True:
if (j1+j2+j4+j5-z) < 0:
break
elif (j2+j3+j5+j6-z) < 0:
break
elif (j3+j1+j6+j4-z) < 0:
break
while True:
if (z-j1-j2-j3) < 0:
break
elif (z-j1-j5-j6) < 0:
break
elif (z-j4-j2-j6) < 0:
break
elif (z-j4-j5-j3) < 0:
break
Wigner1 = float(factorial(z-j1-j2-j3))
Wigner2 = float(factorial(z-j1-j5-j6))
Wigner3 = float(factorial(z-j4-j2-j6))
Wigner4 = float(factorial(z-j4-j5-j3))
Wigner5 = float(factorial(j1+j2+j4+j5-z))
Wigner6 = float(factorial(j2+j3+j5+j6-z))
Wigner7 = float(factorial(j3+j1+j6+j4-z))
Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7
Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator
z = z+1
if (j1+j2+j4+j5-z) <= 0:
break
elif (j2+j3+j5+j6-z) <= 0:
break
elif (j3+j1+j6+j4-z) <= 0:
break
z = z+1
Total = float(Delta_Total*Wigner_Total)
return Total
### Defining the Gaussian function for the HF spectrum
def Gaussian(x, HFS_frequency, FWHM, intensity):
return float(intensity)*exp(- 0.5*((HFS_frequency-x)/(FWHM/2.355))**2) # Gaussian function
### Defining the Lorentzian function for the HF spectrum
def Lorentzian(x, HFS_frequency, gamma, intensity):
return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2)) # Lorentzian function
### Defining the Voigt function for the HF spectrum
def pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta):
Gauss = exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2)
Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2)
Voigt = eta*Lorentz + (1-eta)*Gauss
return intensity*Voigt # Voigt function
### Defining the Crystalball function
def Crystalball(x_array, x0, N, sigma, alpha, n):
y_array = []
for i in range(len(x_array)):
x = x_array[i]
t = (x-x0)/sigma
if (alpha < 0):
t = -t
if (t >= -abs(alpha)):
y = exp(-0.5*t*t)
else:
a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha))
b = n/abs(alpha) - abs(alpha)
y = a/(b - t)**n
y_array.append(N*y)
return array(y_array)
### Defining an exponential pseudoVoigt function
def expoVoigt(x_array, x0, intensity, FWHM, alpha, eta):
y_array = []
for i in range(len(x_array)):
x = x_array[i]
t = (x-x0)/FWHM
if (alpha < 0):
t = -t
if (t >= -abs(alpha)):
y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t)
else:
y = pseudoVoigt(x, x0, FWHM, intensity, eta)
y_array.append(y)
return array(y_array)
### Defining the HF function which simulates the HF spectrum
def HF_function(I, J_lower, J_upper, centroid_frequency, A_lower, A_upper, B_lower, B_upper):
# Calculates the F values for a J_lower to J_upper transition
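    # Each allowed transition (|F_upper - F_lower| <= 1) is placed at
    # centroid + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower,
    # i.e. the first-order hyperfine shift with magnetic-dipole (A) and
    # electric-quadrupole (B) constants; relative intensities come from the Wigner 6-j symbol.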
HFS_frequency = []; HF_intensity = []
F_lower_min = pos(I - J_lower)
F_lower_max = pos(I + J_lower)
F_upper_min = pos(I - J_upper)
F_upper_max = pos(I + J_upper)
while F_lower_min < (F_lower_max +1) :
F_upper_min = pos(I - J_upper)
while F_upper_min < (F_upper_max +1) :
F_lower = F_lower_min
F_upper = F_upper_min
F_delta = F_upper - F_lower
if (-1 <= F_delta <= 1):
K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1)
alpha_lower = K_lower/2
if I <= 0.5 :
beta_lower = 0
elif J_lower <= 0.5 :
beta_lower = 0
else:
beta_lower = (3*K_lower*(K_lower+1)-4*I*(I+1)*J_lower*(J_lower+1))/(8*I*(2*I-1)*J_lower*(2*J_lower-1))
K_upper = F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1)
alpha_upper = K_upper/2
if I <= 0.5 :
beta_upper = 0
elif J_upper <= 0.5 :
beta_upper = 0
else:
beta_upper = (3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1))
HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower)
HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2)
F_upper_min = F_upper_min +1
F_lower_min = F_lower_min +1
return HFS_frequency, HF_intensity
### Defining the intensity for each of the HF peaks
def HF_intensity(I, J_lower, J_upper, F_lower, F_upper):
# Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1}
# {J_upper J_lower I}
Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2
if Intensity == 0:
print("Intensity = 0")
return Intensity
### Defining Doppler correction from lab frame to rest frame
def Doppler_correction(freq_range_lab, mass, iscool_voltage):
alpha = iscool_voltage/(mass*931.494061*10**6)
freq_range_rest = freq_range_lab*( 1 + alpha - sqrt(2*alpha + alpha*alpha))
return freq_range_rest
### Defining wavenumber to frequency conversion
def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction):
frequency_rest_frame = array([]); frequency_lab_frame = array([])
c = 299792458.0 # Speed of light [ms-1] in a vacuum
e = 2.7182818284 # Maths constant
alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c - in units of e/c**2
# Convert from wavenumber to frequency in lab frame
frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber doubled as reading taken at fundamental frequency (calculate in MHz)
# Convert frequency from lab frame to rest frame
frequency_rest_frame = frequency_lab_frame*( 1 + alpha - sqrt(2*alpha + alpha*alpha))
# Convert to relative frequency
frequency_relative = frequency_rest_frame - frequency_correction
return frequency_relative
### Defining the HF structure
def HFS(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x):
for i in range(len(HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0])):
HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i]
intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i]
intensity_1 = intensity_1*Int
Bkgnd = Bkgnd + Gaussian(x, HFS_frequency, FWHM, intensity_1)
return Bkgnd
| 2.8125 | 3 |
get_styles.py | big-skim-milk/Quetzal | 2 | 12793035 | IFS = """
"""
def STYLES():
with open('style.qss') as main_styles:
with open('customizable_styles.qss') as custom_styles:
all_styles = main_styles.read() + IFS + custom_styles.read()
return all_styles
| 2.171875 | 2 |
graphs_trees/bst_validate/test_bst_validate.py | filippovitale/interactive-coding-challenges | 0 | 12793036 | <reponame>filippovitale/interactive-coding-challenges
from nose.tools import assert_equal
class TestBstValidate(object):
def test_bst_validate(self):
node = Node(5)
insert(node, 8)
insert(node, 5)
insert(node, 6)
insert(node, 4)
insert(node, 7)
assert_equal(validate_bst(node), True)
root = Node(5)
left = Node(5)
right = Node(8)
invalid = Node(20)
root.left = left
root.right = right
root.left.right = invalid
assert_equal(validate_bst(root), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.test_bst_validate()
if __name__ == '__main__':
    main()
| 3.5625 | 4 |
noxfile.py | tdmorello/imagecatalog | 0 | 12793037 | <reponame>tdmorello/imagecatalog
"""Nox sessions."""
import shutil
import sys
import tempfile
from pathlib import Path
import nox
from nox import Session, session
python_versions = ["3.10", "3.9", "3.8", "3.7"]
@nox.session(python=python_versions)
def tests(session: Session) -> None:
"""Run the test suite."""
_install_via_pip(session)
session.install("pytest", "pytest-cov", "xdoctest")
session.run("pytest")
@nox.session
def lint(session: Session) -> None:
"""Run linting."""
pass
@session(python=python_versions[0])
def docs(session: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = session.posargs or [
"--open-browser",
"docs",
"docs/_build",
]
_install_via_pip(session)
session.install("-r", "docs/requirements.txt")
session.install("sphinx", "sphinx-autobuild")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
def _install_via_pip(session: Session) -> None:
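    # Export the poetry-managed dependencies to a temporary requirements file
    # and install from it, so the session uses the same pins as the lock file.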
with tempfile.NamedTemporaryFile() as requirements:
if sys.platform == "win32":
requirements_path = "requirements.txt"
else:
requirements_path = requirements.name
session.run(
"poetry",
"export",
"--without-hashes",
"-o",
requirements_path,
external=True,
)
session.install("-r", requirements_path)
session.install(".")
| 2.125 | 2 |
src/moodlesheet/contactsheet/__init__.py | fstwn/moodlesheet | 10 | 12793038 | # -*- coding: utf-8 -*-
"""Top-level package for contactsheet."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 0.941406 | 1 |
knot_a_rumor/story.py | rubyyot/knot-a-rumor | 0 | 12793039 | from os.path import join
import yaml
class Story:
def __init__(self, path):
self.path = path
self.author = None
self.title = None
self.scene = None
self.synopsis = None
def load(self):
f = open(self.filename(), 'r')
text = f.read()
f.close()
        data = yaml.safe_load(text)
if data == None:
return False
else:
self.__dict__ = data
return True
def filename(self):
return join(self.path, "story.yaml")
class Scene():
def __init__(self, path, state):
scene_file = state["current_scene"]
self.load(path, scene_file)
def load(self, path, scene_file):
f = open(self.filename(path, scene_file), 'r')
text = f.read()
f.close()
        data = yaml.safe_load(text)
if data == None:
return False
else:
self.__dict__ = data
self.__dict__["scene_map"] = self.__dict__["scene_map"].strip("\n")
return True
def filename(self, path, scene_file):
return join(path, "{0}.yaml".format(scene_file))
def disassemble_map(self):
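        # Split the ASCII map into a grid of characters, bottom row first, so
        # grid[y][x] lines up with the (x, y) coordinates stored in the state.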
rows = list(reversed(self.scene_map.split("\n")))
disassembled = []
for row in rows:
disassembled.append(list(row))
return disassembled
def reassemble_map(self, grid):
rows = []
for row in grid:
rows.append(''.join(row))
reassembled = "\n".join(list(reversed(rows)))
return reassembled
def build_map(self, state):
x = state["location"]["x"]
y = state["location"]["y"]
grid = self.disassemble_map()
grid[y][x] = "@"
if self.level in state["seen"]:
for item in self.items.values():
grid[item["y"]][item["x"]] = item["char"]
reassembled = self.reassemble_map(grid)
return reassembled
def valid_move(self, location, direction, times):
start_x = location["x"]
start_y = location["y"]
# validate direction and times
if not type(times) is int:
return False
if not type(direction) is str:
return False
if times < 1 or times > 100:
return False
if len(direction) > 1:
return False
if not direction in "nsew":
return False
# find new postion
x = start_x
y = start_y
rows = list(reversed(self.scene_map.split("\n")))
for i in range (0, times):
if direction == "n":
y += 1
elif direction == "s":
y -= 1
elif direction == "e":
x += 1
elif direction == "w":
x -= 1
if len(rows) <= y:
return False
if x < 0 or y < 0:
return False
tiles = list(rows[y])
if len(tiles) <= x:
return False
if tiles[x] != "#":
return False
return True
def view(self, location):
x = location["x"]
y = location["y"]
narration = None
for pview in self.views.values():
if pview["x"] == x and pview["y"] == y:
narration = pview["narration"]
return narration
def look(self, state):
seen = "\n".join(self.items.keys())
if self.level not in state["seen"]:
state["seen"].append(self.level)
return state, seen
def describe(self, state, char):
if self.level not in state["seen"]:
return state, None
items = self.items.values()
item_list = list(filter(lambda x: x["char"] == char, items))
if len(item_list) == 0:
return state, None
return state, item_list[0]["description"]
| 3.03125 | 3 |
pysite/mixins.py | schwartzadev/site | 0 | 12793040 | <reponame>schwartzadev/site<filename>pysite/mixins.py
# coding=utf-8
from flask import Blueprint
from rethinkdb.ast import Table
from _weakref import ref
from pysite.database import RethinkDB
class DBMixin():
"""
Mixin for classes that make use of RethinkDB. It can automatically create a table with the specified primary
key using the attributes set at class-level.
This class is intended to be mixed in alongside one of the other view classes. For example:
>>> class MyView(APIView, DBMixin):
... name = "my_view" # Flask internal name for this route
... path = "/my_view" # Actual URL path to reach this route
... table_name = "my_table" # Name of the table to create
... table_primary_key = "username" # Primary key to set for this table
This class will also work with Websockets:
>>> class MyWebsocket(WS, DBMixin):
... name = "my_websocket"
... path = "/my_websocket"
... table_name = "my_table"
... table_primary_key = "username"
You may omit `table_primary_key` and it will be defaulted to RethinkDB's default column - "id".
"""
table_name = "" # type: str
table_primary_key = "id" # type: str
@classmethod
def setup(cls: "DBMixin", manager: "pysite.route_manager.RouteManager", blueprint: Blueprint):
"""
Set up the view by creating the table specified by the class attributes - this will also deal with multiple
inheritance by calling `super().setup()` as appropriate.
:param manager: Instance of the current RouteManager (used to get a handle for the database object)
:param blueprint: Current Flask blueprint
"""
if hasattr(super(), "setup"):
super().setup(manager, blueprint) # pragma: no cover
if not cls.table_name:
raise RuntimeError("Routes using DBViewMixin must define `table_name`")
cls._db = ref(manager.db)
manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key)
@property
def table(self) -> Table:
return self.db.query(self.table_name)
@property
def db(self) -> RethinkDB:
return self._db()
| 2.78125 | 3 |
python/AnalysisLaunchers/get_analysis.py | FabricGenomics/omicia_api_examples | 2 | 12793041 | <filename>python/AnalysisLaunchers/get_analysis.py<gh_stars>1-10
"""Get an analysis, or all analyses in the workspace.
Example usages:
python get_analysis.py --id 1802
python get_analysis.py
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import simplejson as json
import argparse
# Load environment variables for request authentication parameters
if "FABRIC_API_PASSWORD" not in os.environ:
sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def get_analysis(analysis_id=None, genome_id=None):
"""Use the Omicia API to get an analysis
"""
# Construct request
if analysis_id:
url = "{}/analysis/{}/"
url = url.format(FABRIC_API_URL, analysis_id)
else:
url = "{}/analysis"
url = url.format(FABRIC_API_URL, analysis_id)
if genome_id:
url = '{}?genome_id={}'.format(url, genome_id)
sys.stdout.flush()
result = requests.get(url, auth=auth)
return result.json()
def main():
"""Main function. Get analyses or one analysis by ID.
"""
parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report')
parser.add_argument('--id', metavar='analysis_id', type=int)
parser.add_argument('--genome_id', metavar='genome_id', type=int)
args = parser.parse_args()
analysis_id = args.id
genome_id = args.genome_id
json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id)
sys.stdout.write(json.dumps(json_response, indent=4))
if __name__ == "__main__":
main()
| 2.921875 | 3 |
hp_transfer_aa_experiments/run.py | hp-transfer/htaa_experiments | 1 | 12793042 | import contextlib
import logging
import logging.config
import random
import time
from pathlib import Path
import hp_transfer_benchmarks # pylint: disable=unused-import
import hp_transfer_optimizers # pylint: disable=unused-import
import hydra
import numpy as np
import yaml
from gitinfo import gitinfo
from hp_transfer_optimizers.core import nameserver as hpns
from hp_transfer_optimizers.core import result as result_utils
from hp_transfer_optimizers.core.worker import Worker
from omegaconf import OmegaConf
from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row
logger = logging.getLogger("hp_transfer_aa_experiments.run")
def _read_reference_losses(args):
reference_losses = None
if args.runtype.type.startswith("eval_reference"):
reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path)
with Path(reference_losses_path).open("r") as stream:
reference_losses = yaml.safe_load(stream)
reference_losses = reference_losses[args.benchmark.name]
reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)]
reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)]
return reference_losses
def _get_trial_parameters(args, reference_losses, step):
if step == 1 and args.runtype.type in ["eval_dim", "eval_reference"]:
trials_per_task = args.runtype.dim_factor_pre_adjustment
else:
trials_per_task = args.runtype.dim_factor
logger.info(f"Using {trials_per_task} trials per task")
if step > 1 and args.runtype.type.startswith("eval_reference"):
trials_until_loss = reference_losses[step][f"{args.runtype.dim_factor}_loss"]
logger.info(
f"Also performing trials until loss {trials_until_loss :.4f}"
f" (max {10 * trials_per_task})"
)
else:
trials_until_loss = None
return trials_per_task, trials_until_loss
def _write_batch_result(args, result_batch):
batch_result_row = get_batch_result_row(
args.benchmark.name,
args.runtype.dim_factor_pre_adjustment,
args.approach.name,
args.benchmark.benchmark.trajectory_id,
args.benchmark.benchmark.adjustment_id,
args.run_id,
result_batch,
)
result_path = Path(
hydra.utils.to_absolute_path("results"),
args.experiment_group,
f"results/{args.experiment_name.replace('/', ',')}.csv",
)
result_path.parent.mkdir(exist_ok=True, parents=True)
with result_path.open("a") as result_stream:
result_stream.write("\t".join([str(value) for value in batch_result_row]) + "\n")
def _run_on_task_batch(
optimizer,
task_batch,
configspace,
step,
result_trajectory,
trials_per_task,
trials_until_loss,
args,
):
do_transfer = args.approach.name.startswith("transfer")
previous_results = result_trajectory if do_transfer else None
result_batch = result_utils.BatchResult(step, configspace)
for task in task_batch:
logger.info(f"Running on task {task.identifier}")
task_result = optimizer.run(
configspace=configspace,
task=task,
n_iterations=trials_per_task,
trials_until_loss=trials_until_loss,
previous_results=previous_results,
)
result_batch.insert(task_result, task)
if step > 1:
_write_batch_result(args, result_batch)
return result_batch
def _train_and_eval(optimizer, benchmark, args):
reference_losses = _read_reference_losses(args)
result_trajectory = result_utils.TrajectoryResult()
for step, (train_batch, configspace) in enumerate(
zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1
):
if args.runtype.type == "reference" and step == 1:
continue
logger.info(f"Step ------- {step :04d}")
trials_per_task, trials_until_loss = _get_trial_parameters(
args, reference_losses, step
)
logger.info(f"Using configspace\n{configspace}".rstrip())
batch_result = _run_on_task_batch(
optimizer,
train_batch,
configspace,
step,
result_trajectory,
trials_per_task,
trials_until_loss,
args,
)
result_trajectory.insert(batch_result)
class _HPOWorker(Worker):
def __init__(self, benchmark, **kwargs):
super().__init__(**kwargs)
# Only read task once
self._benchmark = benchmark
self._previous_task_identifier = None
self._previous_development_stage = None
self._task = None
# pylint: disable=unused-argument
def compute(
self,
config_id,
config,
budget,
working_directory,
*args,
**kwargs,
):
task_identifier = kwargs["task_identifier"]
development_stage = kwargs["development_stage"]
task_changed = (
development_stage != self._previous_development_stage
or self._previous_task_identifier != task_identifier
)
if task_changed: # Only read task once
self._previous_task_identifier = task_identifier
self._previous_development_stage = development_stage
self._task = self._benchmark.get_task_from_identifier(
task_identifier, development_stage
)
if "development_step" in config:
del config["development_step"]
return self._task.evaluate(config)
def _run_worker(args, benchmark, working_directory):
time.sleep(5) # short artificial delay to make sure the nameserver is already running
host = hpns.nic_name_to_host(args.nic_name)
w = _HPOWorker(
benchmark,
run_id=args.run_id,
host=host,
logger=logging.getLogger("worker"),
)
w.load_nameserver_credentials(working_directory=str(working_directory))
w.run(background=False)
def _run_master(args, benchmark, working_directory):
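    # Start the nameserver and one in-process background worker, then hand
    # their host/port to the optimizer so external workers can attach to the
    # same run_id.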
nameserver = hpns.NameServer(
run_id=args.run_id,
working_directory=str(working_directory),
nic_name=args.nic_name,
)
ns_host, ns_port = nameserver.start()
# Start a background worker for the master node
w = _HPOWorker(
benchmark,
run_id=args.run_id,
host=ns_host,
nameserver=ns_host,
nameserver_port=ns_port,
logger=logging.getLogger("worker"),
)
w.run(background=True)
# Create an optimizer
optimizer = hydra.utils.instantiate(
args.approach.approach,
host=ns_host,
nameserver=ns_host,
nameserver_port=ns_port,
logger=logging.getLogger("master"),
)
# Train and evaluate the optimizer
try:
_train_and_eval(optimizer, benchmark, args)
finally:
optimizer.shutdown(shutdown_workers=True)
nameserver.shutdown()
def _set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.deterministic = True
# torch.manual_seed(seed)
# tf.random.set_seed(seed)
@hydra.main(config_path="configs", config_name="run")
def run(args):
_set_seeds(args.seed)
working_directory = Path().cwd()
# Log general information
logger.info(f"Using working_directory={working_directory}")
with contextlib.suppress(TypeError):
git_info = gitinfo.get_git_info()
logger.info(f"Commit hash: {git_info['commit']}")
logger.info(f"Commit date: {git_info['author_date']}")
logger.info(f"Arguments:\n{OmegaConf.to_yaml(args)}")
# Construct benchmark
if "data_path" in args.benchmark.benchmark:
args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path(
args.benchmark.benchmark.data_path
)
benchmark = hydra.utils.instantiate(args.benchmark.benchmark)
# Actually run
if args.worker_id == 0:
_run_master(args, benchmark, working_directory)
else:
_run_worker(args, benchmark, working_directory)
logger.info(f"Run finished")
if __name__ == "__main__":
run() # pylint: disable=no-value-for-parameter
| 1.96875 | 2 |
process_ez.py | UrbanInstitute/nccs-public | 7 | 12793043 | from process_co_pc import *
import logging
# Code by <NAME> (<EMAIL>), 2016-2017
def ez_dup_criteria(dups):
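    # Score each duplicate filing by the magnitude of its revenue, assets and
    # expenses; the returned column list gives the sort order used when
    # de-duplicating records.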
dups['val'] = dups['TOTREV'].abs() + dups['ASS_EOY'].abs() + dups['EXPS'].abs()
return dups, ['FISYR', 'val', 'STYEAR', 'rnd']
class ProcessEZ(ProcessCOPC):
"""
Creates columns found only in the EZ dataframe
"""
def ez_calculate(self):
"""
Base method for calling all of the methods to calculate the columns for the 990 EZ form.
ARGUMENTS
None
RETURNS
None
"""
main = self.main
ez = main.data_dict['EZ']
main.logger.info('Calculating new columns for EZ.')
ez['TOTREV'] = self.ez_totrev(ez)
ez['GRREC'] = self.ez_grrec(ez)
ez['PROGREV'] = self.ez_progrev(ez)
ez['SPEVTG'] = self.ez_spevtg(ez)
ez['NETGNLS'] = self.ez_netgnls(ez)
ez['FILENAME'] = self.ez_filename(ez)
ez['EPOSTCARD'] = self.copc_epostcard(ez)
ez['STYEAR'] = self.copc_styear(ez)
ez['SOIYR'] = self.copc_soiyr(ez)
ez['SUBCD'] = self.copc_subcd(ez)
def ez_grrec(self, ez):
"""
Calculates the GRREC column. Note that the same column has a different calculation for EINs from
the Full 990 and EINS from the 990 EZ.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
assert(ez['TOTREV'].dtype.type in [np.int64, np.float64])
assert(ez['SALEOTHG'].dtype.type in [np.int64, np.float64])
assert(ez['DIREXP'].dtype.type in [np.int64, np.float64])
assert(ez['GOODS'].dtype.type in [np.int64, np.float64])
return ez['TOTREV'] + ez['SALEOTHG'] + ez['DIREXP'] + ez['GOODS']
def ez_progrev(self, ez):
"""
Calculates the PROGREV column.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
assert(ez['DUESASSESMNTS'].dtype.type in [np.int64, np.float64])
assert(ez['PRGMSERVREV'].dtype.type in [np.int64, np.float64])
return ez['DUESASSESMNTS'] + ez['PRGMSERVREV']
def ez_spevtg(self, ez):
"""
Calculates the SPEVTG column.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
assert(ez['GRSINCFNDRSNG'].dtype.type in [np.int64, np.float64])
assert(ez['GRSINCGAMING'].dtype.type in [np.int64, np.float64])
return ez['GRSINCFNDRSNG'] + ez['GRSINCGAMING']
def ez_totrev(self, ez):
"""
Calculates the PROGREV column. Note that TOTREV2 is taken from 990EZ part I, 9, while TOTREV
is calculated from the expense and income subtotals. This is the only column like this, and usually
any discrepencies between stated and calculated values are tested in the validation steps. However,
it was always done this way before, so it continues.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
return ez['EXPS'] + ez['NETINC']
def ez_netgnls(self, ez):
"""
Returns the SALEOTHN column exactly. Redundant holdover from the old SQL process.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
return ez['SALEOTHN']
def ez_filename(self, ez):
"""
Assembles the FILENAME column from the EIN and TAXPER columns, which is used to build the URL to
the PDF of the 990 filing on the Foundation Center's website. The full construction is:
http://990s.foundationcenter.org/990_pdf_archive/<FIRST THREE DIGITS OF EIN>/<FULL EIN>/<FILENAME>.pdf
for 990 Full or EZ filings, or
http://990s.foundationcenter.org/990pf_pdf_archive/<FIRST THREE DIGITS OF EIN>/<FULL EIN>/<FILENAME>.pdf
for 990 PF filings.
ARGUMENTS
ez (DataFrame) : Core file dataframe
RETURNS
Series
"""
return ez.index + '_' + ez['TAXPER'] + '_990EZ'
def ez_manual(self):
"""
Applies any manual, one-time fixes to the EZ data. This is usually defined as a change to a single
EIN from a single year, in a non-generalizable way, e.g. a mistyped EIN in the raw IRS data.
ARGUMENTS
None
RETURNS
None
"""
try:
entry = self.main.data_dict['EZ'].loc['580623603']
if entry['SOURCE'] == '16eofinextractez.dat' and entry['NAME'] == 'UNITED WAY OF THE COASTAL EMPIRE INC':
self.main.data_dict['EZ'].drop('580623603', inplace=True)
except KeyError:
pass
| 2.40625 | 2 |
elfinder/exceptions.py | vikifox/CMDB | 16 | 12793044 | <gh_stars>10-100
from django.utils.translation import ugettext as _
class ElfinderErrorMessages:
"""
Standard error message codes, the text message of which is handled by the
elFinder client
"""
ERROR_UNKNOWN = 'errUnknown'
ERROR_UNKNOWN_CMD = 'errUnknownCmd'
ERROR_CONF = 'errConf'
ERROR_CONF_NO_JSON = 'errJSON'
ERROR_CONF_NO_VOL = 'errNoVolumes'
ERROR_INV_PARAMS = 'errCmdParams'
ERROR_OPEN = 'errOpen'
ERROR_DIR_NOT_FOUND = 'errFolderNotFound'
ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.'
ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder "$1" not found.'
ERROR_NOT_DIR = 'errNotFolder'
ERROR_NOT_FILE = 'errNotFile'
ERROR_PERM_DENIED = 'errPerm'
ERROR_LOCKED = 'errLocked' #'"$1" is locked and can not be renamed, moved or removed.'
ERROR_EXISTS = 'errExists' #'File named "$1" already exists.'
ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.'
ERROR_MKDIR = 'errMkdir'
ERROR_MKFILE = 'errMkfile'
ERROR_RENAME = 'errRename'
ERROR_COPY = 'errCopy'
ERROR_MOVE = 'errMove'
ERROR_COPY_FROM = 'errCopyFrom'
ERROR_COPY_TO = 'errCopyTo'
ERROR_COPY_ITSELF = 'errCopyInItself'
ERROR_REPLACE = 'errReplace' #'Unable to replace "$1".'
ERROR_RM = 'errRm' #'Unable to remove "$1".'
ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)'
ERROR_UPLOAD = 'errUpload' #'Upload error.'
ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload "$1".'
ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.'
ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.'
ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.'
ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.'
ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'"$1" transfer error.'
ERROR_ACCESS_DENIED = 'errAccess'
ERROR_NOT_REPLACE = 'errNotReplace' #Object "$1" already exists at this location and can not be replaced with object of another type.
ERROR_SAVE = 'errSave'
ERROR_EXTRACT = 'errExtract'
ERROR_ARCHIVE = 'errArchive'
ERROR_NOT_ARCHIVE = 'errNoArchive'
ERROR_ARCHIVE_TYPE = 'errArcType'
ERROR_ARC_SYMLINKS = 'errArcSymlinks'
ERROR_ARC_MAXSIZE = 'errArcMaxSize'
ERROR_RESIZE = 'errResize'
ERROR_UNSUPPORT_TYPE = 'errUsupportType'
ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content'
ERROR_NETMOUNT = 'errNetMount'
ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver'
ERROR_NETMOUNT_FAILED = 'errNetMountFailed'
class VolumeNotFoundError(Exception):
def __init__(self):
super(VolumeNotFoundError, self).__init__(_("Volume could not be found"))
class FileNotFoundError(Exception):
def __init__(self):
super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND)
class DirNotFoundError(Exception):
def __init__(self):
super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND)
class PermissionDeniedError(Exception):
def __init__(self):
super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED)
class NamedError(Exception):
"""
Elfinder-specific exception.
`msg` contains the error code
`name` holds the path for which operation failed
"""
def __init__(self, msg, name):
self.name = name
super(NamedError, self).__init__(msg)
class NotAnImageError(Exception):
def __init__(self):
        super(NotAnImageError, self).__init__(_('This is not a valid image file'))
| 2 | 2 |
app1/utils/image_url.py | xieyu-aa/news | 0 | 12793045 | from qiniu import Auth, put_data
# Fill in your Access Key and Secret Key
access_key = '<KEY>'
secret_key = '<KEY>'
def image_url(image_data):
    # Build the authentication object
    q = Auth(access_key, secret_key)
    # Bucket (storage space) to upload to
    bucket_name = 'new3333'
    # Filename to save the upload under
    key = None
    # Handle the upload result
token = q.upload_token(bucket_name, key, 3600)
ret, info = put_data(token, key, image_data)
print(ret)
print(info)
if info.status_code == 200:
return ret.get('key')
else:
return None
if __name__ == '__main__':
with open('./滑稽.jpg', 'rb') as f:
image_data = f.read()
image_url(image_data)
| 2.765625 | 3 |
kiyoshi_ni_shokuhatsu/tools/png_to_palette.py | grokit/grokit.github.io | 10 | 12793046 | <filename>kiyoshi_ni_shokuhatsu/tools/png_to_palette.py
#!/usr/bin/python3
"""
# Links
- https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy
"""
import scipy
import scipy.misc
import sys
def load_gimp_palette(filename):
"""
For simplicity's sake, a palette is just an array of RGB values:
palette = [
[r,g,b],
[r,g,b],
...
]
"""
lines = open(filename, 'r').readlines()
palette = []
for line in lines:
if '#' in line:
line = line.split('#')[1]
try:
r = int('0x'+line[0:2], 0)
g = int('0x'+line[2:4], 0)
b = int('0x'+line[4:6], 0)
rgb = [r,g,b]
palette.append(rgb)
except:
#print('Ignore %s' % line)
pass
return palette
def filter_to_red(rgba):
return [rgba[0], 0, 0, rgba[3]]
def filter_to_closest_in_palette(rgba, palette):
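    # Pick the palette colour with the smallest L1 distance (sum of absolute
    # RGB differences) to the pixel; the alpha channel is passed through untouched.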
best = None
dist = 1e9
for prgb in palette:
assert len(prgb) == 3
diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2])
if diff < dist:
dist = diff
best = prgb[:]
# rgba[3] is transparency, which we don't touch.
best.append(rgba.tolist()[3])
return best
img_file = './test_input.png'
if len(sys.argv) >= 2:
img_file = sys.argv[1]
# Expected format:
# #0080fc
# ...
# This is the format using Gimp -> export as .txt.
palette = load_gimp_palette('./palette.txt')
img = scipy.misc.imread(img_file)
for i in range(len(img)):
for j in range(len(img[i])):
rgba = img[i][j]
#print('Bef: %s' % rgba)
rgba = filter_to_closest_in_palette(rgba, palette)
#print('Aft: %s' % rgba)
img[i][j] = rgba
scipy.misc.imsave(img_file.replace('input', 'output'), img)
#scipy.misc.imsave('xxx_'+img_file, img)
| 3.453125 | 3 |
src/0059.spiral-matrix-ii/spiral-matrix-ii.py | lyphui/Just-Code | 782 | 12793047 | <reponame>lyphui/Just-Code
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
if not n: return []
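        # Build the spiral backwards: start from the largest value in a 1x1
        # matrix, then repeatedly prepend the next run of smaller numbers as a
        # new top row and rotate the partial result a quarter turn.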
A, lo = [[n*n]], n*n
while lo > 1:
lo, hi = lo - len(A), lo
A = [[ i for i in range(lo, hi)]] + [list(j) for j in zip(*A[::-1])]
        return A
| 3.015625 | 3 |
game/environment.py | lucascampello/wumpus-cli | 0 | 12793048 | <reponame>lucascampello/wumpus-cli<filename>game/environment.py
from random import randrange
class Environment(object):
def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1):
# Itens Possíves de Perceber no Mapa
self.perceptions = {
"pit": "breeze",
"gold": "glitter",
"wumpus": "stench",
}
# Tamanho da largua da matrix
self.dimension = dimension
# Array de Coordenadas dos Elementos do Jogo (Poço | Wumpus | Ouro)
self.coordinate = {
"pit":[],
"wumpus":[],
"gold":[]
}
valid_environment = False
while(not valid_environment):
self.matrix = [['empty' for column in range(dimension)] for line in range(dimension)]
self.matrix[0][0] = 'start'
self.matrix_perceptions = [[ [] for column in range(dimension)] for line in range(dimension)]
            self.generate({'name': 'gold','amount':n_golds}) # Place the gold
            self.generate({'name': 'pit','amount':n_pits}) # Place the pits
            self.generate({'name': 'wumpus','amount':n_wumpus}) # Place the Wumpus
self.screamTrigger = False
self.n_pits = n_pits
valid_environment = self.validEnvironment()
def generate(self, obj: dict) -> None:
for _ in range(obj['amount']):
x = y = 0
if(obj['name'] == 'gold'):
x = self.dimension-1
y = self.dimension-1
else:
self.coordinate[obj["name"]].append((x,y))
x,y = self.randomCoordinate()
self.matrix[x][y] = obj['name']
            # Build the adjacency (perception) matrices
if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']])
else:
                # check if on the first row
if x == 0:
self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']])
                    # check if in the first column
if y == 0:
self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])
                    # check if in the last column
elif y == (self.dimension-1):
self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])
                    # check if in a middle column
else:
self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']])
self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']])
                # check if on the last row
elif x == (self.dimension - 1):
self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']])
                    # check if in the first column
if y == 0:
self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])
                    # check if in the last column
if y == (self.dimension - 1):
self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])
                    # check if in a middle column
else:
self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])
self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])
                # check if on a middle row
else:
self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']])
self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']])
                    # check if in the first column
if y == 0:
self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])
                    # check if in the last column
if y == (self.dimension - 1):
self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])
                    # check if in a middle column
else:
self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])
self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])
def printMatrix(self, coordinate: tuple):
output = ''
#print(coordinate)
for line in range(self.dimension -1, -1, -1):
for column in range(self.dimension):
if coordinate == (line, column):
output += '|A'
else:
if self.matrix[line][column] == 'wumpus': output += '|W'
elif self.matrix[line][column] == 'gold': output += '|G'
elif self.matrix[line][column] == 'pit': output += '|P'
else : output += '| '
output += '|\n'
print(output)
return output
def getPerceptions(self, coordinate:tuple)->list:
perceptions = []
if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze')
if self.isPerception(coordinate, 'stench'): perceptions.append('stench')
if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter')
if self.screamTrigger:
perceptions.append('scream')
self.screamTrigger = False
return perceptions
def isPerception(self, coordinate, perception)-> bool:
x,y = coordinate
if not self.isValid(coordinate): return False
return perception in self.matrix_perceptions[x][y]
def isPit(self, coordinate:tuple)->bool:
x, y = coordinate
if not self.isValid(coordinate): return False
return self.matrix[x][y] == 'pit'
def isWumpus(self, coordinate:tuple)->bool:
x, y = coordinate
if not self.isValid(coordinate): return False
return self.matrix[x][y] == 'wumpus'
def isGold(self, coordinate:tuple)->bool:
x, y = coordinate
if not self.isValid(coordinate): return False
return self.matrix[x][y] == 'gold'
def isExit(self, coordinate:tuple)->bool:
return coordinate == (0,0)
def removeWumpus(self, coordinate:tuple)->None:
self.screamTrigger = True
x, y = coordinate
if not self.isValid(coordinate): return
self.matrix[x][y] = 'empty'
def removeGold(self, coordinate:tuple)->None:
x, y = coordinate
if not self.isValid(coordinate): return
self.matrix[x][y] = 'empty'
self.matrix_perceptions[x][y].remove('glitter')
    # Generate a random empty (x, y) coordinate other than (0,0)
def randomCoordinate(self, )->tuple:
x,y = (0,0)
while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ):
x, y = randrange(self.dimension), randrange(self.dimension)
return (x,y)
def isValid(self, coordinate) -> bool:
x , y = coordinate
if x >= self.dimension or y >= self.dimension: return False
if x < 0 or y < 0: return False
return True
def getObjectCoord(self, name: str): return self.coordinate[name]
def getGraph(self, )->dict:
grafo = {}
n = self.dimension
for i in range(n):
for j in range(n):
cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1)
nodes = []
if i == 0: # 1° LINHA
if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima)
if j == 0: # MAIS A ESQUERDA (apenas testar a direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
elif j == n-1: # MAIS A DIREITA (apenas testar a esquerda)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
else: # NO MEIO (testar a esquerda e direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
elif i == n-1: # ULTIMA LINHA
if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo)
if j == 0: # MAIS A ESQUERDA (apenas testar a direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
elif j == n-1: # MAIS A DIREITA (apenas testar a esquerda)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
else: # NO MEIO (testar a esquerda e direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
else: # NAS LINHS DO MEIO (estar CIMA e BAIXO)
if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo)
if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima)
if j == 0: # MAIS A ESQUERDA (apenas testar a direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
elif j == n-1: # MAIS A DIREITA (apenas testar a esquerda)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
else: # NO MEIO (testar a esquerda e direita)
if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)
if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)
grafo.update({(i,j):nodes})
return grafo
def depthSearch(self, start:object):
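        # Depth-first search over walkable squares starting from `start`;
        # returns True as soon as a square containing the gold is reached, so
        # validEnvironment() can reject maps where the gold is unreachable.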
graph = self.getGraph()
visiteds = [start]
not_visiteds = [start]
while not_visiteds:
current = not_visiteds.pop()
for neighbor in graph[current]:
if neighbor not in visiteds:
visiteds.append(neighbor)
not_visiteds.append(neighbor)
x,y=neighbor
if self.matrix[x][y] == 'gold': return True
return False
def validEnvironment(self, )-> bool:
return self.depthSearch((0,0))
| 3.296875 | 3 |
tests/unit/dataactvalidator/test_fabsreq9_detached_award_financial_assistance.py | brianherman/data-act-broker-backend | 0 | 12793049 | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq9_detached_award_financial_assistance'
def test_column_headers(database):
expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C',
awardee_or_recipient_legal='REDACTED')
det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='',
awardee_or_recipient_legal='Name')
# Test ignoring for D records
det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',
awardee_or_recipient_legal=None)
det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',
awardee_or_recipient_legal='Name')
errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None)
det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None,
awardee_or_recipient_legal='')
errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])
assert errors == 2
| 2.375 | 2 |
colin-api/tests/unit/api/test_program_account.py | leksmall/lear | 0 | 12793050 | # Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the ProgramAccount end-point.
Test-Suite to ensure that the /programAccount endpoint is working as expected.
"""
from tests import oracle_integration
@oracle_integration
def test_get_program_account_no_results(client):
"""Assert that the program account info."""
rv = client.get('/api/v1/programAccount/FM0000001/BNTZLDLBBE3')
assert 404 == rv.status_code
assert None is not rv.json['message']
| 1.703125 | 2 |