Dataset schema (one record per source file; a record is a pipe-delimited metadata row, followed by the file content and an authors row; ⌀ marks nullable fields):

blob_id               string, length 40
directory_id          string, length 40
path                  string, length 3 to 616
content_id            string, length 40
detected_licenses     sequence, length 0 to 112
license_type          string, 2 classes
repo_name             string, length 5 to 115
snapshot_id           string, length 40
revision_id           string, length 40
branch_name           string, 777 classes
visit_date            timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date         timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date        timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id             int64, 4.92k to 681M ⌀
star_events_count     int64, 0 to 209k
fork_events_count     int64, 0 to 110k
gha_license_id        string, 22 classes
gha_event_created_at  timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀
gha_created_at        timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀
gha_language          string, 149 classes
src_encoding          string, 26 classes
language              string, 1 class
is_vendor             bool, 2 classes
is_generated          bool, 2 classes
length_bytes          int64, 3 to 10.2M
extension             string, 188 classes
content               string, length 3 to 10.2M
authors               sequence, length 1
author_id             string, length 1 to 132
b45b8e88f4ed303e52d90dfa4d01ea1f2386ac4f | df983affa658d3169aebcd95e255c7cafccf1aa0 | /build/common_msgs/actionlib_msgs/catkin_generated/actionlib_msgs-extras.cmake.develspace.context.cmake.py | b6d5b0ec3ce04c0a47984337071656b8a6699bb8 | [] | no_license | Ektachaurasia/Backup | 17045d3cd3185ca47c53b02298fe1c123ee8a058 | e5ab8532f6cd599ebe4b501626ddba2c6c5d83ab | refs/heads/main | 2023-06-20T21:49:54.092227 | 2021-07-28T04:31:15 | 2021-07-28T04:31:15 | 390,155,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py |
# generated from catkin/cmake/template/cfg-extras.context.py.in
DEVELSPACE = 'TRUE' == 'TRUE'
INSTALLSPACE = 'FALSE' == 'TRUE'
CATKIN_DEVEL_PREFIX = '/home/pi/catkin_ws/devel'
CATKIN_GLOBAL_BIN_DESTINATION = 'bin'
CATKIN_GLOBAL_ETC_DESTINATION = 'etc'
CATKIN_GLOBAL_INCLUDE_DESTINATION = 'include'
CATKIN_GLOBAL_LIB_DESTINATION = 'lib'
CATKIN_GLOBAL_LIBEXEC_DESTINATION = 'lib'
CATKIN_GLOBAL_PYTHON_DESTINATION = 'lib/python3/dist-packages'
CATKIN_GLOBAL_SHARE_DESTINATION = 'share'
CATKIN_PACKAGE_BIN_DESTINATION = 'lib/actionlib_msgs'
CATKIN_PACKAGE_ETC_DESTINATION = 'etc/actionlib_msgs'
CATKIN_PACKAGE_INCLUDE_DESTINATION = 'include/actionlib_msgs'
CATKIN_PACKAGE_LIB_DESTINATION = 'lib'
CATKIN_PACKAGE_LIBEXEC_DESTINATION = 'lib/actionlib_msgs'
CATKIN_PACKAGE_PYTHON_DESTINATION = 'lib/python3/dist-packages/actionlib_msgs'
CATKIN_PACKAGE_SHARE_DESTINATION = 'share/actionlib_msgs'
CMAKE_BINARY_DIR = '/home/pi/catkin_ws/build'
CMAKE_CURRENT_BINARY_DIR = '/home/pi/catkin_ws/build/common_msgs/actionlib_msgs'
CMAKE_CURRENT_SOURCE_DIR = '/home/pi/catkin_ws/src/common_msgs/actionlib_msgs'
CMAKE_INSTALL_PREFIX = '/home/pi/catkin_ws/install'
CMAKE_SOURCE_DIR = '/home/pi/catkin_ws/src'
PKG_CMAKE_DIR = '/home/pi/catkin_ws/devel/share/actionlib_msgs/cmake'
PROJECT_NAME = 'actionlib_msgs'
PROJECT_BINARY_DIR = '/home/pi/catkin_ws/build/common_msgs/actionlib_msgs'
PROJECT_SOURCE_DIR = '/home/pi/catkin_ws/src/common_msgs/actionlib_msgs'
| [ "[email protected]" ] | |
e5b0112718fb300ed3d147add9a9e182d8b2ea1e | 7aec3f10b07403b542e1c14a30a6e00bb479c3fe | /Codewars/8 kyu/Convert number to reversed array of digits.py | e7cbae004c0ea231f393eff942a158cedabe542b | [] | no_license | VictorMinsky/Algorithmic-Tasks | a5871749b377767176ba82308a6a0962e1b3e400 | 03a35b0541fe413eca68f7b5521eaa35d0e611eb | refs/heads/master | 2020-08-02T23:18:06.876712 | 2020-01-16T19:08:49 | 2020-01-16T19:08:49 | 211,541,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py |
"""
Convert number to reversed array of digits
Given a random number:
C#: long;
C++: unsigned long;
You have to return the digits of this number within an array in reverse order.
Example:
348597 => [7,9,5,8,4,3]
"""
def digitize(n):
    # Reverse the decimal string, then convert each digit back to an int.
    return [int(digit) for digit in str(n)[::-1]]
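# A minimal self-check (illustrative; not part of the kata submission):
if __name__ == "__main__":
    assert digitize(348597) == [7, 9, 5, 8, 4, 3]
    assert digitize(0) == [0]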
| [ "[email protected]" ] | |
a1ac053acc68c25f371d3926ce3b7044ee603984 | e76fda1fba459456c4bc105e7a6dcc6277a1a26c | /django_cv/blog/migrations/0003_auto_20160717_0956.py | 1c16fe69db75af49377755969266de63f4546d1 | [] | no_license | lafabo/i-love-tutorials | 6bb2a684a201975ab523d9721b02761a6269853c | eafcd47fd62e770107c7e1f08e0d6d60a539f1ec | refs/heads/master | 2021-01-21T04:46:56.365199 | 2016-07-20T17:38:03 | 2016-07-20T17:38:03 | 47,709,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 782 | py |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 09:56
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20160717_0754'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(auto_created=True),
),
migrations.AlterField(
model_name='post',
name='published_date',
field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 7, 17, 9, 56, 3, 256986, tzinfo=utc)),
preserve_default=False,
),
]
| [ "[email protected]" ] | |
f7ac703f00dbfce30712bfb9a545f0ea45d5721d | 463bdbc8cdca6802f0ff224af0719b078d336f42 | /semana-2/futbolista.py | f9e2805b0a547828274a3068d216f440dd541844 | [] | no_license | BrandonBaLu/poo--1719110177 | 21b99bf4484030c32a26dc12cc3848b9cee12c16 | a3e72a46e284bdd7f2106e444d9d262f390fb296 | refs/heads/master | 2022-12-02T13:49:20.715214 | 2020-08-06T20:53:46 | 2020-08-06T20:53:46 | 265,976,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py |
class futbolista:
    # Attributes (Spanish identifiers kept as written): edad = age,
    # altura = height, camiseta = jersey number, posicion = position
    # ("delantero" = forward), categoria = category ("juvenil" = youth),
    # nombre = name.
    edad = 22
    altura = 1.80
    camiseta = 1
    posicion = "delantero"
    categoria = "juvenil"
    nombre = "Brandon BaLu"
    def correr(self):  # run
        print("correr")
    def saltar(self):  # jump
        print("saltar")
    def patear(self):  # kick
        print("patear")
    def gol(self):  # score a goal
        print("meter gol")
    def festejar(self):  # celebrate
        print("festejar")
    def __init__(self):
print("Futbolista")
print(self.edad)
print(self.altura)
print(self.camiseta)
print(self.posicion)
print(self.categoria)
print(self.nombre)
objeto = futbolista()
objeto.correr()
objeto.saltar()
objeto.patear()
objeto.gol()
objeto.festejar()
| [ "[email protected]" ] | |
cc2c49aa611b103c5981d71833739c0d1bfcef5b | 98e4005eb908773cd0be5b46e297024395b43b1c | /tasks.py | 40ed204392e83dfac108eef63ee1327a48ea4d32 | [ "MIT" ] | permissive | aparamon/block-timer | 110f456a5ae4e1311731c321de49565915074a70 | 8c7159253610728aaa810742bdaa1f9064e1fc5d | refs/heads/master | 2020-03-24T21:29:43.643985 | 2017-10-06T08:07:52 | 2017-10-06T08:07:52 | 143,035,520 | 0 | 0 | null | 2018-07-31T15:49:17 | 2018-07-31T15:49:17 | null | UTF-8 | Python | false | false | 1,769 | py |
# -*- encoding: utf-8 -*-
# ! python3
import shutil
from invoke import run, task
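# Note: these tasks use the pre-1.0 invoke API (a module-level run() and task
# functions without a Context argument); invoke >= 1.0 passes a Context to
# each task, e.g. "def clean(c): c.run(...)".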
@task
def clean():
"""remove build artifacts"""
shutil.rmtree('block_timer.egg-info', ignore_errors=True)
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('dist', ignore_errors=True)
shutil.rmtree('htmlcov', ignore_errors=True)
shutil.rmtree('__pycache__', ignore_errors=True)
@task
def lint():
"""check style with flake8"""
run("flake8 block_timer/ tests/")
@task
def test():
run("py.test --verbose --showlocals tests/")
@task
def check():
"""run tests quickly with the default Python"""
run("python setup.py --no-user-cfg --verbose check --metadata --restructuredtext --strict")
@task
def coverage():
"""check code coverage quickly with the default Python"""
run("coverage run --source block_timer -m py.test")
run("coverage report -m")
run("coverage html")
@task
def test_install():
"""try to install built package"""
run("pip uninstall block-timer --yes", warn=True)
run("pip install --use-wheel --no-index --find-links=file:./dist block-timer")
run("pip uninstall block-timer --yes")
@task
def build():
"""build package"""
run("python setup.py build")
run("python setup.py sdist")
run("python setup.py bdist_wheel")
@task
def publish():
"""publish package"""
check()
run('python setup.py sdist upload -r pypi') # Use python setup.py REGISTER
run('python setup.py bdist_wheel upload -r pypi')
@task
def publish_test():
"""publish package"""
check()
run('python setup.py sdist upload -r https://testpypi.python.org/pypi') # Use python setup.py REGISTER
run('python setup.py bdist_wheel upload -r https://testpypi.python.org/pypi')
| [ "[email protected]" ] | |
07b91e1d7deec94489258dd04edee096ab9d58e2 | 285c76618cf9569a6074bfe5e7f4260d1eedf62a | /jjj filter.py | fb5a33c3b46f73480634287e7d539dfdbd2ca8af | [] | no_license | kiniamogh/options_analysis | 90b13e335a4426fb98eeb9ef6da6eebeff2838f4 | da162bf4fbe7b5c5b70d48b284de0ab0f639061d | refs/heads/master | 2023-06-27T06:25:51.672197 | 2021-06-16T23:11:58 | 2021-06-16T23:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,337 | py |
import yfinance as yf
import pandas as pd
import numpy as np
import timeit
from datetime import datetime as dt
from scipy.stats import norm
start = timeit.default_timer()
# https://algotrading101.com/learn/yfinance-guide/
#https://aroussi.com/post/download-options-data
#prompt for a symbol
symbol = input('What is your stock ticker?: ')
min_delta = float(input('What is the minimum delta (e.g. 0.7 is 70%)?: '))
min_yield = float(input('What is the minimum weekly yield (e.g. 0.01 is 1%)?: '))
max_expiration = input('What is the latest expiration? (mm-dd-yyyy): ')
#hard-wire a symbol without the prompt
#symbol = 'Tna'
#print symbol
#print(symbol.upper())
#yfinance version of your symbol
ticker = yf.Ticker(symbol)
# print descriptive info about the ticker
#print(ticker.info)
#historical prices
#historical = ticker.history(start="2020-12-02", end="2020-12-04", interval="5m")
#print(historical)
#how far back you go - period
# “1d”, “5d”, “1mo”, “3mo”, “6mo”, “1y”, “2y”, “5y”, “10y”, “ytd”, “max”
#bars or candles - interval
# 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo
#hist = ticker.history(period="3d", interval = "5m")
#print(hist)
#multiple_tickers = yf.download("AMZN AAPL GOOG", start="2017-01-01", end="2017-04-30")
#print(multiple_tickers)
#multi_ticker_history = yf.download("AMZN AAPL GOOG", period="3d", interval = "5m")
#print(multi_ticker_history)
#options
#pull in the entire options chain for one expiration
#expiration = input('What is your expiration date? (yyyy-mm-dd): ')
#expiration = '2021-01-08'
#exp_dates = []
exp_dates = ticker.options # this is the list of expiration dates
#print(exp_dates)
#opt = ticker.option_chain(expiration)
#print(opt)
#opt = ticker.option_chain(exp_dates)
df = pd.DataFrame()
for x in exp_dates:
opt = ticker.option_chain(x)
df = df.append(opt.calls, ignore_index=True)
#df = pd.DataFrame(opt.calls)
hist = ticker.history(period="3d", interval = "5m")
#print(hist)
df_history = pd.DataFrame(hist)
recent_value = df_history['Close'].iloc[-1]
print(recent_value)
df['recent_px'] = recent_value
#df['recent_px'] = 173.75
#intrinsic value = stock price - strike price
df['intrinsic_value'] = df['recent_px'] - df['strike']
df['intrinsic_value'] = np.where(df['intrinsic_value'] < 0, 0, df['intrinsic_value'])
#option price = mid
#mid = (bid + ask) / 2
df['option_px'] = (df['bid'] + df['ask']) / 2 #mid options price
#extrinsic value = option price - intrinsic value
df['extrinsic_value'] = df['option_px'] - df['intrinsic_value']
df['extrinsic_value'] = np.where(df['extrinsic_value'] < 0, 0, df['extrinsic_value'])
#yield = ( extrinsic / recent_px ) * 100
df['yield'] = (df['extrinsic_value'] / df['recent_px'] )
#contract_symbol = str(df['contractSymbol'].iloc[0])
#print(contract_symbol)
#beginning_index = contract_symbol.find('2')
#print(beginning_index)
#ending_index = beginning_index + 6
#print(ending_index)
#expiration_slice = contract_symbol[beginning_index:ending_index]
#print(expiration_slice)
df['contract_symbol'] = df['contractSymbol'].astype(str)
df['beginning_index'] = (df['contract_symbol'].str.find('2'))
df['ending_index'] = (df['beginning_index'] + 6)
begin_index = df['beginning_index'].iloc[0]
end_index = df['ending_index'].iloc[0]
df['expiration_slice'] = df['contract_symbol'].str.slice(begin_index,end_index)
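# Worked example of the slice above (hypothetical symbol): for
# "TNA210108C00050000", str.find('2') returns 3, so slicing [3:9] gives
# "210108", which the '20' prefix below turns into "20210108". Note this
# relies on the first '2' in the symbol belonging to the date, so it would
# misfire for a ticker whose own name contains a '2'.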
todays_date = pd.to_datetime('today')
df['today'] = todays_date
df['expiration_combined'] = '20' + df['expiration_slice']
df['converted_expiration'] = pd.to_datetime(df['expiration_combined'])
df['days_to_expiration'] = (df['converted_expiration'] - df['today']).dt.days
#number of weeks
df['number_of_weeks'] = df['days_to_expiration'] / 7
#weekly yield
df['weekly_yield'] = np.where( df['number_of_weeks'] < 1, df['yield'], df['yield'] / df['number_of_weeks'])
# Greeks
# Note: time to expiry is annualized here with a 200-day year; 252 trading
# days (or 365 calendar days) is the more common convention.
df['T'] = df['days_to_expiration'] / 200
risk_free_rate = 0.00
df['r'] = risk_free_rate
df['v'] = df['impliedVolatility']
dividend_rate = .00
df['d'] = dividend_rate
df['S'] = df['recent_px']
df['K'] = df['strike']
df['T_sqrt'] = np.sqrt(df['T'])
df['d1'] = (np.log(df['S'].astype(float) / df['K']) + (( df['r'] - df['d'] ) + df['v'] * df['v'] / 2) * df['T'] ) / (df['v'] * df['T_sqrt'])
df['delta_calc'] = norm.cdf(df['d1'])
#jjj score
df['jjj'] = df['weekly_yield'] * df['delta_calc']
# df['d2'] = df['d1'] - df['v'] * df['T_sqrt']
#
# df['gamma'] = norm.pdf(df['d1']) / (df['S'] * df['v'] * df['T_sqrt'])
#
# df['theta'] = -(df['S'] * df['v'] * norm.pdf(df['d1'])) / (2 * df['T_sqrt']) - df['r'] * df['K'] * np.exp(-df['r'] * df['T']) * norm.cdf(df['d2'])
#
# df['vega'] = df['S'] * df['T_sqrt'] * norm.pdf(df['d1'])
#
# df['rho'] = df['K'] * df['T'] * np.exp(-df['r'] * df['T']) * norm.cdf(df['d2'])
#print(df)
#df.to_csv("greeks.csv")
#dfobj = df[['delta_calc', 'strike']]
#dfobj.to_csv('just_delta_strike.csv')
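# Scalar cross-check for the vectorized columns above (an illustrative
# sketch; the example numbers are made up). It mirrors the Black-Scholes
# call delta: d1 = (ln(S/K) + (r - q + v^2/2)T) / (v*sqrt(T)), delta = N(d1).
def bs_call_delta(S, K, T, r, q, v):
    d1 = (np.log(S / K) + (r - q + v * v / 2) * T) / (v * np.sqrt(T))
    return norm.cdf(d1)
# e.g. an at-the-money call (S == K == 100) with v = 0.25, T = 30/365 and
# zero rates comes out near 0.51, as expected for ATM options:
# bs_call_delta(100, 100, 30 / 365, 0.0, 0.0, 0.25)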
df_two_columns = df[['strike', 'delta_calc', 'yield', 'converted_expiration', 'weekly_yield', 'jjj']]
#print(df_two_columns)
df_two_columns.to_csv('two_columns.csv')
#filters out for delta threshold
find_delta = df_two_columns.loc[lambda frame: frame['delta_calc'] > min_delta, :]
#print(find_delta)
#find_delta.to_csv('find_delta.csv')
#filters out for expiration threshold
find_delta_first_expiration = find_delta.loc[lambda find_delta: find_delta['converted_expiration'] <= max_expiration, :]
#print(find_delta_first_expiration)
#filters out for yield threshold
#find_delta_and_yield = find_delta_first_expiration.loc[lambda find_delta_first_expiration: find_delta_first_expiration['yield'] > .008, :]
find_delta_and_yield = find_delta_first_expiration.loc[lambda find_delta_first_expiration: find_delta_first_expiration['weekly_yield'] > min_yield, :]
# find_delta_and_yield = find_delta.loc[lambda find_delta: find_delta['yield'] > .04, :]
print(find_delta_and_yield)
find_delta_and_yield.to_csv('find_delta_and_yield.csv')
#chooses the strike with the max yield
#max_value = find_delta_and_yield['yield'].max()
max_value = find_delta_and_yield['weekly_yield'].max()
print(max_value)
find_final_strike = find_delta_and_yield.loc[lambda find_delta_and_yield: find_delta_and_yield['weekly_yield'] == max_value, :]
print(find_final_strike)
stop = timeit.default_timer()
print('Time: ', stop - start)
| [ "[email protected]" ] | |
328968e0b146457abb4379014c28d200edcdd065 | 47516f1e2356b3e02d96beabf7d05f1f5d89066e | /test-mess/perlii/pypi/p.py | 00a1199efe54867d49a3142ff2d18db91f72d6f4 | [] | no_license | su8/mega-drive | 8b9de109921343302c274e3af82d035bdf7ab004 | d92efdf21734b05a55954aec93fd39a10396924f | refs/heads/master | 2021-04-03T04:16:49.454792 | 2018-05-06T12:02:22 | 2018-05-06T12:02:22 | 124,764,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py |
def multiply():
a=3
b=4
print("Will compute", a, "times", b)
c = 0
for i in range(0, a):
c = c + b
return c
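# Illustrative behavior: multiply() always computes 3 * 4 by repeated
# addition, printing "Will compute 3 times 4" and returning 12.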
| [ "[email protected]" ] | |
59ab40f18c28b7bc7c88eb68a4fd01b89ae91535 | 892a07f9393b51f27bdd865a8721599a5a5f63d8 | /download_info.py | 9fec5bccf1c7a6b8d435c4e5d6502f69c39d6a7b | [] | no_license | TianyuDu/UTCourses | 4263e2258e221794581418b37266b51d3070c066 | 3a2d57e67ec6109d22fe5b698ebd77a3c40a6dab | refs/heads/master | 2020-09-10T01:44:10.965653 | 2019-11-18T23:50:30 | 2019-11-18T23:50:30 | 221,618,482 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py |
import numpy as np
import pandas as pd
import selenium
from selenium import webdriver
import time
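# Note: this script uses the Selenium 3 find_element(s)_by_* helpers; in
# Selenium 4 they were removed in favor of driver.find_element(By.CLASS_NAME,
# ...) with "from selenium.webdriver.common.by import By".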
class bot():
def __init__(self):
self.driver = webdriver.Chrome("./chromedriver")
self.driver.get("https://timetable.iit.artsci.utoronto.ca/")
def close(self):
self.driver.close()
def batch_retrive(self, department: str) -> pd.DataFrame:
department = department.upper()
search_box = self.driver.find_element_by_id("courseCode")
search_button = self.driver.find_element_by_id("searchButton")
search_box.clear()
search_box.click()
search_box.send_keys(department)
search_button.click()
course_lst = []
while course_lst == []:
time.sleep(1.0)
course_lst = self.driver.find_elements_by_class_name("perCourse")
course_info_lst = []
print(f"Total courses found: {len(course_lst)}")
for course in course_lst:
code, title = course.find_element_by_class_name("courseTitle").text.split(" ")
print(f"{code}\t{title}")
meeting_lst = course.find_elements_by_class_name("perMeeting")
for meeting in meeting_lst:
meeting_code = meeting.find_element_by_class_name("colCode").text
print(f"\t{meeting_code}")
try:
meeting_info = meeting.find_element_by_class_name(
"secLec" if meeting_code.startswith("LEC") else "secTut").text
except selenium.common.exceptions.NoSuchElementException:
meeting_info = meeting.find_element_by_class_name("secPra").text
info = [code, title, meeting_code, meeting_info]
course_info_lst.append(info)
course_info_df = pd.DataFrame(
np.array(course_info_lst),
columns=["Code", "Title", "Session", "Details"]
)
return course_info_df
def batch_download(self, code_lst: list, save_dir: str) -> None:
department_lst = [
x.text
for x in self.driver.find_elements_by_class_name("option")
]
print(department_lst)
        # Rebuild code_lst from the page itself; the value passed in as an
        # argument is effectively ignored in favor of the scraped codes.
        code_lst = [
x[1:-1]
for dep in department_lst
for x in dep.split(" ")
if x.startswith("(") and x.endswith(")")
]
all_courses = []
for x in code_lst:
y = self.batch_retrive(x)
all_courses.append(y)
df = pd.concat(all_courses, axis=0)
print(df.head())
print(df.shape)
df.to_csv(save_dir)
if __name__ == "__main__":
b = bot()
code_lst = ["MAT"]
b.batch_download(code_lst, save_dir="./results.csv")
b.close()
| [ "[email protected]" ] | |
6a67fbcb39334683fc4c6b183bea6cd0f44d3110 | 5326f4145414e096f6f145a71f6c7e1669230e71 | /challenges/c40_FilteringRecords/filtering_records/filtering_records.py | b54f0d705e269c4f69805aa012e0edf75ed8d7bc | [] | no_license | andrew-rietz/FiftySeven_Coding_Challenges | a670bd6b1dcf6f99775c2100d297e01a26555af9 | 218894fbad8ac3389003ce7321fd4c4020239fd6 | refs/heads/master | 2022-10-17T15:41:01.705609 | 2019-10-03T05:27:45 | 2019-10-03T05:27:45 | 181,096,850 | 0 | 0 | null | 2022-09-16T18:10:31 | 2019-04-12T23:33:52 | Python | UTF-8 | Python | false | false | 3,651 | py |
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from util_functions.utils import table, user_inputs
class EmployeeDatabase():
"""Represents a database of employee information (first name, last name,
position/title, and separation date)
Attributes:
employees_data (list): A list of dictionaries. Each dictionary corresponds
to a single employee and tracks first/last names, title, and
separation date. Sample dictionary:
{
"first_name": "Foo",
"last_name": "Bar",
"position": "Programmer",
                    "sep_date": "",
}
filtered_data (list): A subset of employees_data based on a user search string
Methods:
load_data: Loads data into the `employees_data` attribute
get_filter_string: Prompts the user for a search string
filter: Filters the `employees_data` attribute to only those records that
include the user's search string in the employee first or last name
tabulate_filtered_data: Puts the filtered data into tabular form for printing
"""
def __init__(self, employees_data=None):
self.employees_data = employees_data
self.filtered_data = None
def load_data(self):
employee_info = [
{
"first_name": "John", "last_name": "Johnson",
"position": "Manager", "sep_date": "2016-12-31",
},
{
"first_name": "Tuo", "last_name": "Xiong",
"position": "Software Engineer", "sep_date": "2016-10-05",
},
{
"first_name": "Michaela", "last_name": "Michaelson",
"position": "District Manager", "sep_date": "2015-12-19",
},
{
"first_name": "Jake", "last_name": "Jacobsen",
"position": "Programmer", "sep_date": "",
},
{
"first_name": "Jacquelyn", "last_name": "Jackson",
"position": "DBA", "sep_date": "",
},
{
"first_name": "Sally", "last_name": "Weber",
"position": "Web Developer", "sep_date": "2015-12-18",
},
]
self.employees_data = employee_info
return employee_info
@staticmethod
def get_filter_string():
filter_string = input("Enter a search string: ").strip()
return filter_string.lower()
def filter(self, filter_string):
filtered_data = [
employee for
employee in self.employees_data
if(
(filter_string in employee["first_name"].lower()) or
(filter_string in employee["last_name"].lower())
)
]
self.filtered_data = filtered_data
return filtered_data
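    # For example, a search string of "ja" keeps Jake Jacobsen and Jacquelyn
    # Jackson from the sample data: the match is a case-insensitive substring
    # test against either the first or the last name.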
def tabulate_filtered_data(self):
table_data = [["Name", "Position", "Separation Date"]]
for employee in self.filtered_data:
table_data.append([
f'{employee["first_name"]} {employee["last_name"]}',
employee["position"],
employee["sep_date"],
])
ascii_table = table.ascii_table(data=table_data, user_alignment="left")
return ascii_table
def main():
employees = EmployeeDatabase()
employees.load_data()
filter_val = employees.get_filter_string()
employees.filter(filter_val)
print(employees.tabulate_filtered_data())
if __name__ == "__main__":
main()
| [ "[email protected]" ] | |
59539c4599d0962bcf71259515908994a8e5da65 | a80884040ce1c178274a3068d216f440dd541844 | /rxsci/state/store.py | 85d2a5655120e091fe557dcc1599571345abfe42 | [ "MIT" ] | permissive | maki-nage/rxsci | a4aae51edc1ef684b55df22e34c11aa1d54ef740 | 915e59ebf593c4b313265bb87cf0e1209ec2ee0f | refs/heads/master | 2023-01-19T14:32:11.638497 | 2023-01-17T08:06:35 | 2023-01-17T08:06:35 | 242,592,973 | 9 | 2 | MIT | 2022-11-08T21:54:16 | 2020-02-23T21:23:56 | Python | UTF-8 | Python | false | false | 3,514 | py |
class Store(object):
def __init__(self, topology, store_factory):
"""one per partition
"""
self.states = []
for state in topology.states:
self.states.append(store_factory(
name=state.name,
data_type=state.data_type,
default_value=state.default_value
))
def add_key(self, state, key):
return self.states[state].add_key(key)
def del_key(self, state, key):
return self.states[state].del_key(key)
def set(self, state, key, value):
return self.states[state].set(key, value)
def get(self, state, key):
return self.states[state].get(key)
def iterate(self, state):
return self.states[state].iterate()
def add_map(self, state, key, map_key):
return self.states[state].add_map(key, map_key)
def del_map(self, state, key, map_key):
return self.states[state].del_map(key, map_key)
def get_map(self, state, key, map_key):
return self.states[state].get_map(key, map_key)
def iterate_map(self, state, key):
return self.states[state].iterate_map(key)
class StoreManager(object):
def __init__(self, store_factory):
"""Manages partitions
"""
self.partitions = None
self.active_partition = None
self.topology = None
self.states = []
self.create_store = store_factory
def set_topology(self, topology):
self.topology = topology
def get_store(self):
if self.active_partition is None:
# No partitioning provided, use a single store
assert not self.states
self.states = [Store(topology=self.topology, store_factory=self.create_store)]
self.active_partition = 0
return self.states[self.active_partition]
def add_key(self, state, key):
store = self.get_store()
return store.add_key(state, key)
def del_key(self, state, key):
store = self.get_store()
return store.del_key(state, key)
def set_state(self, state, key, value):
"""Sets value of key in state
Args:
state: A state id from topology
key: A unique key for this state
value: value to set
"""
store = self.get_store()
return store.set(state, key, value)
def get_state(self, state, key):
"""Retrieves value of key in state
Args:
state: A state id from topology
key: A unique key for this state
Returns:
value of key.
"""
store = self.get_store()
return store.get(state, key)
def iterate_state(self, state):
store = self.get_store()
return store.iterate(state)
def add_map(self, state, key, map_key):
store = self.get_store()
return store.add_map(state, key, map_key)
def del_map(self, state, key, map_key):
store = self.get_store()
return store.del_map(state, key, map_key)
def get_map(self, state, key, map_key):
store = self.get_store()
return store.get_map(state, key, map_key)
def iterate_map(self, state, key):
store = self.get_store()
return store.iterate_map(state, key)
def on_partitions_revoked(self, revoked):
return
def on_partitions_assigned(self, assigned):
return
def set_active_partition(self, partition):
self.active_partition = partition
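# A minimal usage sketch (illustrative only; the Topology/StateDef shapes and
# the store backend below are assumptions inferred from how Store.__init__
# consumes them: an object exposing .states, each with .name/.data_type/
# .default_value, and a store_factory accepting those keyword arguments).
if __name__ == '__main__':
    from collections import namedtuple

    StateDef = namedtuple('StateDef', ['name', 'data_type', 'default_value'])
    Topology = namedtuple('Topology', ['states'])

    class DictStateStore:
        """In-memory stand-in for a real store backend."""
        def __init__(self, name, data_type, default_value):
            self.default_value = default_value
            self.data = {}

        def add_key(self, key):
            self.data[key] = self.default_value

        def set(self, key, value):
            self.data[key] = value

        def get(self, key):
            return self.data.get(key, self.default_value)

    manager = StoreManager(store_factory=DictStateStore)
    manager.set_topology(Topology(states=[StateDef('count', int, 0)]))
    manager.add_key(0, 'user-1')            # state 0 is the 'count' state
    manager.set_state(0, 'user-1', 41)
    print(manager.get_state(0, 'user-1'))   # -> 41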
| [ "[email protected]" ] | |
791272d372a823c1d6e970236552365d7b3a754a | 7078044ab0f8f1c1f4062a0e295d0b0c66f49734 | /Seq2Seq/py/fsa.py | d3cc4cf306fb06ec94489258dd04edee096ab9d5 | [] | no_license | shixing/xing_rnn | 854061ee0a1d9de5f3d761df8f1769a3f2b98bc9 | 48f9089f5df97ef2ee6a79f01430ab32dc101512 | refs/heads/master | 2022-11-10T11:35:16.798208 | 2018-05-17T19:08:50 | 2018-05-17T19:08:50 | 86,934,874 | 4 | 4 | null | 2022-10-20T20:53:39 | 2017-04-01T18:46:44 | Python | UTF-8 | Python | false | false | 6,181 | py |
import re
import math
from logging_helper import mylog, mylog_section, mylog_subsection, mylog_line
class State:
def __init__(self, str_name):
self.name = str_name
self.weights = {} # {int_word: {str_state_name: (state_s, float_weight)}} and float_weigth are in log space
self.next_word_index_set = set()
self.next_word_index_set_ready = False
def process_link(self, state_d, int_word, float_weight):
if not int_word in self.weights:
self.weights[int_word] = {}
self.weights[int_word][state_d.name] = (state_d, float_weight)
def __repr__(self):
return "State({})".format(self.name)
def next_states(self, int_word, results):
#the fsa should not contains a *e* circle.
# results = [(state, weight)]
if int_word in self.weights:
for state_name in self.weights[int_word]:
state_s, float_weight = self.weights[int_word][state_name]
results.append((state_s, float_weight))
# check the *e* link
empty = -1
if empty in self.weights:
for state_name in self.weights[empty]:
state_s, float_weight = self.weights[empty][state_name]
temp = []
state_s.next_states(int_word, temp)
for s, w in temp:
new_w = float_weight + w
results.append((s,new_w))
def next_word_indices(self):
if self.next_word_index_set_ready:
return self.next_word_index_set
else:
# build next_word_index_set
for int_word in self.weights:
if int_word == -1: # *e*
for next_state_name in self.weights[int_word]:
state_s, float_weight = self.weights[int_word][next_state_name]
next_word_index_set = state_s.next_word_indices()
for w in next_word_index_set:
self.next_word_index_set.add(w)
else:
self.next_word_index_set.add(int_word)
self.next_word_index_set_ready = True
return self.next_word_index_set
class FSA:
def __init__(self,fsa_filename, word2index, weight_is_in_log = True):
self.fsa_filename = fsa_filename
self.start_state = None
self.end_state = None
self.patterns = [re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+\"(.*)\"[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+([^ ]+)[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+\"(.*)\"[ ]+([^ ]+)[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)[ ]*\\)\\)"),
]
self.weight_is_in_log = weight_is_in_log
if self.weight_is_in_log:
self.default_weight = 0.0
else:
self.default_weight = 1.0
self.states = {} # {str_name: state_s}
self.word2index = word2index
self.index2word = {}
for word in self.word2index:
index = self.word2index[word]
self.index2word[index] = word
self.num_links = 0
def _process_one_line(self,line):
line = line.strip()
if len(line) == 0 or line.startswith('#'):
return None
for p in self.patterns:
r = re.match(p, line)
if r:
break
if r:
group = r.groups()
s = group[0]
d = group[1]
word = group[2]
if word == "*e*":
word = -1
else:
if not word in self.word2index:
print "{} is not in vocab".format(word)
word = -2
else:
word = self.word2index[word]
weight = self.default_weight
if len(group) == 4:
weight = float(group[3])
if not self.weight_is_in_log:
weight = math.log(weight)
return s,d,word,weight
else:
raise ValueError("Can not process line: ", line)
def load_fsa(self):
f = open(self.fsa_filename)
# the end state
line = f.readline().strip()
self.end_state = State(line)
self.states[line] = self.end_state
while True:
line = f.readline()
if not line:
break
s,d,word,weight = self._process_one_line(line)
if s not in self.states:
self.states[s] = State(s)
if d not in self.states:
self.states[d] = State(d)
if self.start_state == None:
self.start_state = self.states[s]
if word != -2:
self.states[s].process_link(self.states[d], word, weight)
self.num_links += 1
if "_EOS" not in self.states:
self.end_state.process_link(self.end_state, self.word2index["_EOS"], self.default_weight)
# FSA info
self.report_statics()
f.close()
def report_statics(self):
mylog_section("FSA")
mylog_subsection("FSA Info")
mylog("Number of States: {}".format(len(self.states)))
mylog("Number of Links: {}".format(self.num_links))
mylog("Start state: {}".format(self.start_state.name))
mylog("End state: {}".format(self.end_state.name))
def next_states(self, current_state, index, results):
if index in self.index2word:
current_state.next_states(index, results)
if __name__ == "__main__":
fsa_filename = "../data/fsa/fsa.txt"
word2index = {}
for i in xrange(0,26):
word2index[chr(i+ord('a'))] = i+1
word2index['_EOS'] = 0
fsa = FSA(fsa_filename,word2index)
fsa.load_fsa()
print fsa.end_state.weights
for i in fsa.end_state.next_word_indices():
results = []
fsa.next_states(fsa.end_state, i, results)
print i, fsa.index2word[i], results
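# Illustrative FSA file for the loader above (hypothetical contents of
# ../data/fsa/fsa.txt). The first line names the end state; each remaining
# line is a link "(source (dest word [weight]))":
#
#   2
#   (0 (1 a))
#   (1 (2 b 0.5))
#   (1 (2 *e*))
#
# With this machine the start state is 0, state 1 reaches end state 2 either
# on "b" or through the *e* link, and next_word_indices() on state 1 also
# includes the words reachable across that epsilon edge.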
| [ "[email protected]" ] | |
11da7904a42782276ec0655cb6e620a333aaf166 | 5839614a5e2fa0b59acd09a623115efa962ee89d | /conda/_vendor/auxlib/logz.py | ac0ff89768fcabff7dd1c707d97ee02d871516a7 | [ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-public-domain" ] | permissive | schae234/conda | 110c25b35a3a0cdafed2ace6895f275e82233481 | 5bb678fe9c1445e62857db4fc55f10602cfa96a3 | refs/heads/master | 2021-01-18T20:27:37.134990 | 2016-08-03T17:36:51 | 2016-08-03T17:36:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,975 | py |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from json import JSONEncoder
from logging import getLogger, INFO, Handler, Formatter, StreamHandler, DEBUG
from pprint import pformat
from sys import stderr
log = getLogger(__name__)
root_log = getLogger()
DEBUG_FORMATTER = Formatter(
"[%(levelname)s] [%(asctime)s.%(msecs)03d] %(process)d %(name)s:%(funcName)s(%(lineno)d):\n"
"%(message)s\n",
"%Y-%m-%d %H:%M:%S")
INFO_FORMATTER = Formatter(
"[%(levelname)s] [%(asctime)s.%(msecs)03d] %(process)d %(name)s(%(lineno)d): %(message)s\n",
"%Y-%m-%d %H:%M:%S")
def set_root_level(level=INFO):
root_log.setLevel(level)
def attach_stderr(level=INFO):
has_stderr_handler = any(handler.name == 'stderr' for handler in root_log.handlers)
if not has_stderr_handler:
handler = StreamHandler(stderr)
handler.name = 'stderr'
if level is not None:
handler.setLevel(level)
handler.setFormatter(DEBUG_FORMATTER if level == DEBUG else INFO_FORMATTER)
root_log.addHandler(handler)
return True
else:
return False
def detach_stderr():
for handler in root_log.handlers:
if handler.name == 'stderr':
root_log.removeHandler(handler)
return True
return False
def initialize_logging(level=INFO):
attach_stderr(level)
class NullHandler(Handler):
def emit(self, record):
pass
class DumpEncoder(JSONEncoder):
def default(self, obj):
if hasattr(obj, 'dump'):
return obj.dump()
# Let the base class default method raise the TypeError
return super(DumpEncoder, self).default(obj)
_DUMPS = DumpEncoder(indent=2, ensure_ascii=False, sort_keys=True).encode
def jsondumps(obj):
return _DUMPS(obj)
def fullname(obj):
return obj.__module__ + "." + obj.__class__.__name__
request_header_sort_dict = {
'Host': '\x00\x00',
'User-Agent': '\x00\x01',
}
def request_header_sort_key(item):
return request_header_sort_dict.get(item[0], item[0].lower())
response_header_sort_dict = {
'Content-Length': '\x7e\x7e\x61',
'Connection': '\x7e\x7e\x62',
}
def response_header_sort_key(item):
return response_header_sort_dict.get(item[0], item[0].lower())
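# These sentinel values lean on plain string ordering: the '\x00' prefixes
# sort before any printable header name (forcing Host and User-Agent to the
# front of a request), while the '\x7e' ('~') prefixes sort after everything
# else, pushing Content-Length and Connection to the end of a response.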
def stringify(obj):
def bottle_builder(builder, bottle_object):
builder.append("{0} {1}{2} {3}".format(bottle_object.method,
bottle_object.path,
bottle_object.environ.get('QUERY_STRING', ''),
bottle_object.get('SERVER_PROTOCOL')))
builder += ["{0}: {1}".format(key, value) for key, value in bottle_object.headers.items()]
builder.append('')
body = bottle_object.body.read().strip()
if body:
builder.append(body)
def requests_models_PreparedRequest_builder(builder, request_object):
builder.append("> {0} {1} {2}".format(request_object.method, request_object.path_url,
request_object.url.split(':', 1)[0].upper()))
builder.extend("> {0}: {1}".format(key, value)
for key, value in sorted(request_object.headers.items(),
key=request_header_sort_key))
builder.append('')
if request_object.body:
builder.append(request_object.body)
def requests_models_Response_builder(builder, response_object):
builder.append("< {0} {1} {2}".format(response_object.url.split(':', 1)[0].upper(),
response_object.status_code, response_object.reason))
builder.extend("> {0}: {1}".format(key, value)
for key, value in sorted(response_object.headers.items(),
key=response_header_sort_key))
builder.append('')
content_type = response_object.headers.get('Content-Type')
if content_type == 'application/json':
            # NB: response_object.json is passed uncalled here; on a plain
            # requests Response this pformats the bound method rather than
            # the decoded payload.
            builder.append(pformat(response_object.json, indent=2))
builder.append('')
elif content_type.startswith('text/'):
builder.append(response_object.text)
try:
name = fullname(obj)
builder = [''] # start with new line
if name.startswith('bottle.'):
bottle_builder(builder, obj)
elif name.endswith('requests.models.PreparedRequest'):
requests_models_PreparedRequest_builder(builder, obj)
elif name.endswith('requests.models.Response'):
requests_models_PreparedRequest_builder(builder, obj.request)
requests_models_Response_builder(builder, obj)
else:
return None
builder.append('') # end with new line
return "\n".join(builder)
except Exception as e:
log.exception(e)
| [ "[email protected]" ] | |
39336a7d9b7d425b16ec7dc0e91fbba1cf58a904 | 49c2e3ebf7f5d2f79af6e26c44b4d07ec14a20d5 | /Hello World/venv/Lib/site-packages/setuptools/package_index.py | 6452c37b2a3c56f76a1d2b0f8f50c2cf42eb8788 | [] | no_license | TaylorHoll/Python_Projects | a0d86642463bdc5b3ea67dae0146c115185c1db2 | a8285b058ed0b4e0a366753d61526056dab23cd3 | refs/heads/master | 2020-06-13T09:04:29.666639 | 2020-01-07T03:40:25 | 2020-01-07T03:40:25 | 194,608,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,586 | py |
"""PyPI and direct package downloading"""
import itertools
import os
import re
import sys
import warnings
from distutils import log
from fnmatch import translate
import base64
import hashlib
import setuptools
import shutil
import socket
from distutils.errors import DistutilsError
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
)
from setuptools import ssl_support
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
from setuptools.py27compat import get_all_headers
from setuptools.py33compat import unescape
from setuptools.wheel import Wheel
__metaclass__ = type
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
PYPI_MD5 = re.compile(
r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
r'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py', -16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', -20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base, py_ver, plat
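# For example (illustrative): "foo-1.0.win32.exe" -> ("foo-1.0", None, "win32"),
# and "foo-1.0.win-amd64-py2.7.exe" -> ("foo-1.0", "2.7", "win-amd64").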
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
scheme, server, path, parameters, query, fragment = parts
base = urllib.parse.unquote(path.split('/')[-1])
if server == 'sourceforge.net' and base == 'download': # XXX Yuck
base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base:
base, fragment = base.split('#', 1)
return base, fragment
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata):
yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.whl') and '-' in basename:
wheel = Wheel(basename)
if not wheel.is_compatible():
return []
return [Distribution(
location=location,
project_name=wheel.project_name,
version=wheel.version,
# Increase priority over eggs.
precedence=EGG_DIST + 1,
)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
# So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1, len(parts) + 1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
)
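# For example, "adns-python-1.1.0" yields the candidate (name, version) splits
# ("adns", "python-1.1.0"), ("adns-python", "1.1.0") and
# ("adns-python-1.1.0", ""), one Distribution per split; the spurious ones
# compare low and rarely match a real request.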
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
if match:
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
class ContentChecker:
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
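# Illustrative use: HashChecker.from_url(
#     "https://example.com/pkg-1.0.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e")
# returns a checker whose feed()/is_valid() verify the downloaded bytes against
# the fragment digest; URLs without a recognized fragment get a no-op
# ContentChecker instead.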
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
self.to_scan = []
use_ssl = (
verify_ssl
and ssl_support.is_available
and (ca_bundle or ssl_support.find_ca_bundle())
)
if use_ssl:
self.opener = ssl_support.opener_for(ca_bundle)
else:
self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str):
# In Python 3 and got bytes but want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self, url, page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts) == 2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key, ())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec, Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence == DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
)
skipped[dist] = 1
continue
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
)
if test:
loc = self.download(dist.location, tmpdir)
dist.download_location = loc
if os.path.exists(dist.download_location):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if not dist and local_index is not None:
dist = find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or working download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists) == 1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename = dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp = None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(url)
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp:
fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except (http_client.HTTPException, socket.error) as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir, name)
# Download the file
#
if scheme == 'svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme == 'file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = _splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username=" + auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#', 1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def decode_entity(match):
what = match.group(0)
return unescape(what)
def htmldecode(text):
"""
Decode HTML entities in the given text.
>>> htmldecode(
... 'https://../package_name-0.1.2.tar.gz'
... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz')
'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
"""
return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
encoded_bytes = base64.b64encode(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential:
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
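# A quick sketch of the tuple-like behaviour (assumed values):
#   user, pw = Credential("alice", "s3cret")
#   str(Credential("alice", "s3cret"))  # -> 'alice:s3cret'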
class PyPIConfig(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
configparser.RawConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
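# Usage sketch (hypothetical ~/.pypirc contents): given a section whose
# repository is https://example.com/simple, the call
#   PyPIConfig().find_credential("https://example.com/simple/foo")
# returns that section's Credential, since matching is by URL prefix.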
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
parsed = urllib.parse.urlparse(url)
scheme, netloc, path, params, query, frag = parsed
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http_client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, address = _splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == address:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
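# Example (assumed URL carrying inline basic-auth credentials):
#   fp = open_with_auth("https://user:[email protected]/simple/")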
# copy of urllib.parse._splituser from Python 3.8
def _splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
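# Example (assumed path): local_open("file:///tmp/pkgs/") synthesizes an HTML
# directory listing and returns it wrapped in an HTTPError carrying the body;
# plain files are simply handed to urllib.request.urlopen.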
| [
"[email protected]"
] | |
298572d842e993f618c640c486277f700ba6ec7b | 05b42178aaefd7efdb2fb19fdea8e58056d8d4bd | /leetcode/combination_sum/recursive_1.py | f41f141fd86c2c7bc3479dea3ee84f4c399a59cb | [] | no_license | chrisjdavie/interview_practice | 43ca3df25fb0538d685a59ac752a6a4b269c44e9 | 2d47d583ed9c838a802b4aa4cefe649c77f5dd7f | refs/heads/master | 2023-08-16T18:22:46.492623 | 2023-08-16T16:04:01 | 2023-08-16T16:04:01 | 247,268,317 | 0 | 0 | null | 2020-03-14T17:35:12 | 2020-03-14T12:01:43 | Python | UTF-8 | Python | false | false | 2,458 | py | """
https://leetcode.com/problems/combination-sum/
Given an array of distinct integers `candidates` and a target integer `target`, return a list of all unique combinations of `candidates` where the chosen numbers sum to `target`. You may return the combinations in any order.
The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the
frequency
of at least one of the chosen numbers is different.
The test cases are generated such that the number of unique combinations that sum up to `target` is less than `150` combinations for the given input.
---------------------------
While this worked the first time, it kinda went a bit pear-shaped with corner cases I hadn't really considered, so I'm
trying again
"""
import pytest
class Solution:
def combinationSum(self, candidates: list[int], target: int) -> list[list[int]]:
def _solve(i_start_cand: int, _target: int) -> list[list[int]]:
results = []
for i_cand, cand in enumerate(candidates[i_start_cand:]):
for i_mult in range(1, _target//cand+1):
mult = i_mult*cand
if mult == _target:
results.append((i_mult)*[cand])
else:
for res in _solve(i_start_cand+i_cand+1, _target - mult):
results.append((i_mult)*[cand] + res)
return results
return _solve(0, target)
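# Ad-hoc demo (inputs taken from the first leetcode case below; the pytest
# cases remain the authoritative checks, and result order follows candidate order):
if __name__ == "__main__":
    print(Solution().combinationSum([2, 3, 6, 7], 7))  # -> [[2, 2, 3], [7]]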
@pytest.mark.parametrize(
"candidates,target,expected_combinations",
(
([2], 1, []),
([2], 2, [[2],]),
([3], 6, [[3, 3],]),
([2, 3], 6, [[2, 2, 2], [3, 3]]),
([5,], 6, []),
([2, 4], 8, [[2, 2, 2, 2], [2, 2, 4], [4, 4]]),
)
)
def test_unit(candidates, target, expected_combinations):
result = Solution().combinationSum(candidates, target)
for comb in expected_combinations:
assert comb in result
assert len(result) == len(expected_combinations)
@pytest.mark.parametrize(
"candidates,target,expected_combinations",
(
([2,3,6,7], 7, [[2,2,3],[7]]),
([2,3,5], 8, [[2,2,2,2],[2,3,3],[3,5]]),
([2], 1, []),
)
)
def test_leetcode(candidates, target, expected_combinations):
result = Solution().combinationSum(candidates, target)
for comb in expected_combinations:
assert comb in result
assert len(result) == len(expected_combinations)
| [
"[email protected]"
] | |
34545f058d107fa6129742261fa722045e4b7956 | 801510e45d9aebe5c5b8b09a3ce4453a3a11a3ca | /django/full_stack_django/fav_books/fav_books/urls.py | b93bbdcd069e6895495504d2c4b681034e2d205c | [] | no_license | michelleshan/coding_dojo_python_course | 5581ebca0a645ba7231a2da2d2d64d6c3735bfc4 | e20e8195950004ef0aa09e6b0f84e7f05bd355e8 | refs/heads/master | 2022-11-21T01:34:54.309175 | 2020-07-16T03:29:45 | 2020-07-16T03:29:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | """fav_books URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
path('',include('app_fav_books.urls')),
]
| [
"[email protected]"
] | |
5685e8af0a83996ef0288ac44c99899d0a7c43ec | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/programming/libs/liblrdf/actions.py | bc89302e9235025b741e9b91d9bb59681be0b8c9 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005,2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "liblrdf-0.4.0"
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "NEWS", "README")
| [
"[email protected]"
] | |
e2d9e629d50e8ffcd661032fae542529dc7724bc | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/trillsimulatedmcastipv4grouplist_c79d67ba115d7ed47c7c73c05670cc0d.py | c586c365c7b04ed2c2ab59fcf760d1a6fd61ff22 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 9,149 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class TrillSimulatedMCastIpv4GroupList(Base):
"""TRILL Multicast IPv4 Groups
The TrillSimulatedMCastIpv4GroupList class encapsulates a required trillSimulatedMCastIpv4GroupList resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'trillSimulatedMCastIpv4GroupList'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'InterGrpUcastAddrIncr': 'interGrpUcastAddrIncr',
'McastAddrCnt': 'mcastAddrCnt',
'McastAddrIncr': 'mcastAddrIncr',
'Name': 'name',
'SrcGrpMapping': 'srcGrpMapping',
'StartMcastAddr': 'startMcastAddr',
'StartUcastAddr': 'startUcastAddr',
'TopologyId': 'topologyId',
'UcastAddrIncr': 'ucastAddrIncr',
'UcastSrcCnt': 'ucastSrcCnt',
'VlanId': 'vlanId',
}
def __init__(self, parent):
super(TrillSimulatedMCastIpv4GroupList, self).__init__(parent)
@property
def Active(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def InterGrpUcastAddrIncr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Inter-Group Source Address Increment
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InterGrpUcastAddrIncr']))
@property
def McastAddrCnt(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Multicast Address Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['McastAddrCnt']))
@property
def McastAddrIncr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Multicast Address Increment
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['McastAddrIncr']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def SrcGrpMapping(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source-Group Mapping
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SrcGrpMapping']))
@property
def StartMcastAddr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start Multicast Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartMcastAddr']))
@property
def StartUcastAddr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start Source Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartUcastAddr']))
@property
def TopologyId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Topology/Nickname
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TopologyId']))
@property
def UcastAddrIncr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source Address Increment
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UcastAddrIncr']))
@property
def UcastSrcCnt(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): # Sources per Multicast Group
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UcastSrcCnt']))
@property
def VlanId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Vlan Id
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VlanId']))
def update(self, Name=None):
"""Updates trillSimulatedMCastIpv4GroupList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, Active=None, InterGrpUcastAddrIncr=None, McastAddrCnt=None, McastAddrIncr=None, SrcGrpMapping=None, StartMcastAddr=None, StartUcastAddr=None, TopologyId=None, UcastAddrIncr=None, UcastSrcCnt=None, VlanId=None):
"""Base class infrastructure that gets a list of trillSimulatedMCastIpv4GroupList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- InterGrpUcastAddrIncr (str): optional regex of interGrpUcastAddrIncr
- McastAddrCnt (str): optional regex of mcastAddrCnt
- McastAddrIncr (str): optional regex of mcastAddrIncr
- SrcGrpMapping (str): optional regex of srcGrpMapping
- StartMcastAddr (str): optional regex of startMcastAddr
- StartUcastAddr (str): optional regex of startUcastAddr
- TopologyId (str): optional regex of topologyId
- UcastAddrIncr (str): optional regex of ucastAddrIncr
- UcastSrcCnt (str): optional regex of ucastSrcCnt
- VlanId (str): optional regex of vlanId
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
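# Usage sketch (attribute access is illustrative; the Single/Increment pattern
# setters are assumed from the restpy Multivalue API):
#   grp.McastAddrCnt.Single(4)
#   grp.StartMcastAddr.Increment(start_value='225.0.0.1', step_value='0.0.0.1')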
| [
"[email protected]"
] | |
877f97e8de9331c59f06734cbd4df10c70b75efd | 4b41a76c5c366ba2daa30843acea16609b8f5da7 | /2017/01/AoC17_01_1.py | 776cd60ef6af6c060d9c2488d256ca8fbd15d274 | [] | no_license | grandfoosier/AdventOfCode | c4706cfefef61e80060cca89b0433636e42bf974 | a43fdd72fe4279196252f24a4894500a4e272a5d | refs/heads/master | 2020-06-11T12:36:48.699811 | 2019-01-14T23:44:44 | 2019-01-14T23:44:44 | 75,665,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | fname = "AoC17_01_1.txt"
text = [line.rstrip('\n') for line in open(fname)][0]
print "\nInteger Stream Loaded\n"
n = len(text)
sol = sum(int(c) for i,c in enumerate(text)
if text[i] == text[(i+1)%n])
print sol
print "\n"
| [
"[email protected]"
] | |
d51046eba4b9559778329e5ac3429bc3f38fdbf6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/846.py | 2e11d979e792075a6ed484de0808b1e4c66a2eee | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | from sys import argv
script, in_txt, out_txt = argv
def solver(in_txt, out_txt):
in_file = open(in_txt)
out_file = open(out_txt, 'w')
T = int(in_file.readline())
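    # The chosen card must lie in the intersection of the selected rows from
    # both arrangements: an empty intersection means the volunteer cheated,
    # more than one candidate means the magician can't decide, one is the answer.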
for t in range(T):
N = int(in_file.readline())
ls = []
for i in range(4):
x = map(int, in_file.readline().split())
ls.append(x)
f = set(ls[N-1])
N = int(in_file.readline())
ls = []
for i in range(4):
x = map(int, in_file.readline().split())
ls.append(x)
g = set(ls[N-1])
h = list(f & g)
if len(h) == 0:
line = "Case #%d: Volunteer cheated!" % (t+1)
if len(h) > 1:
line = "Case #%d: Bad magician!" % (t+1)
if len(h) == 1:
line = "Case #%d: %d" % (t+1,h[0])
out_file.write(line)
out_file.write('\n')
in_file.close()
out_file.close()
return
solver(in_txt, out_txt)
| [
"[email protected]"
] | |
013ed97a9e4b87e7c35ccbdd5a03e89df9b11bf3 | 469318e9a1ff8966199529affa1e4900a9f13ff0 | /src/structured_data/_adt/sum_type.py | b356eb5a8c1981146ea9e7e04d39ee2dc238e3e9 | [
"MIT"
] | permissive | mwchase/python-structured-data | 06e968a96d3953bd6f585aad66e3bea8921106e2 | 1059ddaf0728610576299f30485fad221cf5695c | refs/heads/master | 2021-06-13T08:41:46.148869 | 2021-02-23T18:37:26 | 2021-02-23T18:37:26 | 136,821,798 | 2 | 0 | MIT | 2020-02-05T21:59:00 | 2018-06-10T15:40:41 | Python | UTF-8 | Python | false | false | 4,464 | py | """Internal implementation of the Sum base class."""
from __future__ import annotations
import typing
from .. import _cant_modify
from . import constructor
from . import ordering
from . import prewritten_methods
from . import product_type
_T = typing.TypeVar("_T")
def _conditional_call(call: bool, func: typing.Callable, *args: typing.Any) -> None:
if call:
func(*args)
def _set_new_functions(cls: type, *functions: typing.Callable) -> typing.Optional[str]:
"""Attempt to set the attributes corresponding to the functions on cls.
If any attributes are already defined, fail *before* setting any, and
return the already-defined name.
"""
cant_set = product_type.cant_set_new_functions(cls, *functions)
if cant_set:
return cant_set
for function in functions:
setattr(
cls,
product_type.name_(cls, typing.cast(product_type.MethodLike, function)),
function,
)
return None
def _sum_new(_cls: typing.Type[_T], subclasses: typing.FrozenSet[type]) -> None:
def base(cls: typing.Type[_T], args: tuple) -> _T:
# By the way, this is for https://github.com/python/mypy/issues/7580
# When that's fixed, this can be made a one-liner again.
superclass = super(_cls, cls)
return superclass.__new__(cls, args) # type: ignore
new = vars(_cls).get("__new__", staticmethod(base))
def __new__(cls: typing.Type[_T], args: tuple) -> _T:
if cls not in subclasses:
raise TypeError
return new.__get__(None, cls)(cls, args)
_cls.__new__ = staticmethod(__new__) # type: ignore
class Sum(constructor.SumBase):
"""Base class of classes with disjoint constructors.
Examines PEP 526 __annotations__ to determine subclasses.
    If repr is true, a __repr__() method is added to the class.
    If eq is true, __eq__(), __ne__(), and __hash__() methods are added.
    If order is true, rich comparison dunder methods are added.
The Sum class examines the class to find Ctor annotations.
A Ctor annotation is the adt.Ctor class itself, or the result of indexing
the class, either with a single type hint, or a tuple of type hints.
All other annotations are ignored.
The subclass is not subclassable, but has subclasses at each of the
names that had Ctor annotations. Each subclass takes a fixed number of
arguments, corresponding to the type hints given to its annotation, if any.
"""
__slots__ = ()
def __new__(cls, /, *args: typing.Any, **kwargs: typing.Any) -> Sum: # noqa: E225
if not issubclass(cls, constructor.ADTConstructor):
raise TypeError
return super().__new__(cls, *args, **kwargs)
# Both of these are for consistency with modules defined in the stdlib.
# BOOM!
def __init_subclass__(
cls: type,
*,
repr: bool = True, # pylint: disable=redefined-builtin
eq: bool = True, # pylint: disable=invalid-name
order: bool = False,
**kwargs: typing.Any,
) -> None:
super().__init_subclass__(**kwargs) # type: ignore
if issubclass(cls, constructor.ADTConstructor):
return
ordering.ordering_options_are_valid(eq=eq, order=order)
prewritten_methods.SUBCLASS_ORDER[cls] = constructor.make_constructors(cls)
source = prewritten_methods.PrewrittenSumMethods
cls.__init_subclass__ = source.__init_subclass__ # type: ignore
_sum_new(cls, frozenset(prewritten_methods.SUBCLASS_ORDER[cls]))
_conditional_call(repr, _set_new_functions, cls, source.__repr__)
equality_methods_were_set = eq and not _set_new_functions(
cls, source.__eq__, source.__ne__
)
if equality_methods_were_set:
cls.__hash__ = source.__hash__ # type: ignore
ordering.raise_for_collision(
(
order
and ordering.can_set_ordering(can_set=equality_methods_were_set)
and _set_new_functions(
cls, source.__lt__, source.__le__, source.__gt__, source.__ge__
)
),
cls.__name__,
)
def __bool__(self) -> bool:
return True
def __setattr__(self, name: str, value: typing.Any) -> None:
_cant_modify.guard(self, name)
super().__setattr__(name, value)
def __delattr__(self, name: str) -> None:
_cant_modify.guard(self, name)
super().__delattr__(name)
| [
"[email protected]"
] | |
7ed1a8bc8bf59dbe6985ba6d4568c3994d6222d7 | b45b3e5e7389d071161fa52340cb119a29c76907 | /ieWin_test.py | ea4039f4389737ebc027d2b25c466e520fb3c2e2 | [] | no_license | Metallicow/wxPythonDemos | 2fc6882a11a0aa6bb35c42f163cfcd6b3456f4fd | 396d1ade5930528ec7518b9c22dc93a274cb418f | refs/heads/master | 2020-12-25T11:52:18.577898 | 2013-05-19T18:58:11 | 2013-05-19T18:58:11 | 11,283,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | #!/usr/bin/env
import wx
if wx.Platform == '__WXMSW__':
import wx.lib.iewin as iewin
else:
    raise ImportError("This test only works on Windows")
class DemoFrame(wx.Frame):
""" This window displays a button """
def __init__(self, title = "Micro App"):
wx.Frame.__init__(self, None , -1, title)
btn = wx.Button(self, label = "Get HTML")
btn.Bind(wx.EVT_BUTTON, self.GetHTML )
self.htwin = iewin.IEHtmlWindow(self)
self.htwin.Navigate('http://cameochemicals.noaa.gov/')
S = wx.BoxSizer(wx.VERTICAL)
S.Add(btn, 0, wx.ALL, 5)
S.Add(self.htwin, 1, wx.EXPAND)
self.SetSizer(S)
self.SetSize((700,500))
self.Bind(wx.EVT_CLOSE, self.OnQuit)
def OnQuit(self,Event):
self.Destroy()
def GetHTML(self, event=None):
print "contents of HTML window as text: ", self.htwin.GetText(asHTML=False)[:500]
print "contents of HTML window as html: ", self.htwin.GetText(asHTML=True)
app = wx.App(False)
frame = DemoFrame()
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
cba1ccfc085d6e62bbd832f0e0ea224fd7d50d46 | f5dae529fa0a42678cbf0261e227e45101317034 | /test/backward_compatibility/check_backward_compatibility.py | 0f45a263b874e529e5b5f54e3d37924c58914a83 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | kamiedu/pytorch | 0fa1d28f4332bf1fd2bb93169254f2bcc2c4d039 | 54a1e8509c9e88200139a37a7dd3a86660849591 | refs/heads/master | 2022-05-22T08:55:36.432225 | 2020-04-17T20:33:42 | 2020-04-17T20:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,535 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import datetime
import re
import sys
import torch
from torch._C import parse_schema
# The date specifies how long the whitelist exclusion should apply to.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Whitelist entries can be removed after the date listed on them passes.
white_list = [
('c10_experimental', datetime.date(2222, 1, 1)),
# We export some functions and classes for test_jit.py directly from libtorch.so,
# it's not important to have BC for them
('_TorchScriptTesting.*', datetime.date(9999, 1, 1)),
('aten::append*', datetime.date(2020, 4, 15)),
('aten::real*', datetime.date(2020, 4, 15)),
('aten::imag*', datetime.date(2020, 4, 15)),
('aten::quantize_per_tensor', datetime.date(2020, 4, 15)),
('aten::index_put', datetime.date(2020, 4, 10)),
('aten::index', datetime.date(2020, 4, 10)),
('aten::_index_put_impl', datetime.date(2020, 4, 10)),
('aten::index_put_', datetime.date(2020, 4, 10)),
('aten::requires_grad_', datetime.date(2020, 4, 30)),
('quantized::batch_norm', datetime.date(2020, 4, 20)),
('aten::sizes', datetime.date(2020, 4, 30)),
('aten::strides', datetime.date(2020, 4, 30)),
('aten::backward', datetime.date(2020, 4, 30)),
]
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
dont_parse_list = [
]
def white_listed(schema, white_list):
for item in white_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema.name):
return True
return False
def dont_parse(schema_line):
for item in dont_parse_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema_line):
return True
return False
def check_bc(new_schema_dict):
existing_schemas = torch._C._jit_get_all_schemas()
is_bc = True
broken_ops = []
for existing_schema in existing_schemas:
if white_listed(existing_schema, white_list):
print("Black list, skipping schema: ", str(existing_schema))
continue
print("processing existing schema: ", str(existing_schema))
new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
for new_schema in new_schemas:
if new_schema.is_backward_compatible_with(existing_schema):
found = True
break
if not found:
print('Can NOT find backward compatible schemas after changes '
'for schema {} from the following candidates:\n[\n{}\n]'
.format(
str(existing_schema),
"\n\t".join(str(s) for s in new_schemas)))
# TODO Print out more details about why candidates don't match.
broken_ops.append(str(existing_schema))
is_bc = False
if is_bc:
print('Found backward compatible schemas for all existing schemas')
else:
print('The PR is introducing backward incompatible changes to the '
'operator library. Please contact PyTorch team to confirm '
'whether this change is wanted or not. \n\nBroken ops: '
'[\n\t{}\n]'.format("\n\t".join(broken_ops)))
return is_bc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'--new-schemas',
help='filename to load new schemas',
type=str,
default='schemas.txt')
args = parser.parse_args()
new_schema_dict = dict()
with open(args.new_schemas, 'r') as f:
while True:
line = f.readline()
if not line:
break
if dont_parse(line.strip()):
print("Not parsing schema line: ", line.strip())
continue
s = parse_schema(line.strip())
slist = new_schema_dict.get(s.name, [])
slist.append(s)
new_schema_dict[s.name] = slist
if not check_bc(new_schema_dict):
sys.exit(1)
| [
"[email protected]"
] | |
2bbe7e0429fc6751c73c8cb1ffefd9c7c1009213 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/avs/v20211201/get_private_cloud.py | 208fc611d3549dec83e38715f8fd3279d53fb514 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,826 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateCloudResult',
'AwaitableGetPrivateCloudResult',
'get_private_cloud',
'get_private_cloud_output',
]
@pulumi.output_type
class GetPrivateCloudResult:
"""
A private cloud resource
"""
def __init__(__self__, availability=None, circuit=None, encryption=None, endpoints=None, external_cloud_links=None, id=None, identity=None, identity_sources=None, internet=None, location=None, management_cluster=None, management_network=None, name=None, network_block=None, nsxt_certificate_thumbprint=None, nsxt_password=None, provisioning_network=None, provisioning_state=None, secondary_circuit=None, sku=None, tags=None, type=None, vcenter_certificate_thumbprint=None, vcenter_password=None, vmotion_network=None):
if availability and not isinstance(availability, dict):
raise TypeError("Expected argument 'availability' to be a dict")
pulumi.set(__self__, "availability", availability)
if circuit and not isinstance(circuit, dict):
raise TypeError("Expected argument 'circuit' to be a dict")
pulumi.set(__self__, "circuit", circuit)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if endpoints and not isinstance(endpoints, dict):
raise TypeError("Expected argument 'endpoints' to be a dict")
pulumi.set(__self__, "endpoints", endpoints)
if external_cloud_links and not isinstance(external_cloud_links, list):
raise TypeError("Expected argument 'external_cloud_links' to be a list")
pulumi.set(__self__, "external_cloud_links", external_cloud_links)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if identity_sources and not isinstance(identity_sources, list):
raise TypeError("Expected argument 'identity_sources' to be a list")
pulumi.set(__self__, "identity_sources", identity_sources)
if internet and not isinstance(internet, str):
raise TypeError("Expected argument 'internet' to be a str")
pulumi.set(__self__, "internet", internet)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if management_cluster and not isinstance(management_cluster, dict):
raise TypeError("Expected argument 'management_cluster' to be a dict")
pulumi.set(__self__, "management_cluster", management_cluster)
if management_network and not isinstance(management_network, str):
raise TypeError("Expected argument 'management_network' to be a str")
pulumi.set(__self__, "management_network", management_network)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_block and not isinstance(network_block, str):
raise TypeError("Expected argument 'network_block' to be a str")
pulumi.set(__self__, "network_block", network_block)
if nsxt_certificate_thumbprint and not isinstance(nsxt_certificate_thumbprint, str):
raise TypeError("Expected argument 'nsxt_certificate_thumbprint' to be a str")
pulumi.set(__self__, "nsxt_certificate_thumbprint", nsxt_certificate_thumbprint)
if nsxt_password and not isinstance(nsxt_password, str):
raise TypeError("Expected argument 'nsxt_password' to be a str")
pulumi.set(__self__, "nsxt_password", nsxt_password)
if provisioning_network and not isinstance(provisioning_network, str):
raise TypeError("Expected argument 'provisioning_network' to be a str")
pulumi.set(__self__, "provisioning_network", provisioning_network)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if secondary_circuit and not isinstance(secondary_circuit, dict):
raise TypeError("Expected argument 'secondary_circuit' to be a dict")
pulumi.set(__self__, "secondary_circuit", secondary_circuit)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if vcenter_certificate_thumbprint and not isinstance(vcenter_certificate_thumbprint, str):
raise TypeError("Expected argument 'vcenter_certificate_thumbprint' to be a str")
pulumi.set(__self__, "vcenter_certificate_thumbprint", vcenter_certificate_thumbprint)
if vcenter_password and not isinstance(vcenter_password, str):
raise TypeError("Expected argument 'vcenter_password' to be a str")
pulumi.set(__self__, "vcenter_password", vcenter_password)
if vmotion_network and not isinstance(vmotion_network, str):
raise TypeError("Expected argument 'vmotion_network' to be a str")
pulumi.set(__self__, "vmotion_network", vmotion_network)
@property
@pulumi.getter
def availability(self) -> Optional['outputs.AvailabilityPropertiesResponse']:
"""
Properties describing how the cloud is distributed across availability zones
"""
return pulumi.get(self, "availability")
@property
@pulumi.getter
def circuit(self) -> Optional['outputs.CircuitResponse']:
"""
An ExpressRoute Circuit
"""
return pulumi.get(self, "circuit")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.EncryptionResponse']:
"""
Customer managed key encryption, can be enabled or disabled
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def endpoints(self) -> 'outputs.EndpointsResponse':
"""
The endpoints
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter(name="externalCloudLinks")
def external_cloud_links(self) -> Sequence[str]:
"""
Array of cloud link IDs from other clouds that connect to this one
"""
return pulumi.get(self, "external_cloud_links")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.PrivateCloudIdentityResponse']:
"""
The identity of the private cloud, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="identitySources")
def identity_sources(self) -> Optional[Sequence['outputs.IdentitySourceResponse']]:
"""
vCenter Single Sign On Identity Sources
"""
return pulumi.get(self, "identity_sources")
@property
@pulumi.getter
def internet(self) -> Optional[str]:
"""
Connectivity to internet is enabled or disabled
"""
return pulumi.get(self, "internet")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managementCluster")
def management_cluster(self) -> 'outputs.ManagementClusterResponse':
"""
The default cluster used for management
"""
return pulumi.get(self, "management_cluster")
@property
@pulumi.getter(name="managementNetwork")
def management_network(self) -> str:
"""
Network used to access vCenter Server and NSX-T Manager
"""
return pulumi.get(self, "management_network")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkBlock")
def network_block(self) -> str:
"""
The block of addresses should be unique across VNet in your subscription as well as on-premise. Make sure the CIDR format is conformed to (A.B.C.D/X) where A,B,C,D are between 0 and 255, and X is between 0 and 22
"""
return pulumi.get(self, "network_block")
@property
@pulumi.getter(name="nsxtCertificateThumbprint")
def nsxt_certificate_thumbprint(self) -> str:
"""
Thumbprint of the NSX-T Manager SSL certificate
"""
return pulumi.get(self, "nsxt_certificate_thumbprint")
@property
@pulumi.getter(name="nsxtPassword")
def nsxt_password(self) -> Optional[str]:
"""
Optionally, set the NSX-T Manager password when the private cloud is created
"""
return pulumi.get(self, "nsxt_password")
@property
@pulumi.getter(name="provisioningNetwork")
def provisioning_network(self) -> str:
"""
Used for virtual machine cold migration, cloning, and snapshot migration
"""
return pulumi.get(self, "provisioning_network")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="secondaryCircuit")
def secondary_circuit(self) -> Optional['outputs.CircuitResponse']:
"""
A secondary expressRoute circuit from a separate AZ. Only present in a stretched private cloud
"""
return pulumi.get(self, "secondary_circuit")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The private cloud SKU
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vcenterCertificateThumbprint")
def vcenter_certificate_thumbprint(self) -> str:
"""
Thumbprint of the vCenter Server SSL certificate
"""
return pulumi.get(self, "vcenter_certificate_thumbprint")
@property
@pulumi.getter(name="vcenterPassword")
def vcenter_password(self) -> Optional[str]:
"""
Optionally, set the vCenter admin password when the private cloud is created
"""
return pulumi.get(self, "vcenter_password")
@property
@pulumi.getter(name="vmotionNetwork")
def vmotion_network(self) -> str:
"""
Used for live migration of virtual machines
"""
return pulumi.get(self, "vmotion_network")
class AwaitableGetPrivateCloudResult(GetPrivateCloudResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateCloudResult(
availability=self.availability,
circuit=self.circuit,
encryption=self.encryption,
endpoints=self.endpoints,
external_cloud_links=self.external_cloud_links,
id=self.id,
identity=self.identity,
identity_sources=self.identity_sources,
internet=self.internet,
location=self.location,
management_cluster=self.management_cluster,
management_network=self.management_network,
name=self.name,
network_block=self.network_block,
nsxt_certificate_thumbprint=self.nsxt_certificate_thumbprint,
nsxt_password=self.nsxt_password,
provisioning_network=self.provisioning_network,
provisioning_state=self.provisioning_state,
secondary_circuit=self.secondary_circuit,
sku=self.sku,
tags=self.tags,
type=self.type,
vcenter_certificate_thumbprint=self.vcenter_certificate_thumbprint,
vcenter_password=self.vcenter_password,
vmotion_network=self.vmotion_network)
def get_private_cloud(private_cloud_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateCloudResult:
"""
A private cloud resource
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['privateCloudName'] = private_cloud_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:avs/v20211201:getPrivateCloud', __args__, opts=opts, typ=GetPrivateCloudResult).value
return AwaitableGetPrivateCloudResult(
availability=__ret__.availability,
circuit=__ret__.circuit,
encryption=__ret__.encryption,
endpoints=__ret__.endpoints,
external_cloud_links=__ret__.external_cloud_links,
id=__ret__.id,
identity=__ret__.identity,
identity_sources=__ret__.identity_sources,
internet=__ret__.internet,
location=__ret__.location,
management_cluster=__ret__.management_cluster,
management_network=__ret__.management_network,
name=__ret__.name,
network_block=__ret__.network_block,
nsxt_certificate_thumbprint=__ret__.nsxt_certificate_thumbprint,
nsxt_password=__ret__.nsxt_password,
provisioning_network=__ret__.provisioning_network,
provisioning_state=__ret__.provisioning_state,
secondary_circuit=__ret__.secondary_circuit,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
vcenter_certificate_thumbprint=__ret__.vcenter_certificate_thumbprint,
vcenter_password=__ret__.vcenter_password,
vmotion_network=__ret__.vmotion_network)
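# Usage sketch (hypothetical resource names):
#   cloud = get_private_cloud(private_cloud_name="cloud1",
#                             resource_group_name="group1")
#   pulumi.export("provisioningState", cloud.provisioning_state)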
@_utilities.lift_output_func(get_private_cloud)
def get_private_cloud_output(private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateCloudResult]:
"""
A private cloud resource
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
| [
"[email protected]"
] | |
6e40e95fe1174dabcfdcd931cd6b707d1b097850 | caef61baf7fc3f933d2fca8dceb7598be22adde1 | /openapi_core/templating/datatypes.py | 7087d9e345fd420f3213ea6bfee0e565185b661b | [
"BSD-3-Clause"
] | permissive | dlarrick/openapi-core | 679835b749d49f15da61f6f14df060e08010fee6 | 0865a4f54f38bdbe4a0de11addfa425c302aedfd | refs/heads/master | 2021-05-24T14:22:24.708899 | 2021-03-31T15:25:18 | 2021-03-31T15:25:18 | 253,604,031 | 0 | 0 | BSD-3-Clause | 2020-04-06T20:07:12 | 2020-04-06T20:07:11 | null | UTF-8 | Python | false | false | 281 | py | import attr
@attr.s
class TemplateResult(object):
pattern = attr.ib(default=None)
variables = attr.ib(default=None)
@property
def resolved(self):
if not self.variables:
return self.pattern
return self.pattern.format(**self.variables)
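# Example (assumed values): TemplateResult("/pets/{id}", {"id": "1"}).resolved
# evaluates to "/pets/1"; with falsy `variables` the raw pattern is returned.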
| [
"[email protected]"
] | |
59c5558142ea9d114fe38247f09d705fa9cdba2d | c4a33b613ffc77dccf96d33c3a5cc127405c0e95 | /life/views.py | 4a22d705c1485f87c2059c53d66383416843042d | [] | no_license | tsokac2/new-irish-life | 25f49bd0b74dfa7c0a449772249f6cb51925b643 | d09934b60a1fd4fbd4540d412dc5dab726f5b502 | refs/heads/main | 2023-07-02T09:54:55.082587 | 2021-07-30T04:42:57 | 2021-07-30T04:42:57 | 379,245,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | from django.shortcuts import render
from .models import Life
def life(request):
""" A view to return the Life template """
life_section = Life.objects.all()
template = 'life/life.html'
context = {
'life_section': life_section
}
return render(request, template, context) | [
"[email protected]"
] | |
12d069604d83d095a9d0ccc9988b33beb79b899f | 30b0a47d6373f79533c8e1a641a4c781966507ca | /OneNet/fvcore-master/tests/test_transform.py | e54d368f16e8463adf9d1dba12942d012d64464c | [
"MIT",
"Apache-2.0"
] | permissive | solicucu/detectron2_onenet | ae59a7ba13f86ba3b9c117f8e99dfab3298b2e5a | b9ed8a72720f597d65666c7f18b9a00724e3c7e9 | refs/heads/master | 2023-03-15T15:42:18.951793 | 2021-03-08T13:03:42 | 2021-03-08T13:03:42 | 345,655,710 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 40,125 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import unittest
from typing import Any, Tuple
import numpy as np
import torch
from fvcore.transforms import transform as T
from fvcore.transforms.transform_util import to_float_tensor, to_numpy
# pyre-ignore-all-errors
class TestTransforms(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
np.random.seed(42)
def test_register(self):
"""
Test register.
"""
dtype = "int"
def add1(t, x):
return x + 1
def flip_sub_width(t, x):
return x - t.width
T.Transform.register_type(dtype, add1)
T.HFlipTransform.register_type(dtype, flip_sub_width)
transforms = T.TransformList(
[
T.ScaleTransform(0, 0, 0, 0, 0),
T.CropTransform(0, 0, 0, 0),
T.HFlipTransform(3),
]
)
self.assertEqual(transforms.apply_int(3), 2)
# Testing __add__, __iadd__, __radd__, __len__.
transforms = transforms + transforms
transforms += transforms
transforms = T.NoOpTransform() + transforms
self.assertEqual(len(transforms), 13)
with self.assertRaises(AssertionError):
T.HFlipTransform.register_type(dtype, lambda x: 1)
with self.assertRaises(AttributeError):
transforms.no_existing
def test_register_with_decorator(self):
"""
Test register using decorator.
"""
dtype = "float"
@T.HFlipTransform.register_type(dtype)
def add1(t, x):
return x + 1
transforms = T.TransformList([T.HFlipTransform(3)])
self.assertEqual(transforms.apply_float(3), 4)
def test_noop_transform_no_register(self):
"""
NoOpTransform does not need register - it's by default no-op.
"""
t = T.NoOpTransform()
self.assertEqual(t.apply_anything(1), 1)
@staticmethod
def BlendTransform_img_gt(img, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the blend transformation.
Args:
            img (array): image array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
src_image, src_weight, dst_weight = args
if img.dtype == np.uint8:
img = img.astype(np.float32)
img = src_weight * src_image + dst_weight * img
img = np.clip(img, 0, 255).astype(np.uint8)
else:
img = src_weight * src_image + dst_weight * img
return img, img.shape
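    # For example (assumed weights), src_weight=0.3 and dst_weight=0.7 blend
    # 30% of src_image into the original img, clipping to [0, 255] for uint8.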
@staticmethod
def CropTransform_img_gt(imgs, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the crop transformation.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
x0, y0, w, h = args
if len(imgs.shape) <= 3:
ret = imgs[y0 : y0 + h, x0 : x0 + w]
else:
ret = imgs[..., y0 : y0 + h, x0 : x0 + w, :]
return ret, ret.shape
@staticmethod
def GridSampleTransform_img_gt(imgs, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the grid sampling transformation. Currently only dummy gt is
prepared.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
return imgs, imgs.shape
@staticmethod
def VFlipTransform_img_gt(imgs, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the vertical flip transformation.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
if len(imgs.shape) <= 3:
# HxW or HxWxC.
return imgs[::-1, :], imgs.shape
else:
# TxHxWxC.
return imgs[:, ::-1, :], imgs.shape
@staticmethod
def HFlipTransform_img_gt(imgs, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the horizontal flip transformation.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
if len(imgs.shape) <= 3:
# HxW or HxWxC.
return imgs[:, ::-1], imgs.shape
else:
# TxHxWxC.
return imgs[:, :, ::-1], imgs.shape
@staticmethod
def NoOpTransform_img_gt(imgs, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying no transformation.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
(list): expected shape of the output array.
"""
return imgs, imgs.shape
@staticmethod
def ScaleTransform_img_gt(imgs, *args) -> Tuple[Any, Any]:
"""
Given the input array, return the expected output array and shape after
applying the resize transformation.
Args:
imgs (array): image(s) array before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
img (array): expected output array after apply the transformation.
None means does not have expected output array for sanity check.
(list): expected shape of the output array. None means does not have
expected output shape for sanity check.
"""
h, w, new_h, new_w, interp = args
float_tensor = to_float_tensor(imgs)
if interp == "nearest":
if float_tensor.dim() == 3:
float_tensor = torch._C._nn.upsample_nearest1d(
float_tensor, (new_h, new_w)
)
elif float_tensor.dim() == 4:
float_tensor = torch._C._nn.upsample_nearest2d(
float_tensor, (new_h, new_w)
)
elif float_tensor.dim() == 5:
float_tensor = torch._C._nn.upsample_nearest3d(
float_tensor, (new_h, new_w)
)
else:
return None, None
elif interp == "bilinear":
if float_tensor.dim() == 4:
float_tensor = torch._C._nn.upsample_bilinear2d(
float_tensor, (new_h, new_w), False
)
else:
return None, None
numpy_tensor = to_numpy(float_tensor, imgs.shape, imgs.dtype)
return numpy_tensor, numpy_tensor.shape
@staticmethod
def _seg_provider(n: int = 8, h: int = 10, w: int = 10) -> np.ndarray:
"""
Provide different segmentations as test cases.
Args:
n (int): number of points to generate in the image as segmentations.
h, w (int): height and width dimensions.
Returns:
(np.ndarray): the segmentation to test on.
"""
# Prepare random segmentation as test cases.
for _ in range(n):
yield np.random.randint(2, size=(h, w))
@staticmethod
def _img_provider(
n: int = 8, c: int = 3, h: int = 10, w: int = 10
) -> Tuple[np.ndarray, type, str]:
"""
Provide different image inputs as test cases.
Args:
n, c, h, w (int): batch, channel, height, and width dimensions.
Returns:
(np.ndarray): an image to test on.
(type): type of the current array.
(str): string to represent the shape. Options include `hw`, `hwc`,
`nhwc`.
"""
# Prepare mesh grid as test case.
img_h_grid, img_w_grid = np.mgrid[0 : h * 2 : 2, 0 : w * 2 : 2]
img_hw_grid = img_h_grid * w + img_w_grid
img_hwc_grid = np.repeat(img_hw_grid[:, :, None], c, axis=2)
img_nhwc_grid = np.repeat(img_hwc_grid[None, :, :, :], n, axis=0)
for b in range(img_nhwc_grid.shape[0]):
img_nhwc_grid[b] = img_nhwc_grid[b] + b
# Prepare random array as test case.
img_hw_random = np.random.rand(h, w)
img_hwc_random = np.random.rand(h, w, c)
img_nhwc_random = np.random.rand(n, h, w, c)
for array_type, input_shape, init in itertools.product(
[np.uint8, np.float32], ["hw", "hwc", "nhwc"], ["grid", "random"]
):
yield locals()["img_{}_{}".format(input_shape, init)].astype(
array_type
), array_type, input_shape
def test_abstract(self):
with self.assertRaises(TypeError):
T.Transform()
def test_blend_img_transforms(self):
"""
Test BlendTransform.
"""
_trans_name = "BlendTransform"
blend_src_hw = np.ones((10, 10))
blend_src_hwc = np.ones((10, 10, 3))
blend_src_nhwc = np.ones((8, 10, 10, 3))
for img, array_type, shape_str in TestTransforms._img_provider():
blend_src = locals()["blend_src_{}".format(shape_str)].astype(array_type)
params = (
(blend_src, 0.0, 1.0),
(blend_src, 0.3, 0.7),
(blend_src, 0.5, 0.5),
(blend_src, 0.7, 0.3),
(blend_src, 1.0, 0.0),
)
for param in params:
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
def test_crop_img_transforms(self):
"""
        Test CropTransform.
"""
_trans_name = "CropTransform"
params = (
(0, 0, 0, 0),
(0, 0, 1, 1),
(0, 0, 6, 1),
(0, 0, 1, 6),
(0, 0, 6, 6),
(1, 3, 6, 6),
(3, 1, 6, 6),
(3, 3, 6, 6),
(6, 6, 6, 6),
)
for (img, array_type, shape_str), param in itertools.product(
TestTransforms._img_provider(), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
def test_vflip_img_transforms(self):
"""
        Test VFlipTransform.
"""
_trans_name = "VFlipTransform"
params = ((0,), (1,))
for (img, array_type, shape_str), param in itertools.product(
TestTransforms._img_provider(), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}.\n"
"Output: {} -> {}".format(
_trans_name, param, shape_str, array_type, result, img_gt
),
)
def test_hflip_img_transforms(self):
"""
        Test HFlipTransform.
"""
_trans_name = "HFlipTransform"
params = ((0,), (1,))
for (img, array_type, shape_str), param in itertools.product(
TestTransforms._img_provider(), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}.\n"
"Output: {} -> {}".format(
_trans_name, param, shape_str, array_type, result, img_gt
),
)
def test_no_op_img_transforms(self):
"""
        Test NoOpTransform.
"""
_trans_name = "NoOpTransform"
        # one empty argument tuple, so the itertools.product below is
        # non-empty and the loop body actually runs
        params = ((),)
for (img, array_type, shape_str), param in itertools.product(
TestTransforms._img_provider(), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
def test_scale_img_transforms(self):
"""
Test ScaleTransform.
"""
_trans_name = "ScaleTransform"
# Testing success cases.
params = (
(10, 20, 20, 20, "nearest"),
(10, 20, 10, 20, "nearest"),
(10, 20, 20, 10, "nearest"),
(10, 20, 1, 1, "nearest"),
(10, 20, 3, 3, "nearest"),
(10, 20, 5, 10, "nearest"),
(10, 20, 10, 5, "nearest"),
(10, 20, 20, 20, "bilinear"),
(10, 20, 10, 20, "bilinear"),
(10, 20, 20, 10, "bilinear"),
(10, 20, 1, 1, "bilinear"),
(10, 20, 3, 3, "bilinear"),
(10, 20, 5, 10, "bilinear"),
(10, 20, 10, 5, "bilinear"),
)
for (img, array_type, shape_str), param in itertools.product(
TestTransforms._img_provider(h=10, w=20), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_image(img)
img_gt, shape_gt = gt_transformer(img, *param)
if shape_gt is not None:
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
if img_gt is not None:
self.assertTrue(
np.allclose(result, img_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {} and type {}".format(
_trans_name, param, shape_str, array_type
),
)
# Testing failure cases.
params = (
(0, 0, 20, 20, "nearest"),
(0, 0, 0, 0, "nearest"),
(-1, 0, 0, 0, "nearest"),
(0, -1, 0, 0, "nearest"),
(0, 0, -1, 0, "nearest"),
(0, 0, 0, -1, "nearest"),
(20, 10, 0, -1, "nearest"),
)
for (img, _, _), param in itertools.product(
TestTransforms._img_provider(h=10, w=20), params
):
gt_transformer = getattr(self, "{}_img_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
with self.assertRaises((RuntimeError, AssertionError)):
result = transformer.apply_image(img)
def test_grid_sample_img_transform(self):
"""
        Test grid sampling transformation.
"""
# TODO: add more complex test case for grid sample.
for interp in ["nearest"]:
grid_2d = np.stack(
np.meshgrid(np.linspace(-1, 1, 10), np.linspace(-1, 1, 10)), axis=2
            ).astype(np.float64)  # the np.float alias is removed in newer NumPy
grid = np.tile(grid_2d[None, :, :, :], [8, 1, 1, 1])
transformer = T.GridSampleTransform(grid, interp)
            img_h, img_w = np.mgrid[0:10:1, 0:10:1].astype(np.float64)
img_hw = img_h * 10 + img_w
img_hwc = np.repeat(img_hw[:, :, None], 3, axis=2)
img_bhwc = np.repeat(img_hwc[None, :, :, :], 8, axis=0)
result = transformer.apply_image(img_bhwc)
img_gt, shape_gt = TestTransforms.GridSampleTransform_img_gt(
img_bhwc, *(grid_2d, interp)
)
self.assertEqual(shape_gt, result.shape)
self.assertTrue(np.allclose(result, img_gt))
def test_crop_polygons(self):
        # Ensure that shapely produces an extra vertex at the end.
        # This is assumed when cropping polygons.
try:
import shapely.geometry as geometry
except ImportError:
return
polygon = np.asarray([3, 3.5, 11, 10.0, 38, 98, 15.0, 100.0]).reshape(-1, 2)
g = geometry.Polygon(polygon)
coords = np.asarray(g.exterior.coords)
self.assertEqual(coords[0].tolist(), coords[-1].tolist())
@staticmethod
def _coords_provider(
num_coords: int = 5,
n: int = 50,
h_max: int = 10,
h_min: int = 0,
w_max: int = 10,
w_min: int = 0,
) -> Tuple[np.ndarray, type, str]:
"""
Provide different coordinate inputs as test cases.
Args:
num_coords (int): number of coordinates to provide.
n (int): size of the batch.
h_max, h_min (int): max, min coordinate value on height dimension.
w_max, w_min (int): max, min coordinate value on width dimension.
Returns:
(np.ndarray): coordinates array of shape Nx2 to test on.
"""
for _ in range(num_coords):
yield np.concatenate(
[
np.random.randint(low=h_min, high=h_max, size=(n, 1)),
np.random.randint(low=w_min, high=w_max, size=(n, 1)),
],
axis=1,
).astype("float32")
@staticmethod
def BlendTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the blend transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
return coords, coords.shape
def test_blend_coords_transforms(self):
"""
Test BlendTransform.
"""
_trans_name = "BlendTransform"
for coords in TestTransforms._coords_provider(w_max=10, h_max=20):
params = (
(coords, 0.0, 1.0),
(coords, 0.3, 0.7),
(coords, 0.5, 0.5),
(coords, 0.7, 0.3),
(coords, 1.0, 0.0),
)
for param in params:
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
@staticmethod
def VFlipTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the vflip transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
        (height,) = args  # unpack the single-element args tuple
coords[:, 1] = height - coords[:, 1]
return coords, coords.shape
def test_vflip_coords_transforms(self):
"""
Test VFlipTransform.
"""
_trans_name = "VFlipTransform"
params = ((20,), (30,))
for coords, param in itertools.product(
TestTransforms._coords_provider(), params
):
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
@staticmethod
def HFlipTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the hflip transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
        (width,) = args  # unpack the single-element args tuple
coords[:, 0] = width - coords[:, 0]
return coords, coords.shape
def test_hflip_coords_transforms(self):
"""
Test HFlipTransform.
"""
_trans_name = "HFlipTransform"
params = ((20,), (30,))
for coords, param in itertools.product(
TestTransforms._coords_provider(), params
):
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
coords_inversed = transformer.inverse().apply_coords(result)
self.assertTrue(
np.allclose(coords_inversed, coords),
f"Transform {_trans_name}'s inverse fails to produce the original coordinates.",
)
@staticmethod
def CropTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying the crop transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
x0, y0, w, h, ow, oh = args
coords[:, 0] -= x0
coords[:, 1] -= y0
return coords, coords.shape
def test_crop_coords_transforms(self):
"""
Test CropTransform.
"""
_trans_name = "CropTransform"
params = (
(0, 0, 0, 0, 10, 11),
(0, 0, 1, 1, 10, 11),
(0, 0, 6, 1, 10, 11),
(0, 0, 1, 6, 10, 11),
(0, 0, 6, 6, 10, 11),
(1, 3, 6, 6, 10, 11),
(3, 1, 6, 6, 10, 11),
(3, 3, 6, 6, 10, 11),
(6, 6, 6, 6, 10, 11),
)
for coords, param in itertools.product(
TestTransforms._coords_provider(), params
):
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
coords_inversed = transformer.inverse().apply_coords(result)
self.assertTrue(
np.allclose(coords_inversed, coords),
f"Transform {_trans_name}'s inverse fails to produce the original coordinates.",
)
@staticmethod
def ScaleTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
        applying the scale transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
h, w, new_h, new_w = args
coords[:, 0] = coords[:, 0] * (new_w * 1.0 / w)
coords[:, 1] = coords[:, 1] * (new_h * 1.0 / h)
return coords, coords.shape
def test_scale_coords_transforms(self):
"""
Test ScaleTransform.
"""
_trans_name = "ScaleTransform"
params = (
(10, 20, 20, 20),
(10, 20, 10, 20),
(10, 20, 20, 10),
(10, 20, 1, 1),
(10, 20, 3, 3),
(10, 20, 5, 10),
(10, 20, 10, 5),
)
for coords, param in itertools.product(
TestTransforms._coords_provider(), params
):
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
coords_inversed = transformer.inverse().apply_coords(result)
self.assertTrue(
np.allclose(coords_inversed, coords),
f"Transform {_trans_name}'s inverse fails to produce the original coordinates.",
)
@staticmethod
def BlendTransform_seg_gt(seg, *args) -> Tuple[np.ndarray, list]:
"""
Given the input segmentation, return the expected output array and shape
after applying the blend transformation.
Args:
seg (array): segmentation before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            seg (array): expected output segmentation after applying the
transformation.
(list): expected shape of the output array.
"""
return seg, seg.shape
def test_blend_seg_transforms(self):
"""
Test BlendTransform.
"""
_trans_name = "BlendTransform"
for seg in TestTransforms._seg_provider(w=10, h=20):
params = (
(seg, 0.0, 1.0),
(seg, 0.3, 0.7),
(seg, 0.5, 0.5),
(seg, 0.7, 0.3),
(seg, 1.0, 0.0),
)
for param in params:
gt_transformer = getattr(self, "{}_seg_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_segmentation(seg)
seg_gt, shape_gt = gt_transformer(seg, *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, seg_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
@staticmethod
def ScaleTransform_seg_gt(seg, *args) -> Tuple[np.ndarray, list]:
"""
Given the input segmentation, return the expected output array and shape
        after applying the scale transformation.
Args:
seg (array): segmentation before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            seg (array): expected output segmentation after applying the
transformation.
(list): expected shape of the output array.
"""
h, w, new_h, new_w = args
float_tensor = torch.nn.functional.interpolate(
to_float_tensor(seg),
size=(new_h, new_w),
mode="nearest",
align_corners=None,
)
numpy_tensor = to_numpy(float_tensor, seg.shape, seg.dtype)
return numpy_tensor, numpy_tensor.shape
def test_scale_seg_transforms(self):
"""
Test ScaleTransform.
"""
_trans_name = "ScaleTransform"
params = (
(10, 20, 20, 20),
(10, 20, 10, 20),
(10, 20, 20, 10),
(10, 20, 1, 1),
(10, 20, 3, 3),
(10, 20, 5, 10),
(10, 20, 10, 5),
)
for seg, param in itertools.product(
TestTransforms._seg_provider(h=10, w=20), params
):
gt_transformer = getattr(self, "{}_seg_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_segmentation(seg)
seg_gt, shape_gt = gt_transformer(seg, *param)
if shape_gt is not None:
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
if seg_gt is not None:
self.assertTrue(
np.allclose(result, seg_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
# Testing failure cases.
params = (
(0, 0, 20, 20),
(0, 0, 0, 0),
(-1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1),
(20, 10, 0, -1),
)
for seg, param in itertools.product(
TestTransforms._seg_provider(w=10, h=20), params
):
gt_transformer = getattr(self, "{}_seg_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
with self.assertRaises((RuntimeError, AssertionError)):
result = transformer.apply_image(seg)
@staticmethod
def NoOpTransform_coords_gt(coords, *args) -> Tuple[np.ndarray, list]:
"""
Given the input array, return the expected output array and shape after
applying no transformation.
Args:
coords (array): coordinates before the transform.
args (list): list of arguments. Details can be found in test case.
Returns:
            coords (array): expected output coordinates after applying the
transformation.
(list): expected shape of the output array.
"""
return coords, coords.shape
def test_no_op_coords_transforms(self):
"""
        Test NoOpTransform.
"""
_trans_name = "NoOpTransform"
        # one empty argument tuple, so the itertools.product below is
        # non-empty and the loop body actually runs
        params = ((),)
for coords, param in itertools.product(
TestTransforms._coords_provider(), params
):
gt_transformer = getattr(self, "{}_coords_gt".format(_trans_name))
transformer = getattr(T, _trans_name)(*param)
result = transformer.apply_coords(np.copy(coords))
coords_gt, shape_gt = gt_transformer(np.copy(coords), *param)
self.assertEqual(
shape_gt,
result.shape,
"transform {} failed to pass the shape check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
self.assertTrue(
np.allclose(result, coords_gt),
"transform {} failed to pass the value check with"
"params {} given input with shape {}".format(
_trans_name, param, result.shape
),
)
def test_transformlist_flatten(self):
t0 = T.HFlipTransform(width=100)
t1 = T.ScaleTransform(3, 4, 5, 6)
t2 = T.CropTransform(4, 5, 6, 7)
t = T.TransformList([T.TransformList([t0, t1]), t2])
self.assertEqual(len(t.transforms), 3)
def test_print_transform(self):
t0 = T.HFlipTransform(width=100)
self.assertEqual(str(t0), "HFlipTransform(width=100)")
t = T.TransformList([T.NoOpTransform(), t0])
self.assertEqual(str(t), f"TransformList[NoOpTransform(), {t0}]")
t = T.BlendTransform(np.zeros((100, 100, 100)), 1.0, 1.0)
self.assertEqual(
str(t), "BlendTransform(src_image=..., src_weight=1.0, dst_weight=1.0)"
)
| [
"[email protected]"
] | |
13f587518e71891fcfa8d3c3adc7a7c6bae59559 | 19236d9e966cf5bafbe5479d613a175211e1dd37 | /cohesity_management_sdk/models/principal.py | bb52d45d0c611c758c973745b3a5977b46a127fd | [
"MIT"
] | permissive | hemanshu-cohesity/management-sdk-python | 236c44fbd9604809027f8ddd0ae6c36e4e727615 | 07c5adee58810979780679065250d82b4b2cdaab | refs/heads/master | 2020-04-29T23:22:08.909550 | 2019-04-10T02:42:16 | 2019-04-10T02:42:16 | 176,474,523 | 0 | 0 | NOASSERTION | 2019-03-19T09:27:14 | 2019-03-19T09:27:12 | null | UTF-8 | Python | false | false | 2,583 | py | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class Principal(object):
"""Implementation of the 'Principal.' model.
Specifies information about a single Principal.
Attributes:
domain (string): Specifies the domain name of the where the principal'
account is maintained.
full_name (string): Specifies the full name (first and last names) of
the principal.
object_class (ObjectClassEnum): Specifies the object class of the
principal (either 'kGroup' or 'kUser'). 'kUser' specifies a user
object class. 'kGroup' specifies a group object class. 'kComputer'
specifies a computer object class.
principal_name (string): Specifies the name of the principal.
sid (string): Specifies the unique Security id (SID) of the
principal.
"""
# Create a mapping from Model property names to API property names
_names = {
"domain":'domain',
"full_name":'fullName',
"object_class":'objectClass',
"principal_name":'principalName',
"sid":'sid'
}
def __init__(self,
domain=None,
full_name=None,
object_class=None,
principal_name=None,
sid=None):
"""Constructor for the Principal class"""
# Initialize members of the class
self.domain = domain
self.full_name = full_name
self.object_class = object_class
self.principal_name = principal_name
self.sid = sid
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
domain = dictionary.get('domain')
full_name = dictionary.get('fullName')
object_class = dictionary.get('objectClass')
principal_name = dictionary.get('principalName')
sid = dictionary.get('sid')
# Return an object of this model
return cls(domain,
full_name,
object_class,
principal_name,
sid)
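# Illustrative round-trip (values are made up; keys follow the _names
# mapping above):
#
#   p = Principal.from_dictionary({'domain': 'corp.example',
#                                  'fullName': 'Jane Doe',
#                                  'objectClass': 'kUser',
#                                  'principalName': 'jdoe',
#                                  'sid': 'S-1-5-21-...'})
#   p.principal_name  # -> 'jdoe'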
| [
"[email protected]"
] | |
51bb7209404f42fecaedf13bb20311729de39237 | 6521f8e16b5bf802c9f0313adc146a2720b37541 | /problems/flow_spiral.py | 516fc3b2391c06497573c08adefba5eb2478b375 | [
"MIT"
] | permissive | simrit1/BERNAISE | fcbc8116a14f823b30263e5fff36260d35d44836 | fdeff715c1730dd7867ee371d1150f06b4c52d15 | refs/heads/master | 2023-08-07T03:35:01.384642 | 2021-09-15T17:57:59 | 2021-09-15T17:57:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,396 | py | import dolfin as df
import numpy as np
import os
from . import *
from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather
from common.cmd import MPI_rank
# import mpi4py
from common.bcs import Fixed, Pressure, NoSlip
#
from ufl import max_value
__author__ = "Matthew Hockley"
def FaceLength(faceNum, mesh, subdomains_file, dim):
    # State the face on which measurements take place (root process only)
    if mpi_is_root():
        print(faceNum)
# Display how mesh is separated
# print("Node: ", MPI_rank, "Mesh Cells: ", mesh.cells().size)
# Import subdomains
mvc = df.MeshValueCollection("size_t", mesh, dim-1)
with df.XDMFFile(mpi_comm(), subdomains_file) as infile:
infile.read(mvc, "name_to_read")
facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)
    ## Calculate limits so the parabolic inflow works on co-ordinates not at 0
# Create empty variable space
X = []
Y = []
Z = []
xInflowRange = 0
yInflowRange = 0
zInflowRange = 0
xInflowMin = 0
yInflowMin = 0
zInflowMin = 0
    # Retrieve all co-ords as elements for the desired face
It_facet = df.SubsetIterator(facet_domains,faceNum)
mpi_barrier()
# print("Faces: ", df.SubsetIterator(facet_domains,faceNum))
#https://fenicsproject.org/qa/13995/print-coordinate-of-boundary-seperately-i-e-left-boundary/
#It_mesh = vertices([facet_domains.array() == 26])
    # Collect all co-ords for the desired face
for facet_domains in It_facet:
for v in df.vertices(facet_domains):
X.append(v.point().x())
Y.append(v.point().y())
Z.append(v.point().z())
# Ensure all processes collect co-ords for desired face
mpi_barrier()
# Gather all co-ords to calc length/min
X = mpi_gather(X, 0)
Y = mpi_gather(Y, 0)
Z = mpi_gather(Z, 0)
# Sync all parallel processes for length/min calc
mpi_barrier()
if mpi_is_root():
# Remove empty and combine all arrays
X = np.concatenate(X)
Y = np.concatenate(Y)
Z = np.concatenate(Z)
# Calculate length and min values
xInflowRange = np.ptp(X,axis=0)
yInflowRange = np.ptp(Y,axis=0)
zInflowRange = np.ptp(Z,axis=0)
xInflowMin = np.amin(X)
yInflowMin = np.amin(Y)
zInflowMin = np.amin(Z)
# END: Sync all parallel processes for length/min calc
mpi_barrier()
# Broadcast all length/min calc to all nodes used
xInflowRange = mpi_bcast(xInflowRange, 0)
yInflowRange = mpi_bcast(yInflowRange, 0)
zInflowRange = mpi_bcast(zInflowRange, 0)
xInflowMin = mpi_bcast(xInflowMin, 0)
yInflowMin = mpi_bcast(yInflowMin, 0)
zInflowMin = mpi_bcast(zInflowMin, 0)
# Clear variables
v = []
It_facet = []
facet_domains = []
return xInflowRange, yInflowRange, zInflowRange, xInflowMin, yInflowMin, zInflowMin
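def _extent_demo():
    # Hedged serial sketch (not called by the solver): mirrors the extent
    # logic used in FaceLength once the face co-ords have been gathered,
    # i.e. range = np.ptp and offset = np.amin along each axis.
    Y = np.array([0.0, 0.2, 0.41])  # assumed inlet y co-ords
    return np.ptp(Y, axis=0), np.amin(Y)  # -> (0.41, 0.0)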
def problem():
info_cyan("Flow around 3D spiral benchmark.")
# 2, beta in phase 1, beta in phase 2
#solutes = [["c_p", 1, 1e-4, 1e-2, 4., 1.],
# ["c_m", -1, 1e-4, 1e-2, 4., 1.]]
solutes = [["c_p", 0, 1e-3, 1e-2, 4., 1.]]
# Format: name : (family, degree, is_vector)
base_elements = dict(u=["Lagrange", 2, True],
p=["Lagrange", 1, False],
phi=["Lagrange", 1, False],
g=["Lagrange", 1, False],
c=["Lagrange", 1, False],
V=["Lagrange", 1, False])
factor = 2
scaling_factor = 1
# Default parameters to be loaded unless starting from checkpoint.
parameters = dict(
solver="basic",#"basic_IPCS_Adj" # Type of problem sovler
folder="results_spiral3D", # Save folder
import_mesh = True, # If importing XDMF mesh files
scale_factor = scaling_factor, # Change mesh dimension (Use if mesh not in metres)
mesh_file = "meshes/mesh_Spiral3D.xdmf", # Mesh filepath
subdomains_file = "meshes/mf_Spiral3D.xdmf", # Subdomains filepath
name_Facet = "inlet", # Name of inlet within "boundaries_Facet" for Hmin/H
restart_folder=False, # Use if restarting from different folder
enable_NS=True, # Enable Navier Stokes (NS)
enable_PF=False, # Enable Phase Field (PF)
enable_EC=False, # Enable Electrochem (EC)
save_intv=5, # Export data time point interval
stats_intv=5, # Export stats interval
checkpoint_intv=50, # Export checkpoint for restart
tstep=0, # Unsure
dt=0.0015/factor, # s Time steps
t_0=0., # s Start time
T=8.,# s Total time
solutes=solutes, # I believe are electrochem (EC)/phase field (PF) related
base_elements=base_elements, # Basic "CG"/"Lagrange" function space
#
H=0.41, # Length of inlet (Updated in "faceLength")
HZ=0, # Length of inlet 2D dimension (Updated in "faceLength")
Hmin=0, # Minimum of inlet (Updated in "faceLength")
HminZ=0, # Minimum of inlet 2D dimension (Updated in "faceLength")
dim = 3, # Dimensions
XYZ = 0, # If XY(2), XZ(1) or YZ(0) direction of flow
#
# Simulation parameters
grav_const=0.0, # 0 gravity as microfluidic
inlet_velocity=-1.5, # m/s (Negative due to -x inflow direction)
V_0=0., # Unsure
#
# Fluid parameters (Water at 22C)
density=[998.2, 998.2], # Kg/m3
viscosity=[1.003e-3, 1.003e-3], # Kg/m.s kinematic viscosity
permittivity=[1., 1.], # EC?
#
# Solver parameters
use_iterative_solvers=True, # if False, might have memory issues
use_pressure_stabilization=False, # Seems to be a type of SUPG, unsure (see solver)
#
# Boundary related physical labels (Numbers within mf_subdomains.xdmf)
# Typically created within GMSH/Netgen and converted by Meshio
boundaries_Facet = {'inlet': 10,
'outletL': 9,
'outletR': 8,
'wall': [3,2,1,11,4,5,6,7],
}
)
# Retrieve inlet dimensions (min/length) from mesh
[mesh1, parameters1] = mesh(parameters["mesh_file"],
parameters["subdomains_file"], parameters["XYZ"],
parameters["boundaries_Facet"], "inlet", parameters["scale_factor"], False)
# Remove temp mesh, not required
mesh1 = []
# Save parameters to main dictionary (Already bcast from mesh function)
parameters["dim"] = parameters1["dim"]
parameters["H"] = parameters1["H"]
parameters["Hmin"] = parameters1["Hmin"]
parameters["HZ"] = parameters1["HZ"]
parameters["HminZ"] = parameters1["HminZ"]
# Output of Hmin and H for inlet velocity calculations (see "velocity_init")
# mpi_barrier()
# if mpi_is_root():
# print("Hmin: ", parameters["Hmin"])
# print("HminZ: ", parameters["Hmin"])
# print("H: ", parameters["H"])
# print("HZ: ", parameters["H"])
# Ensure all processes complete before return (Might be redundant)
mpi_barrier()
return parameters
def mesh(mesh_file, subdomains_file, XYZ,
boundaries_Facet, name_Facet, scale_factor,
import_mesh, **namespace):
# Load mesh from file (NETGEN mesh as .grid to .xml using DOLFIN)
mesh = df.Mesh()
with df.XDMFFile(mpi_comm(), mesh_file) as infile:
infile.read(mesh)
# # Scale mesh from mm to m
x = mesh.coordinates()
x[:, :] *= scale_factor
# # Move mesh so co-ords always positive
#
xyzmin = x.min(axis=0)
mpi_barrier()
xyzmin = np.min(mpi_gather(xyzmin, 0))
mpi_barrier()
xyzmin = mpi_bcast(xyzmin, 0)
mpi_barrier()
x[:, :] = x[:, :] - xyzmin
# Apply to mesh
mesh.bounding_box_tree().build(mesh)
# Define boundary conditions
dim = mesh.topology().dim()
if mpi_is_root():
print('Dim:',dim)
# Ensure all processes have completed
mpi_barrier()
if import_mesh: #Mesh import is true
return mesh
else: #Otherwise generating range and min of boundary facet assuming line
# Retrieve length and min of boundary facet (inlet in most cases)
[X, Y, Z, Xmin, Ymin, Zmin] = FaceLength(boundaries_Facet[name_Facet], mesh,
subdomains_file, dim)
# Display boundary dimensions (inlet in most cases)
mpi_barrier()
if mpi_is_root():
info_yellow("Boundary Dimensions")
print("x: ",X)
print("y: ",Y)
print("z: ",Z)
print("xMin: ",Xmin)
print("yMin: ",Ymin)
print("zMin: ",Zmin)
# Save length/min to dictionary
# This will not overwrite prior dictionary
# as this is in an indepenent function
parameters = dict()
parameters["dim"] = dim
# Depending on flow direction (X/Y/Z),
# the remainder axes need min/length
# for calculating 3D parabolic inflow
if XYZ == 0:
parameters["H"] = Y
parameters["Hmin"] = Ymin
else:
parameters["H"] = X
parameters["Hmin"] = Xmin
parameters["HZ"] = Z
parameters["HminZ"] = Zmin
if XYZ == 3:
parameters["HZ"] = Y
parameters["HminZ"] = Ymin
# Ensure all processes have completed (Might be redundant)
mpi_barrier()
return mesh, parameters
def initialize(H, Hmin, HZ, HminZ, solutes, restart_folder,
field_to_subspace, XYZ,
inlet_velocity,
enable_NS, enable_PF, enable_EC,
**namespace):
""" Create the initial state.
The initial states are specified in a dict indexed by field. The format
should be
w_init_field[field] = 'df.Function(...)'.
The work dicts w_ and w_1 are automatically initialized from these
functions elsewhere in the code.
Note: You only need to specify the initial states that are nonzero.
"""
w_init_field = dict()
# if not restart_folder:
# if enable_NS:
# try:
# subspace = field_to_subspace["u"].collapse()
# except:
# subspace = field_to_subspace["u"]
# u_init = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ)
# w_init_field["u"] = df.interpolate(u_init, subspace)
# Ensure all processes have completed (Might be redundant)
mpi_barrier()
return w_init_field
def create_bcs(dim, H, Hmin, HZ, HminZ, XYZ, inlet_velocity,
V_0, solutes, subdomains_file,
enable_NS, enable_PF, enable_EC,
mesh, boundaries_Facet, **namespace):
""" The boundaries and boundary conditions are defined here. """
mvc = df.MeshValueCollection("size_t", mesh, dim-1)
with df.XDMFFile(subdomains_file) as infile:
infile.read(mvc, "name_to_read")
facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)
# Re-create boundaries with facet_domain for mesh relevance
boundaries = dict(
inlet = [facet_domains, boundaries_Facet["inlet"]],
outletL = [facet_domains, boundaries_Facet["outletL"]],
outletR = [facet_domains, boundaries_Facet["outletR"]],
wall = [facet_domains, boundaries_Facet["wall"]],
)
    # Allocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
for boundary in boundaries:
bcs[boundary] = dict()
## Velocity Phase Flow In (Retrieve expression)
#
#length inlet, water inflow, X/Y/Z, Positive/neg flow along axis
velocity_expr = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ)
velocity_in = Fixed(velocity_expr)
# Pressure set to 0 at outlet
pressure_out = Pressure(0.0)
# Create NoSlip function for walls
noslip = Fixed((0., 0., 0.)) # Unlike 2D "NoSlip()", need 3 dimensions
## Define boundaries
# Note we have two outlets
if enable_NS:
bcs["inlet"]["u"] = velocity_in
bcs["outletL"]["p"] = pressure_out
bcs["outletR"]["p"] = pressure_out
bcs["wall"]["u"] = noslip
# Ensure all processes have completed (Might be redundant)
mpi_barrier()
return boundaries, bcs, bcs_pointwise
def velocity_init(H, HZ, inlet_velocity, XYZ, Pos, Hmin, HminZ, dim=3):
# length inlet, water inflow, X/Y/Z, Positive/neg flow along axis
# XYZ = XY(2), XZ(1) or YZ(0) boundaries
if XYZ == 0: # X axis
return df.Expression(
("((((A*4.0*(x[2] - zMin)*(zRange - (x[2] - zMin))) / pow(zRange,2)) + ((A*4.0*(x[1] - yMin)*(yRange - (x[1] - yMin))) / pow(yRange, 2)))/2)","0.0","0.0"),
A=df.Constant(inlet_velocity), yRange=df.Constant(H), zRange=df.Constant(HZ), yMin=df.Constant(Hmin), zMin=df.Constant(HminZ), degree=dim)
elif XYZ == 1: # Y axis
return df.Expression(
("0.0","((((A*4.0*(x[2] - zMin)*(zRange - (x[2] - zMin))) / pow(zRange,2)) + ((A*4.0*(x[0] - xMin)*(xRange - (x[0] - xMin))) / pow(xRange, 2)))/2)","0.0"),
A=df.Constant(inlet_velocity), xRange=df.Constant(H), zRange=df.Constant(HZ), xMin=df.Constant(Hmin), zMin=df.Constant(HminZ), degree=dim)
else: # if XY == 2: # Z axis
return df.Expression(
("0.0","0.0","((((A*4.0*(x[1] - yMin)*(yRange - (x[1] - yMin))) / pow(yRange,2)) + ((A*4.0*(x[0] - xMin)*(xRange - (x[0] - xMin))) / pow(xRange, 2)))/2)"),
A=df.Constant(inlet_velocity), xRange=df.Constant(H), yRange=df.Constant(HZ), xMin=df.Constant(Hmin), yMin=df.Constant(HminZ), degree=dim)
## If you want a constant and not parabolic inflow, comment above and use...
#
# return df.Expression(("U","0.0","0.0"), U=inlet_velocity, degree=dim)
# Remember to define X/Y/Z inflow manually if constant (current X)
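def _parabolic_profile_demo():
    # Hedged sanity check (not called by the solver): evaluates the same
    # averaged parabolic profile as the YZ (X-flow) expression above at the
    # channel centre, where the speed should equal the peak value A.
    A, yMin, zMin, yRange, zRange = -1.5, 0.0, 0.0, 0.41, 0.41  # assumed
    y = yMin + yRange / 2.0
    z = zMin + zRange / 2.0
    return ((A * 4.0 * (z - zMin) * (zRange - (z - zMin)) / zRange**2)
            + (A * 4.0 * (y - yMin) * (yRange - (y - yMin)) / yRange**2)) / 2.0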
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
field_to_subproblem, subproblems, w_, **namespace):
info_blue("Timestep = {}".format(tstep))
# Function which runs every simulation tick
def start_hook(newfolder, **namespace):
statsfile = os.path.join(newfolder, "Statistics/stats.dat")
return dict(statsfile=statsfile)
# Function which runs at start of simulation | [
"[email protected]"
] | |
4d781c9a4ed8adc2dc2d0cdeded54192e62e110c | 003ffcf8144565404636f3d74590a8d6b10a90a4 | /492-construct-the-rectangle/492-construct-the-rectangle.py | 563533a10a837712700af804ed787500a2abbf67 | [] | no_license | congve1/leetcode | fb31edf93049e21210d73f7b3e7b9b82057e1d7a | ce1e802b5052da2cdb919d6d7e39eed860e0b61b | refs/heads/master | 2020-05-13T19:19:58.835432 | 2019-05-06T00:44:07 | 2019-05-06T00:44:07 | 181,652,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
| [
"[email protected]"
] | |
0312c4b5ff3caa1244cdf11977dc0ede1ef32a0c | 4554fcb85e4c8c33a5b5e68ab9f16c580afcab41 | /projecteuler/test_xiaobai_41.py | b6b7e14cd121456b1dd69da79870b0bbfcde910c | [] | no_license | xshen1122/Follow_Huang_Python | 12f4cebd8ddbc241a1c32cfa16288f059b530557 | fcea6d1361aa768fb286e1ef4a22d5c4d0026667 | refs/heads/master | 2021-01-01T04:37:31.081142 | 2017-12-05T07:31:34 | 2017-12-05T07:31:34 | 97,211,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # test_xiaobai_41.py
# coding: utf-8
'''
We shall say that an n-digit number is pandigital if it makes use of
all the digits 1 to n exactly once. For example, 2143 is a 4-digit
pandigital and is also prime.
What is the largest n-digit pandigital prime that exists?
'''
def checkPrime(number):
    if number < 2:
        return False
    # trial division only needs to run up to sqrt(number)
    for i in range(2, int(number**0.5) + 1):
        if number % i == 0:
            return False
    return True
def checkNumber(number):
    # pandigital check: an n-digit number must use each digit 1..n exactly once
    digits = sorted(str(number))
    return digits == [str(d) for d in range(1, len(digits) + 1)]
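# Illustrative check of the helpers above, using the example from the
# problem statement: checkNumber(2143) -> True and checkPrime(2143) -> True.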
if __name__ == '__main__':
    # 8- and 9-digit pandigitals have digit sums 36 and 45, both divisible
    # by 3, so none of them can be prime; start the search from 7 digits
    for number in range(7654321, 1000, -2):  # step -2 skips even numbers
        if checkNumber(number) and checkPrime(number):
            print 'the largest pandigital prime is ', number
            break
| [
"[email protected]"
] | |
38bbe8aae94fbcd4dffe66ee910ac8d600b52462 | ae6f8eec0f08045624c6042b723f99695c5e446c | /backend/course/admin.py | d2dcfb95302bae425f5dc9bbdb332bbc87f07efe | [] | no_license | crowdbotics-apps/practual-life-21189 | a15aa6f2fe6cd5dc8feb6f6a214aed258509b4f7 | d33e5d7e5d5d9eba0c549a90cecdd96e90d5f8ae | refs/heads/master | 2022-12-29T12:45:45.469879 | 2020-10-06T20:41:31 | 2020-10-06T20:41:31 | 301,849,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from django.contrib import admin
from .models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
SubscriptionType,
Enrollment,
Category,
)
admin.site.register(Category)
admin.site.register(Module)
admin.site.register(Event)
admin.site.register(Subscription)
admin.site.register(SubscriptionType)
admin.site.register(Recording)
admin.site.register(Enrollment)
admin.site.register(Course)
admin.site.register(Group)
# Register your models here.
| [
"[email protected]"
] | |
882dc53ad67fb969d878846c3bdb805805d7b2b1 | 353b36f7907569945d9f956730b31463df8fa1f4 | /bpl_lib/transactions/Vote.py | 82a48eea0e3afe81b8c61396acbb0a027d422154 | [
"MIT"
] | permissive | DuneRoot/bpl-lib | 54b6a4387ecd404f1fcfa9d46b7ce68f136d90ac | 3ac1026cfc01ca5a71515caa5e352e4517cba0cc | refs/heads/master | 2020-03-23T19:56:49.291707 | 2019-02-20T19:08:55 | 2019-02-20T19:08:55 | 142,011,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | from bpl_lib.transactions.Transaction import Transaction
from bpl_lib.helpers.Constants import TRANSACTION_TYPE
from bpl_lib.address.Address import Address
from bpl_lib.crypto.Keys import Keys
class Vote(Transaction):
def __init__(self, fee, _error_use_class_method=True):
"""
Creates a vote transaction
:param fee: fee for transaction
:param _error_use_class_method: boolean flag, used to indicate if the transaction
was created from generate or from_dict
"""
if _error_use_class_method:
raise TypeError("Please use Vote.generate(args) or Vote.from_dict(args) to construct me.")
super().__init__(TRANSACTION_TYPE.VOTE, fee)
@classmethod
def generate(cls, votes, secret, second_secret=None, fee=None):
"""
Creates a vote transaction
:param votes: votes (list)
:param secret: secret passphrase (string or bytes)
:param second_secret: second secret passphrase (string or bytes)
:param fee: fee for transaction
:return: (Vote)
"""
self = cls(fee, _error_use_class_method=False)
self._sender_public_key = Keys(secret).get_public_key()
self._recipient_id = Address.from_secret(secret)
self._asset["votes"] = votes
self.sign(secret, second_secret)
return self
@classmethod
def from_dict(cls, transaction):
"""
Creates a vote transaction
:param transaction: transaction (dict)
:return: (Vote)
"""
self = cls(transaction["fee"], _error_use_class_method=False)
self._sender_public_key = transaction["senderPublicKey"]
self._recipient_id = transaction["recipientId"]
self._timestamp = transaction["timestamp"]
self._asset["votes"] = transaction["asset"]["votes"]
self.sign_from_dict(transaction)
return self
def _handle_transaction_type(self, buffer):
buffer.write_bytes("".join(self._asset["votes"]).encode())
return buffer
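# Hypothetical usage sketch (passphrase, delegate key and fee are made up):
#
#   vote = Vote.generate(["+<delegate public key>"], "my secret passphrase",
#                        fee=100000000)
#
# Each vote entry carries a "+" (up-vote) or "-" (down-vote) prefix; the
# joined entries are what _handle_transaction_type writes into the buffer.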
| [
"[email protected]"
] | |
1c03923938a1f6d9898a0b07bb9d16d14b83fab1 | 73e3990fdb1e38a053a047d204e26acb43d403e6 | /hooks/post_gen_project.py | 49fda6a120e8657cb108e878edb283020eea6efc | [
"MIT"
] | permissive | dunderlabs/dunder_cookiecutter_template | ad972870570463ff3aa68d887d7bf92b25ef9d11 | bda261b9f0e5c171470b9eaa80c416ba1a8e656d | refs/heads/master | 2021-01-10T12:21:11.022232 | 2016-04-06T19:48:15 | 2016-04-06T19:48:15 | 52,767,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | import os
import hashlib
import random
import time
PROJECT_DIR = os.path.realpath(os.path.curdir)
# Use the system PRNG if possible
# https://github.com/django/django/blob/stable/1.9.x/django/utils/crypto.py#L18-L26
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
https://github.com/django/django/blob/stable/1.9.x/django/utils/crypto.py#L54-L77
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
        # Django's original seeds with settings.SECRET_KEY here; no settings
        # module exists in this hook, so the process id supplies the extra
        # entropy instead.
        random.seed(
            hashlib.sha256(
                ("%s%s%s" % (
                    random.getstate(),
                    time.time(),
                    os.getpid())).encode('utf-8')
            ).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
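def _random_key_demo():
    # Illustrative only: draws a 50-character key from the same alphabet
    # that generate_secret_key below substitutes into .env.example.
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, chars)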
def generate_secret_key(project_directory):
env_path = os.path.join(project_directory, '.env.example')
with open(env_path) as f:
env_file = f.read()
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
env_file = env_file.replace('KEY_PLACE', get_random_string(50, chars))
env_file = env_file.replace('DEBUG_VALUE', 'True')
with open(env_path, 'w') as f:
f.write(env_file)
generate_secret_key(PROJECT_DIR)
| [
"[email protected]"
] | |
5d280846dece158fbb8c53206b775dc82c7999b2 | 523fb785bda41e33546c929a5c2de6c93f98b434 | /LeetCode/208.implement-trie-prefix-tree.py | 24c8114b532aab37979451a9d92a433188a77e05 | [] | no_license | lizhe960118/TowardOffer | afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e | a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd | refs/heads/master | 2020-04-27T10:33:21.452707 | 2019-05-02T10:47:01 | 2019-05-02T10:47:01 | 174,259,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | #
# @lc app=leetcode id=208 lang=python3
#
# [208] Implement Trie (Prefix Tree)
#
# https://leetcode.com/problems/implement-trie-prefix-tree/description/
#
# algorithms
# Medium (37.24%)
# Total Accepted: 167K
# Total Submissions: 448.5K
# Testcase Example: '["Trie","insert","search","search","startsWith","insert","search"]\n[[],["apple"],["apple"],["app"],["app"],["app"],["app"]]'
#
# Implement a trie with insert, search, and startsWith methods.
#
# Example:
#
#
# Trie trie = new Trie();
#
# trie.insert("apple");
# trie.search("apple"); // returns true
# trie.search("app"); // returns false
# trie.startsWith("app"); // returns true
# trie.insert("app");
# trie.search("app"); // returns true
#
#
# Note:
#
#
# You may assume that all inputs are consist of lowercase letters a-z.
# All inputs are guaranteed to be non-empty strings.
#
#
#
class TrieNode(object):
def __init__(self):
self.child = {}
self.is_end = False
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
cur_node = self.root
for char_s in word:
if char_s not in cur_node.child:
next_node = TrieNode()
cur_node.child[char_s] = next_node
cur_node = cur_node.child[char_s]
cur_node.is_end = True
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
cur_node = self.root
for char_s in word:
if char_s not in cur_node.child:
return False
else:
cur_node = cur_node.child[char_s]
return cur_node.is_end
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
cur_node = self.root
for char_s in prefix:
if char_s not in cur_node.child:
return False
else:
cur_node = cur_node.child[char_s]
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
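# Quick self-check mirroring the example in the problem statement above
# (illustrative; the LeetCode judge instantiates the class itself):
def _trie_demo():
    trie = Trie()
    trie.insert("apple")
    assert trie.search("apple")
    assert not trie.search("app")
    assert trie.startsWith("app")
    trie.insert("app")
    assert trie.search("app")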
| [
"[email protected]"
] | |
8dad47d2b5726e1d51d8266f3a07f170aa7f9599 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_enmeshes.py | 8ab83b49bb6e84503d7c1bb5705287d69908efb8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.verbs._enmesh import _ENMESH
#calss header
class _ENMESHES(_ENMESH, ):
def __init__(self,):
_ENMESH.__init__(self)
self.name = "ENMESHES"
self.specie = 'verbs'
self.basic = "enmesh"
self.jsondata = {}
| [
"[email protected]"
] | |
b22ee84a165ff4362a0945dff1f3663ca11391c7 | c5d05d0ef2eca69c59278b0aabc919f705bb8f79 | /session7/block.py | 0531f86f84d558145141994434bb3b31066f4617 | [] | no_license | brettg/pb-exercises | feeca91cec969a78956f298fa305aabf3aabd5a1 | b72ebc29920975d78bc82fadad1f69b532dcd3d8 | refs/heads/master | 2021-07-23T02:37:13.061790 | 2017-10-31T15:10:22 | 2017-10-31T15:10:22 | 109,151,385 | 0 | 0 | null | 2017-11-01T15:52:25 | 2017-11-01T15:52:25 | null | UTF-8 | Python | false | false | 18,692 | py | from binascii import hexlify, unhexlify
from io import BytesIO
from unittest import TestCase
from helper import (
double_sha256,
int_to_little_endian,
little_endian_to_int,
merkle_parent,
merkle_parent_level,
merkle_path,
merkle_root,
)
class Proof:
def __init__(self, merkle_root, tx_hash, index, merkle_proof):
self.merkle_root = merkle_root
self.tx_hash = tx_hash
self.index = index
self.merkle_proof = merkle_proof
def __repr__(self):
s = '{}:{}:{}:['.format(
hexlify(self.merkle_root).decode('ascii'),
hexlify(self.tx_hash).decode('ascii'),
self.index,
)
for p in self.merkle_proof:
s += '{},'.format(hexlify(p).decode('ascii'))
s += ']'
return s
def verify(self):
'''Returns whether this proof is valid'''
        # current_hash starts with the tx_hash, reversed to the internal
        # little-endian byte order (assumed to match the tree below)
        current_hash = self.tx_hash[::-1]
        # Get the Merkle Path for the index and 2**len(merkle_proof)
        path = merkle_path(self.index, 2**len(self.merkle_proof))
        # Loop through Merkle Path
        for proof_hash, level_index in zip(self.merkle_proof, path):
            # If odd, combine current_hash with merkle proof[i] on the left
            if level_index % 2 == 1:
                current_hash = merkle_parent(proof_hash, current_hash)
            # If even, combine current_hash with merkle proof[i] on the right
            else:
                current_hash = merkle_parent(current_hash, proof_hash)
        # current_hash is the double_sha256 of the concatenation (merkle_parent)
        # if final result is equal to merkle_root, return True
        return current_hash[::-1] == self.merkle_root
class Block:
def __init__(self, version, prev_block, merkle_root, timestamp, bits, nonce, tx_hashes=None):
self.version = version
self.prev_block = prev_block
self.merkle_root = merkle_root
self.timestamp = timestamp
self.bits = bits
self.nonce = nonce
self.tx_hashes = tx_hashes
self.merkle_tree = None
@classmethod
def parse(cls, s):
'''Takes a byte stream and parses a block. Returns a Block object'''
# s.read(n) will read n bytes from the stream
# version - 4 bytes, little endian, interpret as int
version = little_endian_to_int(s.read(4))
# prev_block - 32 bytes, little endian
prev_block = s.read(32)[::-1]
# merkle_root - 32 bytes, little endian
merkle_root = s.read(32)[::-1]
# timestamp - 4 bytes, little endian, interpret as int
timestamp = little_endian_to_int(s.read(4))
# bits - 4 bytes
bits = s.read(4)
# nonce - 4 bytes
nonce = s.read(4)
return cls(version, prev_block, merkle_root, timestamp, bits, nonce)
def serialize(self):
'''Returns the 80 byte block header'''
result = int_to_little_endian(self.version, 4)
result += self.prev_block[::-1]
result += self.merkle_root[::-1]
result += int_to_little_endian(self.timestamp, 4)
result += self.bits
result += self.nonce
return result
def hash(self):
'''Returns the double-sha256 interpreted little endian of the block'''
return double_sha256(self.serialize())[::-1]
def bip9(self):
'''Returns whether this block is signaling readiness for BIP9'''
# BIP9 is signalled if the top 3 bits are 001
return self.version >> 29 == 0b001
def bip91(self):
'''Returns whether this block is signaling readiness for BIP91'''
# BIP91 is signalled if the top 5th bit from the right is 1
return (self.version >> 4) & 1 == 1
def bip141(self):
'''Returns whether this block is signaling readiness for BIP141'''
# BIP91 is signalled if the top 2nd bit from the right is 1
return (self.version >> 1) & 1 == 1
def target(self):
'''Returns the proof-of-work target based on the bits'''
# reverse the bits
# first byte is exponent
# the other three bytes are the coefficient.
# the formula is:
# coefficient * 2**(8*(exponent-3))
exponent = self.bits[-1]
coefficient = little_endian_to_int(self.bits[:-1])
return coefficient * 2**(8*(exponent - 3))
def difficulty(self):
'''Returns the block difficulty based on the bits'''
# note difficulty is (target of lowest difficulty) / (self's target)
# lowest difficulty has bits that equal 0xffff001d
exponent = 0x1d
minimum_target = 0xffff * 2**(8*(0x1d-3))
return minimum_target / self.target()
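    # Worked example for target()/difficulty() above, using the lowest-
    # difficulty bits 0xffff001d: exponent = 0x1d, coefficient = 0xffff,
    # so target = 0xffff * 2**(8*(0x1d - 3)) and difficulty = 1.0.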
def check_pow(self):
'''Returns whether this block satisfies proof of work'''
# You will need to get the hash of this block and interpret it
# as an integer. If the hash of the block is lower, pow is good.
# hint: int.from_bytes('', 'big')
s256 = self.hash()
return int.from_bytes(s256, 'big') < self.target()
def validate_merkle_root(self):
'''Gets the merkle root of the tx_hashes and checks that it's
the same as the merkle root of this block.
'''
        # reverse all the transaction hashes
        hashes = [h[::-1] for h in self.tx_hashes]
        # get the Merkle Root
        root = merkle_root(hashes)
        # reverse the Merkle Root
        # check that this block's merkle root is the same as the Merkle Root
        return root[::-1] == self.merkle_root
def calculate_merkle_tree(self):
'''Calculate and store the entire Merkle Tree'''
# store the result in self.merkle_tree, an array, 0 representing
# the bottom level and 1 the parent level of level 0 and so on.
        self.merkle_tree = []
        # reverse all the transaction hashes
        current_level = [h[::-1] for h in self.tx_hashes]
        # if there is more than 1 hash:
        while len(current_level) > 1:
            # store current level
            self.merkle_tree.append(current_level)
            # Make current level Merkle Parent level (assumed to pad odd
            # levels by duplicating the last hash in place)
            current_level = merkle_parent_level(current_level)
        # store root as the final level
        self.merkle_tree.append(current_level)
def create_merkle_proof(self, tx_hash):
        # if self.merkle_tree is empty, go and calculate the merkle tree
        if self.merkle_tree is None:
            self.calculate_merkle_tree()
        # find the index of this tx_hash (tree levels are little-endian)
        index = self.merkle_tree[0].index(tx_hash[::-1])
        # Get the Merkle Path
        path = merkle_path(index, len(self.merkle_tree[0]))
        # initialize merkle_proof list
        merkle_proof = []
        # Loop over the items in the Merkle Path
        for level, level_index in enumerate(path):
            # Find the partner index (-1 for odd, +1 for even)
            partner = level_index - 1 if level_index % 2 == 1 else level_index + 1
            # add partner to merkle_proof list
            merkle_proof.append(self.merkle_tree[level][partner])
        # Return a Proof instance
        return Proof(self.merkle_root, tx_hash, index, merkle_proof)
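# End-to-end sketch of the three Merkle methods above (illustrative; assumes
# the little-endian tree convention noted in the comments):
#
#   block.tx_hashes = hashes                      # big-endian tx hashes
#   proof = block.create_merkle_proof(hashes[0])  # builds the tree lazily
#   proof.verify()                                # -> True for a valid proof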
class BlockTest(TestCase):
def test_parse(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.version, 0x20000002)
want = unhexlify('000000000000000000fd0c220a0a8c3bc5a7b487e8c8de0dfa2373b12894c38e')
self.assertEqual(block.prev_block, want)
want = unhexlify('be258bfd38db61f957315c3f9e9c5e15216857398d50402d5089a8e0fc50075b')
self.assertEqual(block.merkle_root, want)
self.assertEqual(block.timestamp, 0x59a7771e)
self.assertEqual(block.bits, unhexlify('e93c0118'))
self.assertEqual(block.nonce, unhexlify('a4ffd71d'))
def test_serialize(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.serialize(), block_raw)
def test_hash(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.hash(), unhexlify('0000000000000000007e9e4c586439b0cdbe13b1370bdd9435d76a644d047523'))
def test_bip9(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip9())
block_raw = unhexlify('0400000039fa821848781f027a2e6dfabbf6bda920d9ae61b63400030000000000000000ecae536a304042e3154be0e3e9a8220e5568c3433a9ab49ac4cbb74f8df8e8b0cc2acf569fb9061806652c27')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip9())
def test_bip91(self):
block_raw = unhexlify('1200002028856ec5bca29cf76980d368b0a163a0bb81fc192951270100000000000000003288f32a2831833c31a25401c52093eb545d28157e200a64b21b3ae8f21c507401877b5935470118144dbfd1')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip91())
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip91())
def test_bip141(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip141())
block_raw = unhexlify('0000002066f09203c1cf5ef1531f24ed21b1915ae9abeb691f0d2e0100000000000000003de0976428ce56125351bae62c5b8b8c79d8297c702ea05d60feabb4ed188b59c36fa759e93c0118b74b2618')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip141())
def test_target(self):
block_raw = unhexlify('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.target(), 0x13ce9000000000000000000000000000000000000000000)
self.assertEqual(int(block.difficulty()), 888171856257)
def test_check_pow(self):
block_raw = unhexlify('04000000fbedbbf0cfdaf278c094f187f2eb987c86a199da22bbb20400000000000000007b7697b29129648fa08b4bcd13c9d5e60abb973a1efac9c8d573c71c807c56c3d6213557faa80518c3737ec1')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.check_pow())
block_raw = unhexlify('04000000fbedbbf0cfdaf278c094f187f2eb987c86a199da22bbb20400000000000000007b7697b29129648fa08b4bcd13c9d5e60abb973a1efac9c8d573c71c807c56c3d6213557faa80518c3737ec0')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.check_pow())
def test_validate_merkle_root(self):
hashes_hex = [
'f54cb69e5dc1bd38ee6901e4ec2007a5030e14bdd60afb4d2f3428c88eea17c1',
'c57c2d678da0a7ee8cfa058f1cf49bfcb00ae21eda966640e312b464414731c1',
'b027077c94668a84a5d0e72ac0020bae3838cb7f9ee3fa4e81d1eecf6eda91f3',
'8131a1b8ec3a815b4800b43dff6c6963c75193c4190ec946b93245a9928a233d',
'ae7d63ffcb3ae2bc0681eca0df10dda3ca36dedb9dbf49e33c5fbe33262f0910',
'61a14b1bbdcdda8a22e61036839e8b110913832efd4b086948a6a64fd5b3377d',
'fc7051c8b536ac87344c5497595d5d2ffdaba471c73fae15fe9228547ea71881',
'77386a46e26f69b3cd435aa4faac932027f58d0b7252e62fb6c9c2489887f6df',
'59cbc055ccd26a2c4c4df2770382c7fea135c56d9e75d3f758ac465f74c025b8',
'7c2bf5687f19785a61be9f46e031ba041c7f93e2b7e9212799d84ba052395195',
'08598eebd94c18b0d59ac921e9ba99e2b8ab7d9fccde7d44f2bd4d5e2e726d2e',
'f0bb99ef46b029dd6f714e4b12a7d796258c48fee57324ebdc0bbc4700753ab1',
]
hashes = [unhexlify(x) for x in hashes_hex]
stream = BytesIO(unhexlify('00000020fcb19f7895db08cadc9573e7915e3919fb76d59868a51d995201000000000000acbcab8bcc1af95d8d563b77d24c3d19b18f1486383d75a5085c4e86c86beed691cfa85916ca061a00000000'))
block = Block.parse(stream)
block.tx_hashes = hashes
self.assertTrue(block.validate_merkle_root())
def test_calculate_merkle_tree(self):
hashes_hex = [
'f54cb69e5dc1bd38ee6901e4ec2007a5030e14bdd60afb4d2f3428c88eea17c1',
'c57c2d678da0a7ee8cfa058f1cf49bfcb00ae21eda966640e312b464414731c1',
'b027077c94668a84a5d0e72ac0020bae3838cb7f9ee3fa4e81d1eecf6eda91f3',
'8131a1b8ec3a815b4800b43dff6c6963c75193c4190ec946b93245a9928a233d',
'ae7d63ffcb3ae2bc0681eca0df10dda3ca36dedb9dbf49e33c5fbe33262f0910',
'61a14b1bbdcdda8a22e61036839e8b110913832efd4b086948a6a64fd5b3377d',
'fc7051c8b536ac87344c5497595d5d2ffdaba471c73fae15fe9228547ea71881',
'77386a46e26f69b3cd435aa4faac932027f58d0b7252e62fb6c9c2489887f6df',
'59cbc055ccd26a2c4c4df2770382c7fea135c56d9e75d3f758ac465f74c025b8',
'7c2bf5687f19785a61be9f46e031ba041c7f93e2b7e9212799d84ba052395195',
'08598eebd94c18b0d59ac921e9ba99e2b8ab7d9fccde7d44f2bd4d5e2e726d2e',
'f0bb99ef46b029dd6f714e4b12a7d796258c48fee57324ebdc0bbc4700753ab1',
]
hashes = [unhexlify(x) for x in hashes_hex]
stream = BytesIO(unhexlify('00000020fcb19f7895db08cadc9573e7915e3919fb76d59868a51d995201000000000000acbcab8bcc1af95d8d563b77d24c3d19b18f1486383d75a5085c4e86c86beed691cfa85916ca061a00000000'))
block = Block.parse(stream)
block.tx_hashes = hashes
block.calculate_merkle_tree()
want0 = [
'c117ea8ec828342f4dfb0ad6bd140e03a50720ece40169ee38bdc15d9eb64cf5',
'c131474164b412e3406696da1ee20ab0fc9bf41c8f05fa8ceea7a08d672d7cc5',
'f391da6ecfeed1814efae39e7fcb3838ae0b02c02ae7d0a5848a66947c0727b0',
'3d238a92a94532b946c90e19c49351c763696cff3db400485b813aecb8a13181',
'10092f2633be5f3ce349bf9ddbde36caa3dd10dfa0ec8106bce23acbff637dae',
'7d37b3d54fa6a64869084bfd2e831309118b9e833610e6228adacdbd1b4ba161',
'8118a77e542892fe15ae3fc771a4abfd2f5d5d5997544c3487ac36b5c85170fc',
'dff6879848c2c9b62fe652720b8df5272093acfaa45a43cdb3696fe2466a3877',
'b825c0745f46ac58f7d3759e6dc535a1fec7820377f24d4c2c6ad2cc55c0cb59',
'95513952a04bd8992721e9b7e2937f1c04ba31e0469fbe615a78197f68f52b7c',
'2e6d722e5e4dbdf2447ddecc9f7dabb8e299bae921c99ad5b0184cd9eb8e5908',
'b13a750047bc0bdceb2473e5fe488c2596d7a7124b4e716fdd29b046ef99bbf0',
]
want1 = [
'8b30c5ba100f6f2e5ad1e2a742e5020491240f8eb514fe97c713c31718ad7ecd',
'7f4e6f9e224e20fda0ae4c44114237f97cd35aca38d83081c9bfd41feb907800',
'ade48f2bbb57318cc79f3a8678febaa827599c509dce5940602e54c7733332e7',
'68b3e2ab8182dfd646f13fdf01c335cf32476482d963f5cd94e934e6b3401069',
'43e7274e77fbe8e5a42a8fb58f7decdb04d521f319f332d88e6b06f8e6c09e27',
'4f492e893bf854111c36cb5eff4dccbdd51b576e1cfdc1b84b456cd1c0403ccb',
]
want2 = [
'26906cb2caeb03626102f7606ea332784281d5d20e2b4839fbb3dbb37262dbc1',
'717a0d17538ff5ad2c020bab38bdcde66e63f3daef88f89095f344918d5d4f96',
'd20629030c7e48e778c1c837d91ebadc2f2ee319a0a0a461f4a9538b5cae2a69',
'd20629030c7e48e778c1c837d91ebadc2f2ee319a0a0a461f4a9538b5cae2a69',
]
want3 = [
'b9f5560ce9630ea4177a7ac56d18dea73c8f76b59e02ab4805eaeebd84a4c5b1',
'00aa9ad6a7841ffbbf262eb775f8357674f1ea23af11c01cfb6d481fec879701',
]
want4 = [
'acbcab8bcc1af95d8d563b77d24c3d19b18f1486383d75a5085c4e86c86beed6',
]
self.assertEqual(block.merkle_tree[0], [unhexlify(x) for x in want0])
self.assertEqual(block.merkle_tree[1], [unhexlify(x) for x in want1])
self.assertEqual(block.merkle_tree[2], [unhexlify(x) for x in want2])
self.assertEqual(block.merkle_tree[3], [unhexlify(x) for x in want3])
self.assertEqual(block.merkle_tree[4], [unhexlify(x) for x in want4])
def test_create_merkle_proof(self):
hashes_hex = [
'f54cb69e5dc1bd38ee6901e4ec2007a5030e14bdd60afb4d2f3428c88eea17c1',
'c57c2d678da0a7ee8cfa058f1cf49bfcb00ae21eda966640e312b464414731c1',
'b027077c94668a84a5d0e72ac0020bae3838cb7f9ee3fa4e81d1eecf6eda91f3',
'8131a1b8ec3a815b4800b43dff6c6963c75193c4190ec946b93245a9928a233d',
'ae7d63ffcb3ae2bc0681eca0df10dda3ca36dedb9dbf49e33c5fbe33262f0910',
'61a14b1bbdcdda8a22e61036839e8b110913832efd4b086948a6a64fd5b3377d',
'fc7051c8b536ac87344c5497595d5d2ffdaba471c73fae15fe9228547ea71881',
'77386a46e26f69b3cd435aa4faac932027f58d0b7252e62fb6c9c2489887f6df',
'59cbc055ccd26a2c4c4df2770382c7fea135c56d9e75d3f758ac465f74c025b8',
'7c2bf5687f19785a61be9f46e031ba041c7f93e2b7e9212799d84ba052395195',
'08598eebd94c18b0d59ac921e9ba99e2b8ab7d9fccde7d44f2bd4d5e2e726d2e',
'f0bb99ef46b029dd6f714e4b12a7d796258c48fee57324ebdc0bbc4700753ab1',
]
hashes = [unhexlify(x) for x in hashes_hex]
stream = BytesIO(unhexlify('00000020fcb19f7895db08cadc9573e7915e3919fb76d59868a51d995201000000000000acbcab8bcc1af95d8d563b77d24c3d19b18f1486383d75a5085c4e86c86beed691cfa85916ca061a00000000'))
block = Block.parse(stream)
block.tx_hashes = hashes
h = hashes[7]
proof = block.create_merkle_proof(h)
self.assertEqual(proof.index, 7)
want = [
'8118a77e542892fe15ae3fc771a4abfd2f5d5d5997544c3487ac36b5c85170fc',
'ade48f2bbb57318cc79f3a8678febaa827599c509dce5940602e54c7733332e7',
'26906cb2caeb03626102f7606ea332784281d5d20e2b4839fbb3dbb37262dbc1',
'00aa9ad6a7841ffbbf262eb775f8357674f1ea23af11c01cfb6d481fec879701',
]
self.assertEqual(proof.merkle_proof, [unhexlify(x) for x in want])
def test_verify_merkle_proof(self):
merkle_root = unhexlify('d6ee6bc8864e5c08a5753d3886148fb1193d4cd2773b568d5df91acc8babbcac')
tx_hash = unhexlify('77386a46e26f69b3cd435aa4faac932027f58d0b7252e62fb6c9c2489887f6df')
index = 7
proof_hex_hashes = [
'8118a77e542892fe15ae3fc771a4abfd2f5d5d5997544c3487ac36b5c85170fc',
'ade48f2bbb57318cc79f3a8678febaa827599c509dce5940602e54c7733332e7',
'26906cb2caeb03626102f7606ea332784281d5d20e2b4839fbb3dbb37262dbc1',
'00aa9ad6a7841ffbbf262eb775f8357674f1ea23af11c01cfb6d481fec879701',
]
proof_hashes = [unhexlify(x) for x in proof_hex_hashes]
proof = Proof(merkle_root=merkle_root, tx_hash=tx_hash, index=index, merkle_proof=proof_hashes)
self.assertTrue(proof.verify())
| [
"[email protected]"
] | |
355814c9f4e4dbeb520f70c969d171be39f800ac | 37eda7bc5ea24e25a11d68c352b6c8e5a1ca2ae4 | /barkscape/server/base_runner.py | 4427eeda115344df19efe5ab9bef9147e3cddac5 | [] | no_license | bark-simulator/barkscape | d1c3668cc3cd9773380b4b2ed365f96ac01548c3 | dc5265ef8f970488646e6ae91cd47563a0ef11f1 | refs/heads/master | 2023-05-07T21:05:11.338776 | 2021-05-31T13:18:55 | 2021-05-31T13:18:55 | 352,927,882 | 3 | 3 | null | 2021-03-31T19:37:49 | 2021-03-30T08:33:01 | Python | UTF-8 | Python | false | false | 1,335 | py | # Copyright (c) 2021 fortiss GmbH
#
# Authors: Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import sys, os, logging
import asyncio, json
import xviz_avs
from xviz_avs.server import XVIZBaseSession
# BARKSCAPE
from barkscape.server.bark_xviz_stream import BarkXvizStream
"""BaseRunner
Steps the runnable_object and the XVIZstream.
"""
class BaseRunner(XVIZBaseSession):
def __init__(
self, socket, request, runnable_object=None,
dt=0.2, logger=None, stream=None):
super().__init__(socket, request)
self._runnable_object = runnable_object
self._bark_xviz_stream = stream or BarkXvizStream()
self._socket = socket
self._dt = dt
self._logger = logger
def on_connect(self):
print("Web-client connected.")
def on_disconnect(self):
print("Web-client disconnect.")
"""Main functionality for stepping and sending visualization messages
"""
async def main(self):
t = 0
metadata = self._bark_xviz_stream.get_metadata()
await self._socket.send(json.dumps(metadata))
message = await self._bark_xviz_stream.get_message(t, self._runnable_object)
await self._socket.send(json.dumps(message))
await asyncio.sleep(self._dt) | [
"[email protected]"
] | |
ed5f51e53bd578380ba9d9e7121d1bb5587ed8b7 | 15f0514701a78e12750f68ba09d68095172493ee | /Python3/504.py | 866ce9e5a97deb28377cac17d45bd877278d543f | [
"MIT"
] | permissive | strengthen/LeetCode | 5e38c8c9d3e8f27109b9124ae17ef8a4139a1518 | 3ffa6dcbeb787a6128641402081a4ff70093bb61 | refs/heads/master | 2022-12-04T21:35:17.872212 | 2022-11-30T06:23:24 | 2022-11-30T06:23:24 | 155,958,163 | 936 | 365 | MIT | 2021-11-15T04:02:45 | 2018-11-03T06:47:38 | null | UTF-8 | Python | false | false | 927 | py | __________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def convertToBase7(self, num: int) -> str:
if num < 0:
return '-' + self.convertToBase7(-num)
elif num >= 7:
return self.convertToBase7(num//7)+str(num%7)
elif num < 7:
return str(num)
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def convertToBase7(self, num: int) -> str:
if num < 0:
return "-" + self.convertToBase7(-num);
elif num == 0:
return "0"
res = [];
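        # collect base-7 digits least-significant first, then reverse at the end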
while num > 0:
res.append(str(num % 7))
num //= 7
return ''.join(res[::-1])
__________________________________________________________________________________________________
| [
"[email protected]"
] | |
97d89af9dfe4ef5088a883b84ba3d9d590cc0f80 | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/CodeChef/OMWG.py | 9be4d99b7ea695d8b09d5f1beca4eb627affcc1c | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | for i in range(int(input())):
n, m = input().split()
n, m = int(n), int(m)
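    # the score equals the number of adjacent cell pairs in an n x m grid:
    # (n-1) + (m-1) + 2*(n-1)*(m-1) simplifies to 2*n*m - n - m, i.e.
    # n*(m-1) horizontal edges plus m*(n-1) vertical edges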
score = (n-1) + (m-1) + ((n-1)*(m-1)*2)
print(score)
| [
"[email protected]"
] | |
b68da664f55e3ce8d1b55150e00655508d5a1602 | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/flock.py | 4f97c4035d81836c667a98dd90cc9fe5ddb7753f | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 9,371 | py | # 2016.08.04 19:47:40 Střední Evropa (letní čas)
# Embedded file name: scripts/client/Flock.py
from AvatarInputHandler import mathUtils
import BigWorld
import Math
import ResMgr
import math
import random
import BattleReplay
import SoundGroups
from debug_utils import LOG_CURRENT_EXCEPTION, LOG_ERROR
from Math import Vector3
ENVIRONMENT_EFFECTS_CONFIG_FILE = 'scripts/environment_effects.xml'
class DebugGizmo:
def __init__(self, spaceID, modelName = 'helpers/models/position_gizmo.model'):
self.model = BigWorld.Model(modelName)
BigWorld.addModel(self.model, spaceID)
self.motor = BigWorld.Servo(Math.Matrix())
self.model.addMotor(self.motor)
def visible(self, show):
self.model.visible = show
def attachTo(self, model):
self.motor.signal = model.matrix
def attachToPosition(self, pos):
self.model.motors = ()
self.model.position = pos
class DebugLine(object):
def _setThickness(self, value):
self.__thickness = value
thickness = property(lambda self: self.__thickness, _setThickness)
def __init__(self, start, end):
self.model = BigWorld.Model('helpers/models/unit_cube.model')
self.motor = BigWorld.Servo(Math.Matrix())
self.model.addMotor(self.motor)
self.__thickness = 0.1
self.set(start, end)
BigWorld.addModel(self.model)
def set(self, start, end):
self.start = start
self.end = end
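        # stretch a unit cube into a thin box along start->end, orient it via
        # the direction's yaw/pitch, and center it on the segment's midpoint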
direction = end - start
m = mathUtils.createSRTMatrix((self.__thickness, self.__thickness, direction.length), (direction.yaw, direction.pitch, 0), start + direction / 2)
m.preMultiply(mathUtils.createTranslationMatrix(Vector3(-0.5, -0.5, -0.5)))
self.motor.signal = m
class DebugPolyLine(object):
def __init__(self):
self.lines = []
def set(self, points):
idx = 0
for curP, nextP in zip(points, points[1:]):
if idx == len(self.lines):
self.lines.append(DebugLine(curP, nextP))
else:
self.lines[idx].set(curP, nextP)
self.lines[idx].model.visible = True
idx += 1
while idx < len(self.lines):
self.lines[idx].model.visible = False
idx += 1
class FlockLike:
__SoundNames = None
MAX_DIST_SQ = 10000
def __init__(self):
if FlockLike.__SoundNames is None:
FlockLike.__SoundNames = {}
flockDataSect = ResMgr.openSection(ENVIRONMENT_EFFECTS_CONFIG_FILE + '/birds')
for value in flockDataSect.values():
modelName = value.readString('modelName', '')
soundName = value.readString('wwsound', '')
if modelName != '' and soundName != '':
FlockLike.__SoundNames[modelName] = soundName
self.__sound = None
return
def destroy(self):
self.__sound = None
return
def _getModelsToLoad(self):
list = []
modelNames = [self.modelName]
if self.modelName2 != '':
modelNames.append(self.modelName2)
for i in xrange(0, self.modelCount):
list.append(random.choice(modelNames))
return list
def _loadModels(self, prereqs):
try:
for modelId in prereqs.keys():
if modelId in prereqs.failedIDs:
LOG_ERROR('Failed to load flock model: %s' % modelId)
continue
model = prereqs[modelId]
model.outsideOnly = 1
model.moveAttachments = True
self.addModel(model)
if self.__sound is None:
self._addSound(model)
animSpeed = random.uniform(self.animSpeedMin, self.animSpeedMax)
model.actionScale = animSpeed
model.action('FlockAnimAction')()
except Exception:
LOG_CURRENT_EXCEPTION()
return
def _addSound(self, model, soundName = ''):
if not model.sources:
return
else:
modelName = model.sources[0]
if soundName == '':
soundName = FlockLike.__SoundNames.get(modelName, None)
if soundName is None or soundName == '':
return
try:
self.__sound = SoundGroups.g_instance.getSound3D(model.root, soundName)
if self.__sound is not None:
self.__sound.play()
except Exception:
LOG_CURRENT_EXCEPTION()
return
return
def _delSound(self):
if self.__sound is not None:
self.__sound.stop()
self.__sound.releaseMatrix()
self.__sound = None
return
class Flock(BigWorld.Entity, FlockLike):
STRATEGY_USUAL_FLY = 0
STRATEGY_FLY_AROUND_CW = 1
STRATEGY_FLY_AROUND_CCW = 2
HEIGHT_CHANGE_DECISION_COUNT = 3
HEIGHT_CHANGE_SPEED_MULTIPLIER = 1.1
HEIGHT_DISPERSION_CORRIDOR = 0.05
CIRCLE_FLIGHT_ABOVE = 0.5
CIRCLE_FLIGHT_PROBABILITY = 0.25
__SoundNames = None
def __init__(self):
BigWorld.Entity.__init__(self)
FlockLike.__init__(self)
self.__decisionCallbackId = None
self.__decisionCount = 0
return
def prerequisites(self):
return self._getModelsToLoad()
def onEnterWorld(self, prereqs):
if BattleReplay.g_replayCtrl.isPlaying:
return
self._loadModels(prereqs)
if len(self.models) > 0:
self._addSound(self.models[0])
self.__decisionStrategy = self.__doUsualFly
if self.flyAroundCenter != Flock.STRATEGY_USUAL_FLY:
self.__setupFlyAroundCenter()
self.filter = BigWorld.BoidsFilter()
self.filter.speed = self.speedAtBottom
self.filter.yprSpeed = Vector3(self.yawSpeed, self.pitchSpeed, self.rollSpeed)
self.filter.deadZonePosition = self.position
self.filter.deadZoneRadius = self.deadZoneRadius
self.middlePosition = Math.Vector3()
self.minHeight = self.position.y
self.maxHeight = self.minHeight + self.height
for boid in self.models:
boid.visible = True
self.middlePosition = Math.Vector3(self.position)
self.physics = 0
newPosition = Math.Vector3(self.position)
newPosition.y = (self.minHeight + self.maxHeight) / 2.0
self.physics.teleport(newPosition)
self.__makeDecision()
def onLeaveWorld(self):
self.models = []
if self.__decisionCallbackId is not None:
BigWorld.cancelCallback(self.__decisionCallbackId)
self.__decisionStrategy = None
FlockLike.destroy(self)
return
def set_state(self, oldState):
pass
def boidsLanded(self):
pass
def name(self):
return 'Flock'
def __setupFlyAroundCenter(self):
self.__decisionStrategy = self.__doAroundCenterFly
self.deadZoneRadius = self.radius
for boid in self.models:
boid.position = Vector3(0.0, 0.0, self.deadZoneRadius)
if self.flyAroundCenter == Flock.STRATEGY_FLY_AROUND_CW:
boid.yaw = math.pi / 2.0
else:
boid.yaw = -math.pi / 2.0
def __doUsualFly(self):
flightZoneHeight = self.maxHeight - self.minHeight
if self.__decisionCount >= Flock.HEIGHT_CHANGE_DECISION_COUNT:
randY = random.uniform(self.minHeight, self.maxHeight)
heightFraction = (randY - self.minHeight) / flightZoneHeight
self.filter.speed = self.speedAtBottom + (self.speedAtTop - self.speedAtBottom) * heightFraction
self.__decisionCount = 0
else:
heightFraction = (self.position.y - self.minHeight) / flightZoneHeight
if heightFraction >= Flock.CIRCLE_FLIGHT_ABOVE and random.random() <= Flock.CIRCLE_FLIGHT_PROBABILITY:
return
self.filter.speed = self.speedAtBottom + (self.speedAtTop - self.speedAtBottom) * heightFraction
randY = self.position.y + random.uniform(-flightZoneHeight * Flock.HEIGHT_DISPERSION_CORRIDOR, flightZoneHeight * Flock.HEIGHT_DISPERSION_CORRIDOR)
if randY < self.minHeight:
randY = self.minHeight
elif randY > self.maxHeight:
randY = self.maxHeight
randRadius = random.uniform(self.deadZoneRadius, self.radius)
randAngle = random.uniform(0.0, 2.0 * math.pi)
newPosition = Math.Vector3(self.middlePosition.x + randRadius * math.cos(randAngle), randY, self.middlePosition.z + randRadius * math.sin(randAngle))
self.physics.teleport(newPosition)
def __doAroundCenterFly(self):
randY = random.uniform(self.minHeight, self.maxHeight)
self.physics.teleport(Math.Vector3(self.middlePosition.x, randY, self.middlePosition.z))
def __makeDecision(self):
self.__decisionCallbackId = BigWorld.callback(self.decisionTime, self.__makeDecision)
self.__decisionCount += 1
self.__decisionStrategy()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\flock.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:47:41 Central Europe (Summer Time)
| [
"[email protected]"
] | |
0e237708463f88a8264fb311025fee4c4efe4a2a | e71c8efe431fb5d8bf5076a054aeaeeccf3f6404 | /django_site/torah/templatetags/torah_filters.py | 252b9b0470086bd807c7f0db15ae01dbcaba3da9 | [] | no_license | suhailvs/torah | 2f587744da4f01719b5cc47b78a3da11cab032f2 | 0bc59099b0635c31b296a74b5bd9fbfb8798553a | refs/heads/master | 2021-11-10T23:12:54.719502 | 2021-11-06T12:26:48 | 2021-11-06T12:26:48 | 156,322,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | from django import template
from torah.models import Word
register = template.Library()
PATTERN = 'abgdefzhjiklmnxopsqrct'
fiej = lambda p: p.replace('f','v').replace('i','y').replace('e','H').replace('j','T')
@register.filter(name='get_letternumber')
def get_letternumber(letter):
"""
    Return the number corresponding to a PaleoHebrew letter
"""
return PATTERN.index(letter)+1
@register.filter(name='get_words')
def get_words(line):
"""
Return list of words of given line
"""
return line.split(' ')
@register.filter(name='get_hebrewletter')
def get_hebrewletter(paleoletter):
"""
    Return the Hebrew letter corresponding to a PaleoHebrew letter
input: a
output: \u05d0
"""
HEBREW_UNICODE = ['\u05d0','\u05d1','\u05d2','\u05d3','\u05d4','\u05d5','\u05d6','\u05d7','\u05d8','\u05d9','\u05db','\u05dc','\u05de','\u05e0','\u05e1','\u05e2','\u05e4','\u05e6','\u05e7','\u05e8','\u05e9','\u05ea']
return HEBREW_UNICODE[PATTERN.index(paleoletter)]
@register.filter(name='replace_fie')
def replace_fie(paleoword):
"""
    Replace f -> v, i -> y, e -> H, j -> T
"""
return fiej(paleoword)
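# Template usage sketch (illustrative values): once the app providing this
# tag library is installed, the filters can be applied in templates:
#
#   {% load torah_filters %}
#   {{ "a"|get_letternumber }}   {# -> 1 #}
#   {{ "a"|get_hebrewletter }}   {# -> aleph ("\u05d0") #}
#   {{ "fiej"|replace_fie }}     {# -> "vyHT" #}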
@register.filter(name='get_englishword')
def get_englishword(paleoword):
"""
    Return the English meaning corresponding to a PaleoHebrew word
"""
w = Word.objects.get(name = paleoword[::-1])
return w.translation | [
"[email protected]"
] | |
92a55865ce6f0721b8e14204bf9d1663b2200a98 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CreateDemandRequest.py | 6526229cdbc8a205e15c3771f39ddb7a45a15023 | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 3,911 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
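# Illustrative usage sketch (credentials, region and parameter values are
# placeholders, not part of this generated file); the request is sent through
# aliyunsdkcore's AcsClient, whose do_action_with_exception() returns the raw
# JSON response body:
#
#   from aliyunsdkcore.client import AcsClient
#
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateDemandRequest()
#   request.set_InstanceType('ecs.g5.large')
#   request.set_Amount(1)
#   request.set_ZoneId('cn-hangzhou-b')
#   print(client.do_action_with_exception(request))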
class CreateDemandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateDemand','ecs')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_DemandDescription(self):
return self.get_query_params().get('DemandDescription')
def set_DemandDescription(self,DemandDescription):
self.add_query_param('DemandDescription',DemandDescription)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_DemandName(self):
return self.get_query_params().get('DemandName')
def set_DemandName(self,DemandName):
self.add_query_param('DemandName',DemandName)
def get_Amount(self):
return self.get_query_params().get('Amount')
def set_Amount(self,Amount):
self.add_query_param('Amount',Amount)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PeriodUnit(self):
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self,PeriodUnit):
self.add_query_param('PeriodUnit',PeriodUnit)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId) | [
"[email protected]"
] | |
44b9f6666d722193eb9a18f604bc1c34b6dab2cd | c317f6a390de255540c2fb6a2e637c20bec03762 | /final/pwn-exzilla/container/server.py | 476db68bfe9316a904eccb9facdbd2bd25009136 | [] | no_license | Kodsport/sakerhetssm-2021-solutions | a7329ef22862bcfc4c970d43ac210bbe951cf3a8 | 85bc2aa619d55139acf7c91483259088329c15e2 | refs/heads/master | 2023-05-12T00:54:24.546337 | 2021-06-07T14:12:32 | 2021-06-07T14:12:32 | 353,975,490 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,576 | py | #!/bin/env python3
import string, json, base64, sys
sys.setrecursionlimit(30)
def f():
pass
CODE_TYPE = type(f.__code__)
FUNC_TYPE = type(f)
class NumberCell:
def __init__(self, n):
self.n = n
@staticmethod
def new():
while True:
try:
n = int(input("Cell value:\n> "))
return NumberCell(n)
except:
print("not a number")
def eval(self, _):
return str(self.n)
def view(self):
return str(self.n)
class FormulaCell:
def __init__(self, formula):
self.formula = formula
@staticmethod
def new():
print("Create a Formula Cell. Not for the faint of heart.")
while True:
try:
x = input("Formula import string:\n> ")
x = base64.b64decode(x)
x = json.loads(x)
assert len(x) == 14
x = CODE_TYPE(x[0], x[1], x[2], x[3], x[4], x[5], base64.b64decode(x[6]), tuple(x[7]), tuple(x[8]), tuple(x[9]), x[10], x[11], x[12], base64.b64decode(x[13]))
x = FUNC_TYPE(x, globals())
return FormulaCell(x)
except:
print("Bad import string")
def eval(self, sheet):
return str(self.formula(sheet))
def view(self):
c = self.formula.__code__
x = [c.co_argcount, c.co_posonlyargcount, c.co_kwonlyargcount, c.co_nlocals, c.co_stacksize, c.co_flags, base64.b64encode(c.co_code).decode(), c.co_consts, c.co_names, c.co_varnames, c.co_filename, c.co_name, c.co_firstlineno, base64.b64encode(c.co_lnotab).decode()]
x = base64.b64encode(json.dumps(x).encode())
return x.decode()
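# Demo helper (an added sketch, unused by the server): round-trips a plain
# Python function through the same export format FormulaCell.view() emits, so
# the result can be pasted at the "Formula import string" prompt. CPython 3.8+
# only, since code-object layout is implementation specific; _double_a0 is
# just an example formula.
def make_import_string(func):
    c = func.__code__
    x = [c.co_argcount, c.co_posonlyargcount, c.co_kwonlyargcount, c.co_nlocals,
         c.co_stacksize, c.co_flags, base64.b64encode(c.co_code).decode(),
         c.co_consts, c.co_names, c.co_varnames, c.co_filename, c.co_name,
         c.co_firstlineno, base64.b64encode(c.co_lnotab).decode()]
    return base64.b64encode(json.dumps(x).encode()).decode()
def _double_a0(sheet):
    # example formula: twice the value of cell A0
    return int(sheet.grid[0][0].eval(sheet)) * 2
# e.g. make_import_string(_double_a0) yields a string accepted by FormulaCell.new()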
class Sheet:
def __init__(self, w, h):
self.grid = []
for _ in range(h):
row = []
for _ in range(w):
row.append(NumberCell(1))
self.grid.append(row)
def display(self):
col_widths = [1]*len(self.grid[0])
for row in self.grid:
for col in range(len(row)):
col_widths[col] = max(col_widths[col], len(row[col].eval(self)))
separator = "+----+" + "+".join(["-"*(n+2) for n in col_widths]) + "+"
print(separator)
self.display_row(" ", [chr(ord("A")+i) for i in range(len(col_widths))], col_widths)
print(separator)
for (i, row) in enumerate(self.grid):
self.display_row(str(i), [cell.eval(self) for cell in row], col_widths)
print(separator)
def display_row(self, first, values, col_widths):
print("| "+"%2s"%first+" | " + " | ".join([("%"+str(n)+"s")%val for (val, n) in zip(values,col_widths)]) + " |")
def edit(self, r, c):
while True:
try:
choice = int(input("1. Create number cell\n2. Create formula cell\n> "))
except:
print("Bad option!")
continue
if choice == 1:
self.grid[r][c] = NumberCell.new()
return
elif choice == 2:
self.grid[r][c] = FormulaCell.new()
return
else:
print("Bad option")
def view(self, r, c):
print("Cell:", self.grid[r][c].view())
the_sheets = {}
def new_sheet():
name = input("Name? ")
the_sheets[name] = Sheet(10, 10)
open_sheet(name)
def list_sheets():
print("The Sheets:")
for k in the_sheets.keys():
print(k)
def open_sheet(name=""):
if len(the_sheets) == 0:
print("There are no sheets yet! Create one first!")
return
if name == "":
list_sheets()
name = input("Name? ")
while name not in the_sheets:
list_sheets()
print("Sheet doesn't exist")
name = input("Name? ")
sheet = the_sheets[name]
while True:
print("------", name, "--------------------------")
sheet.display()
print("t <pos> - edit, w <pos> - view, e - close")
choice = input("> ").split(" ")
if choice[0] == "t":
try:
r, c = pos2rowcol(choice[1])
sheet.edit(r, c)
except:
print("Bad row or col!")
elif choice[0] == "w":
try:
r, c = pos2rowcol(choice[1])
sheet.view(r, c)
except:
print("Bad row or col!")
elif choice[0] == "e":
return
else:
print("Bad choice!")
def pos2rowcol(pos):
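    # maps a cell reference like "B3" to (row, col), e.g. "B3" -> (3, 1)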
assert len(pos) >= 2
assert pos[0] in string.ascii_uppercase
assert all([c in string.digits for c in pos[1:]])
return int(pos[1:]), ord(pos[0])-ord("A")
def menu():
print("Menu")
print("1. Open sheet")
print("2. New sheet")
print("3. Exit")
def banner():
print(""" ______ __ __ ______ __ __ __ ______
/\ ___\ /\_\_\_\ /\___ \ /\ \ /\ \ /\ \ /\ __ \
\ \ __\ \/_/\_\/_ \/_/ /__ \ \ \ \ \ \____ \ \ \____ \ \ __ \
\ \_____\ /\_\/\_\ /\_____\ \ \_\ \ \_____\ \ \_____\ \ \_\ \_\
\/_____/ \/_/\/_/ \/_____/ \/_/ \/_____/ \/_____/ \/_/\/_/
Excel killa""")
def main():
banner()
while True:
menu()
try:
choice = int(input("> "))
except:
print("Bad choice!")
continue
if choice == 1:
open_sheet()
elif choice == 2:
new_sheet()
elif choice == 3:
print("Bye!")
break
else:
print("Bad choice!")
if __name__ == "__main__":
import random
random.seed(0)
example = Sheet(2, 12)
for i in range(10):
example.grid[i][0] = NumberCell(random.randint(1, 9)*100)
example.grid[i][1] = NumberCell(random.randint(1, 9)*100)
def sumcol0(sheet):
res = 0
for i in range(10):
res += int(sheet.grid[i][0].eval(sheet))
return str(res)
def sumcol1(sheet):
res = 0
for i in range(10):
res += int(sheet.grid[i][1].eval(sheet))
return str(res)
def total(sheet):
return str(int(sheet.grid[10][0].eval(sheet))+int(sheet.grid[10][1].eval(sheet)))
example.grid[10][0] = FormulaCell(sumcol0)
example.grid[10][1] = FormulaCell(sumcol1)
example.grid[11][0] = NumberCell(0)
example.grid[11][1] = FormulaCell(total)
the_sheets["budget"] = example
main()
| [
"[email protected]"
] | |
999925a84d6f0ad85d37484c414d300427a63c09 | 54857571461a579bed30cee27871aaa5fe396bcc | /nltk-0.9.7/src/nltk/wordnet/__init__.py | 68c62dcb3b99882cb006753e2b13e209d780a975 | [] | no_license | ahmedBazaz/affective-text-classification | 78375182e800b39e0e309e8b469e273c0d9590f0 | 719e9b26e60863c620662564fb9cfeafc004777f | refs/heads/master | 2021-01-10T14:50:01.100274 | 2009-01-09T03:59:01 | 2009-01-09T03:59:01 | 48,296,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,177 | py | # Natural Language Toolkit: WordNet Interface
#
# Copyright (C) 2001-2008 NLTK Project
# Author: Oliver Steele <[email protected]>
# Steven Bird <[email protected]>
# David Ormiston Smith <[email protected]>>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
WordNet interface, based on Oliver Steele's Pywordnet, together
with an implementation of Ted Pedersen's Wordnet::Similarity package.
Usage
=====
>>> from nltk.wordnet import *
Retrieve words from the database
>>> N['dog']
dog (noun)
>>> V['dog']
dog (verb)
>>> ADJ['clear']
clear (adj)
>>> ADV['clearly']
clearly (adv)
Examine a word's senses and pointers:
>>> N['dog'].synsets()
[{noun: dog, domestic_dog, Canis_familiaris}, {noun: frump, dog}, {noun: dog}, {noun: cad, bounder, blackguard, dog, hound, heel}, {noun: frank, frankfurter, hotdog, hot_dog, dog, wiener, wienerwurst, weenie}, {noun: pawl, detent, click, dog}, {noun: andiron, firedog, dog, dog-iron}]
Extract the first sense:
>>> N['dog'][0]
{noun: dog, domestic_dog, Canis_familiaris}
Get the first five pointers (relationships) from dog to other synsets:
>>> N['dog'][0].relations()
{'hypernym': [('noun', 2083346, 0), ('noun', 1317541, 0)],
'part holonym': [('noun', 2158846, 0)],
'member meronym': [('noun', 2083863, 0), ('noun', 7994941, 0)],
'hyponym': [('noun', 1322604, 0), ('noun', 2084732, 0), ...]}
Get those synsets of which 'dog' is a member meronym:
>>> N['dog'][0][MEMBER_MERONYM]
[{noun: Canis, genus Canis}, {noun: pack}]
"""
from util import *
from cache import *
from lexname import *
from dictionary import *
from similarity import *
from synset import *
from browse import *
from stemmer import *
from browser import *
| [
"tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883"
] | tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883 |
e28425c6be03c6d9956daa0eddac1c7fa2d826f5 | 816232db2f21e193612eaa60eda0d5897d31caaf | /COS_PRO/4차/6_자아도취수.py | edbcf8c6e72656e86c99fe03d7d9553c8241eb4a | [] | no_license | Juyoung4/StudyAlgorithm | a60bfa7657eac57f59200bfa204aff1ad27c79f8 | 4b190e0bfeb268bef4be00ae9bedd9ca8946fbd6 | refs/heads/master | 2023-08-31T04:37:07.422641 | 2021-09-27T08:38:09 | 2021-09-27T08:38:09 | 282,757,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | def power(base, exponent):
val = 1
for i in range(exponent):
val *= base
return val
def solution(k):
answer = []
bound = power(10, k)
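    # keep every k-digit number that equals the sum of the k-th powers of
    # its digits (a narcissistic number)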
for i in range(bound // 10, bound):
current = i
calculated = 0
while current != 0:
calculated += (current%10)**k
current = current // 10
if calculated == i:
answer.append(i)
return answer
k = 3
ret = solution(k)
print("solution 함수의 반환 값은", ret, "입니다.") | [
"[email protected]"
] | |
8338f1a20433e20abae78ed49ba41b21de94e06a | 35efa062451213e6a821c4064c436a1f13f2835c | /src/sentry/south_migrations/0314_auto__add_distribution__add_unique_distribution_release_name__add_fiel.py | 1319b6f0fc474f1f98547a3ae43d59475aba64de | [
"BSD-2-Clause"
] | permissive | ekhavana/sentry | 4ba3d6c6bf6a11edb7570ae94e17d5fba3e0d166 | 1bd28dadf51473eba81670fea66dfeb8c1970c25 | refs/heads/master | 2021-06-01T22:35:34.105147 | 2017-06-09T19:23:28 | 2017-06-09T19:23:28 | 93,899,538 | 0 | 0 | BSD-3-Clause | 2021-01-03T10:50:57 | 2017-06-09T21:53:07 | Python | UTF-8 | Python | false | false | 77,728 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Distribution'
db.create_table('sentry_distribution', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('organization_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('release', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Release'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['Distribution'])
# Adding unique constraint on 'Distribution', fields ['release', 'name']
db.create_unique('sentry_distribution', ['release_id', 'name'])
# Adding field 'ReleaseFile.dist'
db.add_column('sentry_releasefile', 'dist',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Distribution'], null=True),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Distribution', fields ['release', 'name']
db.delete_unique('sentry_distribution', ['release_id', 'name'])
# Deleting model 'Distribution'
db.delete_table('sentry_distribution')
# Deleting field 'ReleaseFile.dist'
db.delete_column('sentry_releasefile', 'dist_id')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'ffbbf6b343b747ed91117b04cdb3bee064adbbe53c92438d8be538489b02368b'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'165c86887a4b4cdeacb475b734cdd9c1cbe997ecc99d4a60b076f3f920540445'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Monostichous Jacquelynn'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'2c73cb9bcc43471593c28f79464e2f96'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 4, 21, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 5, 21, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'41c39eef95e74b2e893fa914b0192a3a23e1353e60594eb5b5947b02a1479027'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'e2d5c174f9444cb898f842dbc8ab1a63855b744464824395a98f61d194a11b31'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 4, 28, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'i6wkGTuvBT0W3Q7ZfTS9CdB64GEWljm2'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry'] | [
"[email protected]"
] | |
0d34f49f9c5a57b17efc8069e4270058d795f429 | f95d0c620151ae16d0139f742b461e411ecf1f7c | /assignments/assignment2/cs231n/optim.py | a337f3432eaf4530675c0b50c45c671825d0b4c6 | [] | no_license | chintanbetrabet/CS231n | 5116925618df18a23bc5a99d5003f3a6273e3f95 | 96cd5bb8ff32a80fcf5fe82e485669187696f267 | refs/heads/master | 2021-07-10T04:53:51.654828 | 2017-10-07T18:53:39 | 2017-10-07T18:53:39 | 104,069,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,106 | py | import numpy as np
"""
This file implements various first-order update rules that are commonly used for
training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning rate,
momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not perform
well; however the default values of the other hyperparameters should work well
for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
"""
#print "HI"
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
#############################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
#############################################################################
  v = config['momentum'] * v - config['learning_rate'] * dw  # decay velocity, add gradient step
  w += v                                                     # move along the velocity
  next_w = w
#############################################################################
# END OF YOUR CODE #
#############################################################################
config['velocity'] = v
return next_w, config
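# Sanity note (not assignment text): with momentum = 0 the update above
# collapses to plain sgd, since v becomes -learning_rate * dw each step.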
def rmsprop(x, dx, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared gradient
values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(x))
next_x = None
  #############################################################################
  # TODO: Implement the RMSprop update formula, storing the next value of x  #
  # in the next_x variable. Don't forget to update cache value stored in     #
  # config['cache'].                                                         #
  #############################################################################
  cache = config['decay_rate'] * config['cache'] + (1 - config['decay_rate']) * (dx ** 2)
  next_x = x - config['learning_rate'] * dx / (np.sqrt(cache) + config['epsilon'])
  config['cache'] = cache
#############################################################################
# END OF YOUR CODE #
#############################################################################
return next_x, config
def adam(x, dx, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(x))
config.setdefault('v', np.zeros_like(x))
config.setdefault('t', 0)
next_x = None
#############################################################################
# TODO: Implement the Adam update formula, storing the next value of x in #
# the next_x variable. Don't forget to update the m, v, and t variables #
# stored in config. #
#############################################################################
  config['t'] += 1
  config['m'] = config['beta1'] * config['m'] + (1 - config['beta1']) * dx
  config['v'] = config['beta2'] * config['v'] + (1 - config['beta2']) * (dx ** 2)
  # bias-corrected moment estimates (the correction term the docstring promises)
  m_hat = config['m'] / (1 - config['beta1'] ** config['t'])
  v_hat = config['v'] / (1 - config['beta2'] ** config['t'])
  next_x = x - config['learning_rate'] * m_hat / (np.sqrt(v_hat) + config['epsilon'])
#############################################################################
# END OF YOUR CODE #
#############################################################################
return next_x, config
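# Quick usage sketch (shapes are illustrative): the same config dict must be
# threaded through successive calls so m, v, and t keep accumulating.
#
#   x, cfg = adam(np.zeros(3), np.ones(3))    # first call seeds m, v, t
#   x, cfg = adam(x, np.ones(3), cfg)         # later calls reuse cfg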
| [
"[email protected]"
] | |
5c4cd537a6ff217add8a3efe464d8da6c2abbd93 | b726ec9abfbd53f03fa96ccd336ed165aaa306c8 | /find_max_common_subsequence.py | 16249cff0778023e0896a80ee83635bd50f86092 | [] | no_license | hcxie20/Algorithm | de0256aa3acacf96833e46a0b0c66517dae6cbfd | cace01b8441a8a1923b827de844965874e790d77 | refs/heads/master | 2021-08-15T23:18:00.364329 | 2020-12-02T05:53:32 | 2020-12-02T05:53:32 | 228,120,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | class Solution(object):
def __init__(self, str1, str2):
self.value = 0
self.str = ""
if str1 and str2:
dp = [[0 for i in range(len(str1) + 1)] for j in range(len(str2) + 1)]
for i in range(1, len(str2) + 1):
for j in range(1, len(str1) + 1):
if str1[j - 1] == str2[i - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
            # Backtrack from dp[len(str2)][len(str1)] to recover one LCS.
            # A cell belongs to the LCS only when the characters actually match,
            # not merely when dp[i][j] equals dp[i-1][j-1].
            i, j = len(str2), len(str1)
            self.value = dp[i][j]
            while i > 0 and j > 0:
                if str2[i - 1] == str1[j - 1]:
                    self.str = str2[i - 1] + self.str
                    i -= 1
                    j -= 1
                elif dp[i - 1][j] >= dp[i][j - 1]:
                    i -= 1
                else:
                    j -= 1
if __name__ == "__main__":
a = "abcdef"
b = ""
c = Solution(a, b) | [
"="
] | = |
a7629ce10b51f47ce22435f78f847ce0d23ad3c8 | 5397c6bd14e272f4a274412018084615f5da7b9b | /django/django_into/manasa/manage.py | ed935ca5ecf259f3fbfd2ab1198f0af517010ed8 | [] | no_license | nassersayeh/Python_stack | f7be8b4e7406078e4864118350989f2f9c1a905c | c91d220b27f4faf5a90f492bf3b3fa0d8d2957a2 | refs/heads/main | 2023-05-04T03:49:51.624679 | 2021-05-29T15:30:16 | 2021-05-29T15:30:16 | 367,853,112 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'manasa.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7c04a3f7c065a01a0d8e43922f46a8251de774d9 | 54fc549a8af5bad5cfeb97af92a02448297f1ea9 | /src/gather_reviews/template.py | b60cad3a592d610f9ae8012b9ac6cb7da4f2f52a | [] | no_license | ace-racer/ReviewAnalysis | 35659ba09917a345edb3e3701aa12ae78602333b | 95fee3407791b5bbbc47619b06e603689e2249ed | refs/heads/master | 2020-07-26T06:10:05.563327 | 2019-10-26T14:58:12 | 2019-10-26T14:58:12 | 208,559,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | class ReviewTemplate:
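    """Plain value holder for the fields scraped from a single product review."""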
def __init__(self):
self.rating = 0
self.title = ""
self.review_text = ""
self.reviewer = ""
self.review_date = ""
self.review_upvotes = 0
self.review_downvotes = 0
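
if __name__ == "__main__":
    # Minimal usage sketch; the field values below are invented for illustration.
    review = ReviewTemplate()
    review.rating = 4
    review.title = "Solid product"
    review.review_text = "Works as described."
    print(vars(review))  # plain-dict view of the collected fields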
| [
"[email protected]"
] | |
b87a8135b8a13d1fa8a6878cd59447c839828a69 | e53b7bbcea1a6f06175a9f14e31d5725fe80e804 | /Question_100/Q15_SobelFilter.py | d31f99bef7a488d33314a96b6c743a73bd861769 | [] | no_license | Zpadger/ObjectDetection | 5777c8d78c71dca1af6bccf25b01288dca7100c3 | aa0193a38f3d5c3a318501c3a59e89b73d3e244b | refs/heads/master | 2020-08-16T02:58:45.412713 | 2019-12-14T08:18:51 | 2019-12-14T08:18:51 | 215,446,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import cv2
import numpy as np
# Gray scale
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
# Gray scale
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
# sobel filter
def sobel_filter(img, K_size=3):
if len(img.shape) == 3:
H, W, C = img.shape
else:
img = np.expand_dims(img,axis=-1)
H, W, C = img.shape
# Zero padding
pad = K_size // 2
    out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)  # np.float was removed from NumPy
    # pad with the function argument (its first channel), not the global `gray`
    out[pad: pad + H, pad: pad + W] = img[..., 0].astype(float)
tmp = out.copy()
out_v = out.copy()
out_h = out.copy()
    # Sobel kernel, vertical direction (responds to horizontal edges)
    Kv = [[1., 2., 1.],[0., 0., 0.],[-1., -2., -1.]]
    # Sobel kernel, horizontal direction (responds to vertical edges)
    Kh = [[1., 0., -1.],[2., 0., -2.], [1., 0., -1.]]
# filtering
for y in range(H):
for x in range(W):
out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))
out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))
out_v = np.clip(out_v, 0, 255)
out_h = np.clip(out_h, 0, 255)
out_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)
out_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)
return out_v, out_h
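# Optional helper (a common follow-up step): fuse the two directional
# responses into a single gradient-magnitude edge map.
def sobel_magnitude(gv, gh):
    # edge strength |g| = sqrt(gv^2 + gh^2), clipped back to 8-bit range
    mag = np.sqrt(gv.astype(np.float32) ** 2 + gh.astype(np.float32) ** 2)
    return np.clip(mag, 0, 255).astype(np.uint8)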
# Read image
img = cv2.imread("imori.jpg").astype(float)
# grayscale
gray = BGR2GRAY(img)
# different filtering
out_v, out_h = sobel_filter(gray, K_size=3)
# Save result
cv2.imwrite("out_v.jpg", out_v)
cv2.imshow("result_v", out_v)
while cv2.waitKey(100) != 27:# loop if not get ESC
if cv2.getWindowProperty('result_v',cv2.WND_PROP_VISIBLE) <= 0:
break
cv2.destroyWindow('result_v')
cv2.imwrite("out_h.jpg", out_h)
cv2.imshow("result_h", out_h)
# loop if not get ESC or click x
while cv2.waitKey(100) != 27:
if cv2.getWindowProperty('result_h',cv2.WND_PROP_VISIBLE) <= 0:
break
cv2.destroyWindow('result_h')
cv2.destroyAllWindows() | [
"[email protected]"
] | |
ecf51cb881137d1f924dbc463f597d153a029da8 | 641ac23338d100a4aea52b8246c924f219a4b276 | /fabfile.py | 6ca0d13fd156aefa0294704b471dc351415f5463 | [] | no_license | loogica/videos | e1557f38913cdeb9c48d478a420c25ea6aac33ef | 2ae2817f4630d5c351a510b81a6c007dbfdda09a | refs/heads/master | 2021-03-13T00:04:20.355363 | 2013-08-17T22:24:04 | 2013-08-17T22:24:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 530 | py | from fabric.api import env, run, put, local, cd, sudo
env.hosts = ['loogica.net']
def zip_output():
local('zip -r blog_static.zip output')
def send_data():
put('blog_static.zip', '/tmp')
def remote_deploy_zip():
with cd('/tmp'):
sudo('unzip blog_static.zip')
sudo('mv output/ /opt/apps/')
with cd('/opt/apps'):
sudo('rm -rf videos')
sudo('mv output videos')
sudo('chown -R deploy:www-data videos')
def deploy():
zip_output()
send_data()
remote_deploy_zip()
| [
"[email protected]"
] | |
9bc4963cc714fa4ada7a45a913bad50ae400a4be | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/dataproc/v1/region_cluster_iam_binding.py | 9dacce9cc9f27501ccd0cb7560610489ec9892cd | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 13,700 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import iam as _iam
__all__ = ['RegionClusterIamBindingArgs', 'RegionClusterIamBinding']
@pulumi.input_type
class RegionClusterIamBindingArgs:
def __init__(__self__, *,
members: pulumi.Input[Sequence[pulumi.Input[str]]],
name: pulumi.Input[str],
role: pulumi.Input[str],
condition: Optional[pulumi.Input['_iam.v1.ConditionArgs']] = None):
"""
The set of arguments for constructing a RegionClusterIamBinding resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in role. Each entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
* serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
* group:{emailid}: An email address that represents a Google group. For example, [email protected].
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
:param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
:param pulumi.Input[str] role: The role that should be applied. Only one `IamBinding` can be used per role.
:param pulumi.Input['_iam.v1.ConditionArgs'] condition: An IAM Condition for a given binding.
"""
pulumi.set(__self__, "members", members)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "role", role)
if condition is not None:
pulumi.set(__self__, "condition", condition)
@property
@pulumi.getter
def members(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Identities that will be granted the privilege in role. Each entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
* serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
* group:{emailid}: An email address that represents a Google group. For example, [email protected].
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the resource to manage IAM policies for.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The role that should be applied. Only one `IamBinding` can be used per role.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['_iam.v1.ConditionArgs']]:
"""
An IAM Condition for a given binding.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['_iam.v1.ConditionArgs']]):
pulumi.set(self, "condition", value)
class RegionClusterIamBinding(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']] condition: An IAM Condition for a given binding.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in role. Each entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
* serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
* group:{emailid}: An email address that represents a Google group. For example, [email protected].
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
:param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
:param pulumi.Input[str] role: The role that should be applied. Only one `IamBinding` can be used per role.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegionClusterIamBindingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
:param str resource_name: The name of the resource.
:param RegionClusterIamBindingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegionClusterIamBindingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegionClusterIamBindingArgs.__new__(RegionClusterIamBindingArgs)
__props__.__dict__["condition"] = condition
if members is None and not opts.urn:
raise TypeError("Missing required property 'members'")
__props__.__dict__["members"] = members
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
__props__.__dict__["etag"] = None
__props__.__dict__["project"] = None
super(RegionClusterIamBinding, __self__).__init__(
'google-native:dataproc/v1:RegionClusterIamBinding',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegionClusterIamBinding':
"""
Get an existing RegionClusterIamBinding resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RegionClusterIamBindingArgs.__new__(RegionClusterIamBindingArgs)
__props__.__dict__["condition"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["members"] = None
__props__.__dict__["name"] = None
__props__.__dict__["project"] = None
__props__.__dict__["role"] = None
return RegionClusterIamBinding(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def condition(self) -> pulumi.Output[Optional['_iam.v1.outputs.Condition']]:
"""
An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The etag of the resource's IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def members(self) -> pulumi.Output[Sequence[str]]:
"""
Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, [email protected] . serviceAccount:{emailid}: An email address that represents a Google service account. For example, [email protected]. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, [email protected]. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, [email protected]?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, [email protected]?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, [email protected]?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
"""
return pulumi.get(self, "members")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource to manage IAM policies for.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The project in which the resource belongs. If it is not provided, a default will be supplied.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
"""
return pulumi.get(self, "role")
| [
"[email protected]"
] | |
65aa5e6b623352d8c3f47bed622fbb172f4621c8 | a35cc78514193c995709dd3c9ea80ae1ead3f6cd | /tests/test_all_classes.py | e4ff65fae2f2ec74f3d91a843a79a337d6bac6b5 | [
"MIT"
] | permissive | candrsn/graphviz | dc4015f9df241e5a8ff4a1cb3cd52b668df59de3 | bf81b68be671504360b0a696795891853930e85d | refs/heads/master | 2023-09-06T05:59:58.982098 | 2021-11-24T11:18:57 | 2021-11-24T11:18:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,971 | py | import locale
import re
import pytest
import graphviz
ALL_CLASSES = [graphviz.Graph, graphviz.Digraph, graphviz.Source]
@pytest.fixture(params=ALL_CLASSES)
def cls(request):
return request.param
@pytest.fixture
def dot(cls):
if cls.__name__ == 'Source':
return cls('digraph { hello -> world }\n')
return cls()
def test_copy(cls, dot):
assert type(dot) is cls
assert dot.copy() is not dot
assert dot.copy() is not dot.copy()
assert type(dot.copy()) is type(dot)
assert dot.copy().__dict__ == dot.__dict__ == dot.copy().__dict__
def test_str(dot):
assert str(dot) == dot.source
@pytest.mark.parametrize(
'parameter, expected_exception, match',
[('engine', ValueError, r'unknown engine'),
('format', ValueError, r'unknown format'),
('renderer', ValueError, r'unknown renderer'),
('formatter', ValueError, r'unknown formatter'),
('encoding', LookupError, r'encoding')])
def test_invalid_parameter_raises_valuerror(dot, parameter,
expected_exception, match):
with pytest.raises(expected_exception, match=match):
setattr(dot, parameter, 'invalid_parameter')
def test_encoding_none(dot):
dot_copy = dot.copy()
dot_copy.encoding = None
assert dot_copy.encoding == locale.getpreferredencoding()
@pytest.mark.exe
@pytest.mark.parametrize(
'kwargs', [{'engine': 'spam'}])
def test_render_raises_before_save(tmp_path, cls, kwargs, filename='dot.gv'):
args = ['graph { spam }'] if cls.__name__ == 'Source' else []
dot = cls(*args, filename=filename, directory=tmp_path)
expected_source = tmp_path / filename
assert not expected_source.exists()
with pytest.raises(ValueError, match=r''):
dot.render(**kwargs)
assert not expected_source.exists()
pdf = dot.render(engine='dot')
assert pdf == f'{expected_source}.pdf'
assert expected_source.exists()
assert expected_source.stat().st_size
@pytest.mark.parametrize(
'kwargs',
[{'engine': 'spam'}, {'format': 'spam'},
{'renderer': 'spam'}, {'formatter': 'spam'}])
def test_render_raises_before_save_mocked(tmp_path, mock_render, cls, kwargs,
filename='dot.gv'):
args = [''] if cls.__name__ == 'Source' else []
dot = cls(*args, filename=filename, directory=tmp_path)
expected_source = tmp_path / filename
assert not expected_source.exists()
first_arg = next(iter(kwargs))
with pytest.raises(ValueError, match=f'unknown {first_arg}'):
dot.render(**kwargs)
assert not expected_source.exists()
def test_render_mocked(mocker, mock_render, dot):
mock_save = mocker.patch.object(dot, 'save', autospec=True)
mock_view = mocker.patch.object(dot, '_view', autospec=True)
mock_remove = mocker.patch('os.remove', autospec=True)
assert dot.render(cleanup=True, view=True) is mock_render.return_value
mock_save.assert_called_once_with(None, None, skip_existing=None)
mock_render.assert_called_once_with(dot.engine, dot.format,
mock_save.return_value,
renderer=None, formatter=None,
quiet=False)
mock_remove.assert_called_once_with(mock_save.return_value)
mock_view.assert_called_once_with(mock_render.return_value, dot.format, False)
def test_format_renderer_formatter_mocked(mocker, mock_render,
quiet, cls,
filename='format.gv', format='jpg',
renderer='cairo', formatter='core'):
args = [''] if cls.__name__ == 'Source' else []
dot = cls(*args, filename=filename, format=format,
renderer=renderer, formatter=formatter)
assert dot.format == format
assert dot.renderer == renderer
assert dot.formatter == formatter
mock_save = mocker.patch.object(dot, 'save', autospec=True)
assert dot.render(quiet=quiet) is mock_render.return_value
mock_save.assert_called_once_with(None, None, skip_existing=None)
mock_render.assert_called_once_with('dot', format, mock_save.return_value,
renderer=renderer, formatter=formatter,
quiet=quiet)
def test_save_mocked(mocker, dot, filename='nonfilename', directory='nondirectory'):
mock_makedirs = mocker.patch('os.makedirs', autospec=True)
mock_open = mocker.patch('builtins.open', mocker.mock_open())
assert dot.save(filename, directory) == dot.filepath
assert dot.filename == filename
assert dot.directory == directory
mock_makedirs.assert_called_once_with(dot.directory, 0o777, exist_ok=True)
mock_open.assert_called_once_with(dot.filepath, 'w',
encoding=dot.encoding)
expected_calls = ([mocker.call(dot.source)] if type(dot).__name__ == 'Source'
else [mocker.call(mocker.ANY), mocker.call('}\n')])
assert mock_open.return_value.write.call_args_list == expected_calls
@pytest.mark.parametrize(
'encoding', [None, 'ascii', 'utf-8'])
def test_pipe_mocked(mocker, mock_pipe_lines, mock_pipe_lines_string, quiet,
dot, encoding):
input_encoding = 'utf-8'
dot.encoding = input_encoding
result = dot.pipe(encoding=encoding, quiet=quiet)
expected_args = ['dot', 'pdf', mocker.ANY]
expected_kwargs = {'quiet': quiet,
'renderer': None,
'formatter': None}
if encoding == input_encoding:
assert result is mock_pipe_lines_string.return_value
mock_pipe_lines_string.assert_called_once_with(*expected_args,
encoding=encoding,
**expected_kwargs)
return
if encoding is None:
assert result is mock_pipe_lines.return_value
else:
assert result is mock_pipe_lines.return_value.decode.return_value
mock_pipe_lines.return_value.decode.assert_called_once_with(encoding)
mock_pipe_lines.assert_called_once_with(*expected_args,
input_encoding=input_encoding,
**expected_kwargs)
def test_pipe_lines_mocked(mocker, mock_pipe_lines, dot, format_='svg'):
assert dot.format != format_
assert dot.pipe(format=format_) is mock_pipe_lines.return_value
mock_pipe_lines.assert_called_once_with(dot.engine, format_, mocker.ANY,
renderer=None, formatter=None,
input_encoding='utf-8',
quiet=False)
_, _, data = mock_pipe_lines.call_args.args
expected_lines = dot.source.splitlines(keepends=True)
assert list(data) == expected_lines
def test_repr_mimebundle_image_svg_xml_mocked(mocker, dot):
mock_pipe = mocker.patch.object(dot, 'pipe', autospec=True)
assert dot._repr_mimebundle_({'image/svg+xml'}) == {'image/svg+xml': mock_pipe.return_value}
mock_pipe.assert_called_once_with(format='svg', encoding=dot.encoding)
def test_repr_mimebundle_image_png_mocked(mocker, dot):
mock_pipe = mocker.patch.object(dot, 'pipe', autospec=True)
assert dot._repr_mimebundle_({'image/png'}) == {'image/png': mock_pipe.return_value}
mock_pipe.assert_called_once_with(format='png')
def test_repr_mimebundle_image_jpeg_mocked(mocker, dot):
mock_pipe = mocker.patch.object(dot, 'pipe', autospec=True)
assert dot._repr_mimebundle_({'image/jpeg'}) == {'image/jpeg': mock_pipe.return_value}
mock_pipe.assert_called_once_with(format='jpeg')
@pytest.mark.exe
def test_unflatten(cls, dot):
result = dot.unflatten()
assert isinstance(result, graphviz.Source)
normalized = re.sub(r'\s+', ' ', result.source.strip())
if cls.__name__ == 'Source':
assert normalized == 'digraph { hello -> world; }'
else:
assert normalized.startswith('digraph {' if dot.directed else 'graph {')
def test_unflatten_mocked(sentinel, mock_unflatten, dot):
kwargs = {'stagger': sentinel.stagger,
'fanout': sentinel.fanout,
'chain': sentinel.chain}
result = dot.unflatten(**kwargs)
assert result is not None
assert isinstance(result, graphviz.Source)
assert type(result) is graphviz.Source
assert result.source is mock_unflatten.return_value
assert result.filename == dot.filename
assert result.directory == dot.directory
assert result.engine == dot.engine
assert result.format == dot.format
assert result.renderer == dot.renderer
assert result.formatter == dot.formatter
assert result.encoding == dot.encoding
assert result._loaded_from_path is None
mock_unflatten.assert_called_once_with(dot.source,
encoding=dot.encoding,
**kwargs)
def test_view_mocked(mocker, dot):
mock_render = mocker.patch.object(dot, 'render', autospec=True)
kwargs = {'filename': 'filename', 'directory': 'directory',
'cleanup': True, 'quiet': True, 'quiet_view': True}
assert dot.view(**kwargs) is mock_render.return_value
mock_render.assert_called_once_with(view=True, **kwargs)
def test__view_unknown_platform(unknown_platform, dot):
with pytest.raises(RuntimeError, match=r'support'):
dot._view('name', 'png', False)
def test__view_mocked(mocker, sentinel, mock_platform, dot):
_view_platform = mocker.patch.object(dot, f'_view_{mock_platform}',
autospec=True)
kwargs = {'quiet': False}
assert dot._view(sentinel.name, 'png', **kwargs) is None
_view_platform.assert_called_once_with(sentinel.name, **kwargs)
| [
"[email protected]"
] | |
a78b0ad15ac86982ce568a9763b73aef095c2af4 | ef1458fae5fbd6b7a9281ccd4d9bc8289f3dd38b | /examples_UQ/MM2_surrogate_diam_batchgrid.py | f2a70af511b46ac0ec7e3de95da9b74e99547a93 | [
"BSD-3-Clause"
] | permissive | vt100/mystic | a42910537c3de90d1c2a5637bad5d866308e8863 | 7589eee4b9a7cb6056114ee6770579d173d9007b | refs/heads/master | 2021-01-17T22:28:57.743493 | 2015-07-17T15:25:35 | 2015-07-17T15:25:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,409 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use fmin solver
#######################################################################
#scale = 1.0
#npop = 20
nbins = [2,2,2]
#maxiter = 1000
#maxfun = 1e+6
#convergence_tol = 1e-4
#######################################################################
# the model function
#######################################################################
#from surrogate import marc_surr as model
from surrogate import ballistic_limit as limit
#######################################################################
# the subdiameter calculation
#######################################################################
def costFactory(i):
"""a cost factory for the cost function"""
def cost(rv):
"""compute the diameter as a calculation of cost
Input:
- rv -- 1-d array of model parameters
Output:
- diameter -- scale * | F(x) - F(x')|**2
"""
from surrogate import marc_surr as model
# prepare x and xprime
rv = list(rv)
params = rv[:-1] #XXX: assumes Xi' is at rv[-1]
params_prime = rv[:i]+rv[-1:]+rv[i+1:-1] #XXX: assumes Xi' is at rv[-1]
# get the F(x) response
Fx = model(params)
# get the F(x') response
Fxp = model(params_prime)
# compute diameter
scale = 1.0
return -scale * (Fx - Fxp)**2
return cost
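# Note (illustrative): with i = 0 and rv = [h, a, v, h'], the returned cost
# evaluates the model at (h, a, v) and at (h', a, v) and yields
# -scale * (Fx - Fxp)**2, so minimizing this cost maximizes the i-th
# squared subdiameter.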
#######################################################################
# make a pseudo-global optimizer from a steepest descent optimizer
#######################################################################
def optimize(cost,lower,upper,nbins):
from mystic.tools import random_seed
from pathos.pools import ProcessPool as Pool
random_seed(123)
# generate arrays of points defining a grid in parameter space
grid_dimensions = len(lower)
bins = []
for i in range(grid_dimensions):
step = abs(upper[i] - lower[i])/nbins[i]
bins.append( [lower[i] + (j+0.5)*step for j in range(nbins[i])] )
# build a grid of starting points
from pool_helper import local_optimize
from mystic.math.grid import gridpts
initial_values = gridpts(bins)
# run optimizer for each grid point
lb = [lower for i in range(len(initial_values))]
ub = [upper for i in range(len(initial_values))]
cf = [cost for i in range(len(initial_values))]
nnodes = len(lb)
# map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
results = Pool(nnodes).map(local_optimize, cf, initial_values, lb, ub)
#print "results = %s" % results
# get the results with the lowest energy
best = list(results[0][0]), results[0][1]
func_evals = results[0][2]
for result in results[1:]:
func_evals += result[2] # add function evaluations
if result[1] < best[1]: # compare energy
best = list(result[0]), result[1]
# return best
print "solved: %s" % best[0]
scale = 1.0
diameter_squared = -best[1] / scale #XXX: scale != 0
return diameter_squared, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
#######################################################################
def UQ(start,end,lower,upper):
#from pathos.pools import ProcessPool as Pool
from pathos.pools import ThreadPool as Pool
#from pool_helper import func_pickle # if fails to pickle, try using a helper
# run optimizer for each subdiameter
lb = [lower + [lower[i]] for i in range(start,end+1)]
ub = [upper + [upper[i]] for i in range(start,end+1)]
nb = [nbins[:] for i in range(start,end+1)]
for i in range(len(nb)): nb[i][-1] = nb[i][i]
cf = [costFactory(i) for i in range(start,end+1)]
#cf = [func_pickle(i) for i in cf]
#cf = [cost.name for cost in cf]
nnodes = len(lb)
#construct cost function and run optimizer
results = Pool(nnodes).map(optimize, cf,lb,ub,nb)
#print "results = %s" % results
results = zip(*results)
diameters = list(results[0])
function_evaluations = list(results[1])
total_func_evals = sum(function_evaluations)
total_diameter = sum(diameters)
print "subdiameters (squared): %s" % diameters
print "diameter (squared): %s" % total_diameter
print "func_evals: %s => %s" % (function_evaluations, total_func_evals)
return total_diameter
#######################################################################
# rank, bounds, and restart information
#######################################################################
if __name__ == '__main__':
from math import sqrt
function_name = "marc_surr"
lower_bounds = [60.0, 0.0, 2.1]
upper_bounds = [105.0, 30.0, 2.8]
# h = thickness = [60,105]
# a = obliquity = [0,30]
# v = speed = [2.1,2.8]
RVstart = 0; RVend = 2
RVmax = len(lower_bounds) - 1
# when not a random variable, set the value to the lower bound
for i in range(0,RVstart):
upper_bounds[i] = lower_bounds[i]
for i in range(RVend+1,RVmax+1):
upper_bounds[i] = lower_bounds[i]
lbounds = lower_bounds[RVstart:1+RVend]
ubounds = upper_bounds[RVstart:1+RVend]
#FIXME: these are *copies*... actual values contained in 'local_optimize'
maxiter = 1000
maxfun = 1e+6
convergence_tol = 1e-4
print "...SETTINGS..."
print "nbins = %s" % nbins
print "maxiter = %s" % maxiter
print "maxfun = %s" % maxfun
print "convergence_tol = %s" % convergence_tol
#print "crossover = %s" % crossover
#print "percent_change = %s" % percent_change
print "..............\n\n"
print " model: f(x) = %s(x)" % function_name
param_string = "["
for i in range(RVmax+1):
param_string += "'x%s'" % str(i+1)
if i == (RVmax):
param_string += "]"
else:
param_string += ", "
print " parameters: %s" % param_string
print " varying 'xi', with i = %s" % range(RVstart+1,RVend+2)
print " lower bounds: %s" % lower_bounds
print " upper bounds: %s" % upper_bounds
# print " ..."
nbins.append(None) #XXX: kind of hackish
diameter = UQ(RVstart,RVend,lower_bounds,upper_bounds)
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
31f23cfa6c6ca1b240f30f9db28cd4e53accc1dc | d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb | /tests/test_parsers/test_xaf_association_parser.py | 3a3efa1edd3f4a9aaba83169a8a2fb2a24c27680 | [
"Apache-2.0"
] | permissive | INCATools/ontology-access-kit | 2f08a64b7308e8307d1aaac2a81764e7d98b5928 | 8d2a124f7af66fe2e796f9e0ece55585438796a5 | refs/heads/main | 2023-08-30T14:28:57.201198 | 2023-08-29T17:40:19 | 2023-08-29T17:40:19 | 475,072,415 | 67 | 15 | Apache-2.0 | 2023-09-07T01:06:04 | 2022-03-28T15:50:45 | Jupyter Notebook | UTF-8 | Python | false | false | 949 | py | import logging
import unittest
from oaklib.datamodels.association import Association
from oaklib.parsers.parser_base import ColumnReference
from oaklib.parsers.xaf_association_parser import XafAssociationParser
from tests import INPUT_DIR
GAF = INPUT_DIR / "test-uniprot.gaf"
class XafAssociationParserTest(unittest.TestCase):
"""Tests parsing of GAF and GAF-like formats."""
def test_parser(self):
"""Tests parsing associations."""
parser = XafAssociationParser(
subject_column=ColumnReference(1), object_column=ColumnReference(4)
)
with open(GAF) as file:
assocs = list(parser.parse(file))
for association in assocs:
logging.info(association)
self.assertIn(
Association(
subject="Q9BPZ7", predicate=None, object="GO:0005737", property_values=[]
),
assocs,
)
| [
"[email protected]"
] | |
f0a8b55b7fdb9455813100ac46d0ddf18ded61b7 | 2b42c0f490128aab8aacf9ad572d280e7b702d92 | /postgresqleu/confreg/management/commands/confreg_expire_waitlist.py | 22da3c271f3d6e29755039f78dc2b8cf352ec509 | [] | no_license | danielgustafsson/pgeu-website | fc5bd38749aaf4bbdd4a112307b856e1fbc777b3 | 202e5c5118d12727902b4adc88eb465330b9705d | refs/heads/master | 2021-01-18T02:37:08.440516 | 2016-07-19T21:41:49 | 2016-07-19T21:41:49 | 63,783,904 | 0 | 0 | null | 2016-07-20T13:30:56 | 2016-07-20T13:30:55 | null | UTF-8 | Python | false | false | 2,470 | py | #!/usr/bin/env python
#
# Expire waitlist offers that have expired, so others can get the
# seats.
#
# Copyright (C) 2015, PostgreSQL Europe
#
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from datetime import datetime
from django.template import Context
from django.template.loader import get_template
from postgresqleu.mailqueue.util import send_simple_mail
from postgresqleu.confreg.models import RegistrationWaitlistEntry, RegistrationWaitlistHistory
class Command(BaseCommand):
help = 'Expire conference waitlist offers'
@transaction.atomic
def handle(self, *args, **options):
# Any entries that actually have an invoice will be canceled by the invoice
# system, as the expiry time of the invoice is set synchronized. In this
# run, we only care about offers that have not been picked up at all.
wlentries = RegistrationWaitlistEntry.objects.filter(registration__payconfirmedat__isnull=True, registration__invoice__isnull=True, offerexpires__lt=datetime.now())
template = get_template('confreg/mail/waitlist_expired.txt')
for w in wlentries:
reg = w.registration
# Create a history entry so we know exactly when it happened
RegistrationWaitlistHistory(waitlist=w,
text="Offer expired at {0}".format(w.offerexpires)).save()
# Notify conference organizers
send_simple_mail(reg.conference.contactaddr,
reg.conference.contactaddr,
'Waitlist expired',
u'User {0} {1} <{2}> did not complete the registration before the waitlist offer expired.'.format(reg.firstname, reg.lastname, reg.email),
sendername=reg.conference.conferencename)
# Also send an email to the user
send_simple_mail(reg.conference.contactaddr,
reg.email,
'Your waitlist offer for {0}'.format(reg.conference.conferencename),
template.render(Context({
'conference': reg.conference,
'reg': reg,
'offerexpires': w.offerexpires,
'SITEBASE': settings.SITEBASE,
})),
sendername = reg.conference.conferencename,
receivername = u"{0} {1}".format(reg.firstname, reg.lastname),
)
# Now actually expire the offer
w.offeredon = None
w.offerexpires = None
# Move the user to the back of the waitlist (we have a history entry for the
# initial registration date, so it's still around)
w.enteredon = datetime.now()
w.save()
| [
"[email protected]"
] | |
196a5cac2749c64a7321ae7a5fb94fee74f7cb79 | 8bd63bc56b39d26458ad54b7f18c4b149c1e3ce2 | /SPC Static Data/code/2016/08/000548/yxqjredakaziwxpbobc.py | 650dbe3ec83b1a4d423b852312ca60b20ad508c5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | isabella232/scipy-central-rescue | 43270c0e1850b989fbe9a5b1a06c3be11d16464a | 2b331610d52c189ae96bea4f4ce2ec343146b608 | refs/heads/master | 2021-09-06T09:17:30.627497 | 2018-02-04T19:41:11 | 2018-02-04T19:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | ZiWUk3 http://www.FyLitCl7Pf7kjQdDUOLQOuaxTXbj5iNG.com | [
"[email protected]"
] | |
ca6779ba52b3bd0f2ee2aaa437414e97d2550a24 | a7b66311c2ce113789933ec3162f1128b2862f13 | /app/waterQual/30yr/reason/cyclicCmap.py | c7e0ae852714e2a90f261dece677fc8ec7437826 | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
sd = np.datetime64('2000-01-01')
ed = np.datetime64('2000-12-31')
t = pd.date_range(sd, ed)
td = t.dayofyear.values-1
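# Map day-of-year onto the unit circle (angle = 2*pi * day/nt) so 1 Jan and
# 31 Dec land next to each other, matching the cyclic 'hsv' colormap used for
# the scatter below.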
fig, ax = plt.subplots(1, 1)
nt = td.max()
# tLst = ['2000-01-01', '2000-03-01', '2000-06-01', '2000-09-01']
tLst = ['2000-{:02d}-01'.format(m+1) for m in range(12)]
for k in range(len(tLst)):
tt = pd.to_datetime(tLst[k]).dayofyear-1
xx = np.cos(tt/nt*np.pi*2)
yy = np.sin(tt/nt*np.pi*2)
ax.plot([0, xx], [0, yy], 'k-')
ax.text(xx, yy, tLst[k][5:])
x = np.cos(td/nt*np.pi*2)
y = np.sin(td/nt*np.pi*2)
ax.scatter(x, y, c=td, cmap='hsv',s=100)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
ax.set_aspect('equal', 'box')
fig.show()
| [
"[email protected]"
] | |
099b061c314d4808116c57cd8ae5896e3fd376c5 | eeb8f0d9d0b0413f945e57e3de119c3964fb6a89 | /epitools-env/lib/python3.8/site-packages/nltk/test/unit/test_wordnet.py | fc9e41ba74d3a57a4397efade33d14edf99e6ed9 | [] | no_license | Hillary05/EPITECH-DOCUMENTATION | 13ff301fa657ff9ffd55ef61e64647453eda7a8c | 4ea0bbef1d27003b7d5902cbdfdd41fbc9173b2c | refs/heads/master | 2023-05-10T12:43:09.237217 | 2021-06-29T11:41:05 | 2021-06-29T11:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,255 | py | # -*- coding: utf-8 -*-
"""
Unit tests for nltk.corpus.wordnet
See also nltk/test/wordnet.doctest
"""
import collections
import os
import unittest
from nose import SkipTest
from nltk.corpus.reader.wordnet import WordNetCorpusReader
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic as wnic
from nltk.data import find as find_data
wn.ensure_loaded()
S = wn.synset
L = wn.lemma
class WordnNetDemo(unittest.TestCase):
def test_retrieve_synset(self):
move_synset = S('go.v.21')
self.assertEqual(move_synset.name(), "move.v.15")
self.assertEqual(move_synset.lemma_names(), ['move', 'go'])
self.assertEqual(
move_synset.definition(), "have a turn; make one's move in a game"
)
self.assertEqual(move_synset.examples(), ['Can I go now?'])
def test_retrieve_synsets(self):
self.assertEqual(sorted(wn.synsets('zap', pos='n')), [S('zap.n.01')])
self.assertEqual(
sorted(wn.synsets('zap', pos='v')),
[S('microwave.v.01'), S('nuke.v.01'), S('zap.v.01'), S('zap.v.02')],
)
def test_hyperhyponyms(self):
# Not every synset as hypernyms()
self.assertEqual(S('travel.v.01').hypernyms(), [])
self.assertEqual(S('travel.v.02').hypernyms(), [S('travel.v.03')])
self.assertEqual(S('travel.v.03').hypernyms(), [])
# Test hyper-/hyponyms.
self.assertEqual(S('breakfast.n.1').hypernyms(), [S('meal.n.01')])
first_five_meal_hypo = [
S('banquet.n.02'),
S('bite.n.04'),
S('breakfast.n.01'),
S('brunch.n.01'),
S('buffet.n.02'),
]
self.assertEqual(sorted(S('meal.n.1').hyponyms()[:5]), first_five_meal_hypo)
self.assertEqual(S('Austen.n.1').instance_hypernyms(), [S('writer.n.01')])
first_five_composer_hypo = [
S('ambrose.n.01'),
S('bach.n.01'),
S('barber.n.01'),
S('bartok.n.01'),
S('beethoven.n.01'),
]
self.assertEqual(
S('composer.n.1').instance_hyponyms()[:5], first_five_composer_hypo
)
# Test root hyper-/hyponyms
self.assertEqual(S('person.n.01').root_hypernyms(), [S('entity.n.01')])
self.assertEqual(S('sail.v.01').root_hypernyms(), [S('travel.v.01')])
self.assertEqual(
S('fall.v.12').root_hypernyms(), [S('act.v.01'), S('fall.v.17')]
)
def test_derivationally_related_forms(self):
# Test `derivationally_related_forms()`
self.assertEqual(
L('zap.v.03.nuke').derivationally_related_forms(),
[L('atomic_warhead.n.01.nuke')],
)
self.assertEqual(
L('zap.v.03.atomize').derivationally_related_forms(),
[L('atomization.n.02.atomization')],
)
self.assertEqual(
L('zap.v.03.atomise').derivationally_related_forms(),
[L('atomization.n.02.atomisation')],
)
self.assertEqual(L('zap.v.03.zap').derivationally_related_forms(), [])
def test_meronyms_holonyms(self):
# Test meronyms, holonyms.
self.assertEqual(
S('dog.n.01').member_holonyms(), [S('canis.n.01'), S('pack.n.06')]
)
self.assertEqual(S('dog.n.01').part_meronyms(), [S('flag.n.07')])
self.assertEqual(S('faculty.n.2').member_meronyms(), [S('professor.n.01')])
self.assertEqual(S('copilot.n.1').member_holonyms(), [S('crew.n.01')])
self.assertEqual(
S('table.n.2').part_meronyms(),
[S('leg.n.03'), S('tabletop.n.01'), S('tableware.n.01')],
)
self.assertEqual(S('course.n.7').part_holonyms(), [S('meal.n.01')])
self.assertEqual(
S('water.n.1').substance_meronyms(), [S('hydrogen.n.01'), S('oxygen.n.01')]
)
self.assertEqual(
S('gin.n.1').substance_holonyms(),
[
S('gin_and_it.n.01'),
S('gin_and_tonic.n.01'),
S('martini.n.01'),
S('pink_lady.n.01'),
],
)
def test_antonyms(self):
# Test antonyms.
self.assertEqual(
L('leader.n.1.leader').antonyms(), [L('follower.n.01.follower')]
)
self.assertEqual(
L('increase.v.1.increase').antonyms(), [L('decrease.v.01.decrease')]
)
def test_misc_relations(self):
# Test misc relations.
self.assertEqual(S('snore.v.1').entailments(), [S('sleep.v.01')])
self.assertEqual(
S('heavy.a.1').similar_tos(),
[
S('dense.s.03'),
S('doughy.s.01'),
S('heavier-than-air.s.01'),
S('hefty.s.02'),
S('massive.s.04'),
S('non-buoyant.s.01'),
S('ponderous.s.02'),
],
)
self.assertEqual(S('light.a.1').attributes(), [S('weight.n.01')])
self.assertEqual(S('heavy.a.1').attributes(), [S('weight.n.01')])
# Test pertainyms.
self.assertEqual(
L('English.a.1.English').pertainyms(), [L('england.n.01.England')]
)
def test_lch(self):
# Test LCH.
self.assertEqual(
S('person.n.01').lowest_common_hypernyms(S('dog.n.01')),
[S('organism.n.01')],
)
self.assertEqual(
S('woman.n.01').lowest_common_hypernyms(S('girlfriend.n.02')),
[S('woman.n.01')],
)
def test_domains(self):
# Test domains.
self.assertEqual(S('code.n.03').topic_domains(), [S('computer_science.n.01')])
self.assertEqual(S('pukka.a.01').region_domains(), [S('india.n.01')])
self.assertEqual(S('freaky.a.01').usage_domains(), [S('slang.n.02')])
def test_in_topic_domains(self):
# Test in domains.
self.assertEqual(
S('computer_science.n.01').in_topic_domains()[0], S('access.n.05')
)
self.assertEqual(S('germany.n.01').in_region_domains()[23], S('trillion.n.02'))
self.assertEqual(S('slang.n.02').in_usage_domains()[1], S('airhead.n.01'))
def test_wordnet_similarities(self):
# Path based similarities.
self.assertAlmostEqual(S('cat.n.01').path_similarity(S('cat.n.01')), 1.0)
self.assertAlmostEqual(S('dog.n.01').path_similarity(S('cat.n.01')), 0.2)
self.assertAlmostEqual(
S('dog.n.01').lch_similarity(S('cat.n.01')), 2.028, places=3
)
self.assertAlmostEqual(
S('dog.n.01').wup_similarity(S('cat.n.01')), 0.8571, places=3
)
# Information Content similarities.
brown_ic = wnic.ic('ic-brown.dat')
self.assertAlmostEqual(
S('dog.n.01').jcn_similarity(S('cat.n.01'), brown_ic), 0.4497, places=3
)
semcor_ic = wnic.ic('ic-semcor.dat')
self.assertAlmostEqual(
S('dog.n.01').lin_similarity(S('cat.n.01'), semcor_ic), 0.8863, places=3
)
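        # For reference: path_similarity = 1 / (1 + shortest_path_length);
        # lch = -log(path_len / (2 * taxonomy_depth)); wup =
        # 2 * depth(LCS) / (depth(a) + depth(b)). The jcn and lin measures
        # additionally weight by corpus information content, hence the
        # *_ic corpora loaded above.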
def test_omw_lemma_no_trailing_underscore(self):
expected = sorted([
u'popolna_sprememba_v_mišljenju',
u'popoln_obrat',
u'preobrat',
u'preobrat_v_mišljenju'
])
self.assertEqual(sorted(S('about-face.n.02').lemma_names(lang='slv')), expected)
def test_iterable_type_for_all_lemma_names(self):
# Duck-test for iterables.
# See https://stackoverflow.com/a/36230057/610569
cat_lemmas = wn.all_lemma_names(lang='cat')
eng_lemmas = wn.all_lemma_names(lang='eng')
self.assertTrue(hasattr(eng_lemmas, '__iter__'))
self.assertTrue(hasattr(eng_lemmas, '__next__') or hasattr(eng_lemmas, 'next'))
self.assertTrue(eng_lemmas.__iter__() is eng_lemmas)
self.assertTrue(hasattr(cat_lemmas, '__iter__'))
self.assertTrue(hasattr(cat_lemmas, '__next__') or hasattr(eng_lemmas, 'next'))
self.assertTrue(cat_lemmas.__iter__() is cat_lemmas)
| [
"[email protected]"
] | |
830c884047ab74b87a3b7d60e5412388c2189e4c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2968/60797/295581.py | e68e8eb6ee4d1332321e959eacdf1950a4ee97e5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | class Solution:
def isPalindrome(self, data):
for i in range(len(data)):
if data[i]!=data[len(data)-1-i]:
return False
return True
def find(self, data):
        count = 0
        for i in range(len(data)):
            for j in range(i, len(data)):
                if self.isPalindrome(data[i:j + 1]):
                    count += 1
        return count
if __name__ == '__main__':
ss = input()
data = ss
q = int(input())
for i in range(q):
line = input().split()
if line[0]==1:
data = data+line[1]
elif line[0]==2:
data = line[1][::-1]+data
elif line[0]==3:
s = Solution()
re = s.find(data)
print(re)
| [
"[email protected]"
] | |
c11c53b590a3c16bb3b7c28cae7424a53dea3d87 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/169/57980/submittedfiles/mediaLista.py | c8d5077de447c7b9ad6542447f7fcc7e6877b905 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # -*- coding: utf-8 -*-
def média(lista):
soma=0
for i in range(0,len(lista),1):
soma=soma+lista[i]
resultado=soma/len(lista)
return resultado
n=int(input('Enter how many numbers:'))
l1=[]
for i in range(0,n,1):
    v=float(input('Enter a value for the list:'))
l1.append(v)
print('%.2f' %l1[0])
print('%.2f' %l1[i])
print('%.2f' %média(l1))
print(l1)
| [
"[email protected]"
] | |
72021e0c88e6bf20d75296bf0ad2a7b52c200de2 | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/easy-money_20210204111955.py | 97432fa7c42883918a521fd5e172518f9ebad357 | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,181 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : easy-money.py
@Time : 2021/02/04 09:03:02
@Author : Jiajun Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
'''
# East Money (eastmoney.com): IPO initial-filing and review-meeting data
import re
import pickle
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import csv
import time
from bs4 import BeautifulSoup
import configparser
import os.path
from utils import save_pickle,load_pickle
# config = configparser.ConfigParser()
# config.read('./src/Config.ini')
# # headers = config['eastmoney']['headers']
# base_url = config['eastmoney']['base_url']
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
lastDate = '2021-1-21'
eastmoney_raw_data_path = './data/EastMoney/eastmoney_raw_data.csv'
zzsc_csv_path = './data/EastMoney/eastmoney_zzsc.csv'
zzsc_pkl_path = './saved_config/eastmoney_zzsc.pkl'
szzxb_stocksInfo_path = './saved_config/szzxb_stocksInfo.pkl'
shzb_stocksInfo_path = './saved_config/shzb_stocksInfo.pkl'
zb_zxb_stocksInfo_path = './saved_config/zb_zxb_stocksInfo.pkl'
eastmoney_meeting_path = './data/EastMoney/eastmoney_data_meeting.csv'
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def update_date():
'''
    Fetch the date of the latest update.
'''
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
newDate = soup.find('option').get_text()
return newDate
def dateList_gen():
'''
fetch all existing date_data
'''
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
dateList = [i.text for i in soup.findAll('option')]
return dateList
def update_eastmoneyData():
    # incremental update: create the raw-data CSV with a header first if it does not exist
# newDate = update_date()
dataList = dateList_gen()
if not os.path.isfile('./data/EastMoney/eastmoneyRawData.csv'):
columns = ['机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接']
with open('./data/EastMoney/eastmoneyRawData.csv','w') as f:
writer = csv.DictWriter(f, fieldnames=columns)
writer.writeheader()
for date in reversed(dataList):
if not os.path.isfile('./data/EastMoney/首发信息/{}.csv'.format(date)):
print('find new date:{}, fetching.....'.format(date))
df =get_eastmoneyData(date)
df.to_csv('./data/EastMoney/eastmoneyRawData.csv', mode='a', header=False,index=False,encoding='utf-8-sig')
return
def get_eastmoneyData(date):
query = {'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'fd' : date,
'rt': '53721774'
}
rs = requests.get(base_url, params=query, headers=headers)
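    # The endpoint returns JSONP-style JavaScript ("var IBhynDx={...}") rather
    # than JSON, so strip the wrapper and evaluate the embedded list literal.
    # ast.literal_eval would be a safer drop-in for eval on remote data.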
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
temp = [i.split(',') for i in data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
    df.replace({'是否提交财务自查报告': ' '}, '是', inplace=True)
    df.replace({'是否提交财务自查报告': '不适用'}, '是', inplace=True)
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df.to_csv('C:/Users/chen/Desktop/IPO_info/data/EastMoney/首发信息/{}.csv'.format(date),index=False, encoding='utf-8-sig')
return df
def update_zzscData():
newDate = update_date()
if newDate != lastDate:
try:
zzsc_dict = load_pickle(zzsc_pkl_path)
data = get_zzscData(newDate)
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
except:
zzsc_dict = gen_zzscDict()
    else:
        try:
            zzsc_dict = load_pickle(zzsc_pkl_path)
        except Exception:
            zzsc_dict = gen_zzscDict()
    zzsc_df = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
    zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\*', '', regex=True)
    zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'股份有限公司', '', regex=True)
    zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\(', '(', regex=True)
    zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\)', ')', regex=True)
    zzsc_df.to_csv(zzsc_csv_path,
                   encoding='utf-8-sig',
                   index=False)
    save_pickle(zzsc_dict, zzsc_pkl_path)
    return zzsc_df
def gen_zzscDict():
    dateList = dateList_gen()
zzsc_dict = {}
for date in dateList:
data = get_zzscData(date)
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
save_pickle(zzsc_dict,zzsc_pkl_path)
return zzsc_dict
def get_zzscData(date):
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': date,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
return ''
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
return data
def get_meetingData(newDate):
if newDate != lastDate or not os.path.isfile(eastmoney_meeting_path):
meetingInfo = []
        for marketType in ['2', '4']:  # 2 = Main Board, 4 = SME Board
query = {
'type': 'NS',
'sty': 'NSSH',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': marketType,
'rt': '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = [
'时间戳', 'yyy', '公司代码', '机构名称', '详情链接', '申报日期', '上会日期', '申购日期', '上市日期',
'9', '拟发行数量', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '当前状态', '上市地点',
'主承销商', '承销方式', '发审委委员', '网站', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df['详情链接'] = df['公司代码'].apply(
lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[[
'机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期', '上会日期', '申购日期', '上市日期',
'主承销商', '承销方式', '9', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '发审委委员',
'网站', '公司代码', 'yyy', '时间戳', '简称', '详情链接', '文件链接'
]]
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'\(', '(', regex=True)
df['机构名称'] = df['机构名称'].replace(r'\)', ')', regex=True)
df.to_csv(
eastmoney_meeting_path,
index=False,
encoding='utf-8-sig')
else:
df = pd.read_csv(eastmoney_meeting_path,keep_default_na=False)
return df
def eastmoney_cleanUP():
east_money = pd.read_csv(eastmoney_raw_data_path, keep_default_na=False)
    east_money.replace({'是否提交财务自查报告': ' '}, '是', inplace=True)
    east_money.replace({'是否提交财务自查报告': '不适用'}, '是', inplace=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\*', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\(', '(', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\)', ')', regex=True)
east_money = east_money[east_money['板块'] != '创业板']
east_money['类型'] = pd.Categorical(east_money['类型'],
categories=["已受理","已反馈","预先披露更新","中止审查","已提交发审会讨论,暂缓表决",
"已上发审会,暂缓表决","已通过发审会"],ordered=True)
east_money.sort_values(['机构名称','保荐机构','类型','日期'], inplace=True)
# east_money.to_csv('./pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset=['机构名称','保荐机构', '类型',],
keep='first',
inplace=True)
east_money.to_csv(
'./data/EastMoney/eastmoney_data_cleaned_v2.csv',
encoding='utf-8-sig',
index=False)
return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
'''
    Schema of the per-company record for Main-Board / SME-Board stocks
    (keys are the Chinese field names used throughout this module):
    主板、中小板 = {'机构名称':'',
'简称':'',
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'[日期]',
'已反馈':'[日期]',
'预先披露更新':'[日期]',
'发审会':{'中止审查':'[日期]',
'已上发审会,暂缓表决':'[日期]',
'已提交发审会讨论,暂缓表决:'[日期]',
'已通过发审会':'[日期]'},
'终止审查':'[日期]',
'上市日期':'[日期]',
'保荐机构':'',
'律师事务所':,
'会计师事务所':'',
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':'[链接]'
}
'''
    all_data = {}  # master dict of all records, keyed by company name
ekk = cleaned_easymoney_df.values.tolist()
for i in ekk:
if i[0] not in all_data:
all_data[i[0]] = {
'机构名称': i[0] + '股份有限公司',
'简称': i[15],
'Wind代码': '',
'统一社会信用代码': '',
'板块': i[2],
'注册地': '',
'所属行业': '',
'经营范围': '',
'预先披露': '',
'已反馈': '',
'预先披露更新': '',
'发审会': {
'中止审查': '',
'已上发审会,暂缓表决': '',
'已提交发审会讨论,暂缓表决': '',
'已通过发审会': ''
},
'终止审查': '',
'上市日期': '',
'保荐机构': i[4],
'保荐代表人': '',
'律师事务所': i[6],
'签字律师': '',
'会计师事务所': i[8],
'签字会计师': '',
'发行信息': {
'拟发行数量(万)': '',
'发行前总股本(万)': '',
'发行后总股本(万)': ''
},
'反馈文件': ''
}
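        # route each filing status (类型) into the matching date field below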
if i[1] == '已受理':
all_data[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
all_data[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
all_data[i[0]]['预先披露更新'] = i[12]
elif i[1] == '已通过发审会':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] == '已提交发审会讨论,暂缓表决':
all_data[i[0]]['发审会']['已提交发审会讨论,暂缓表决'] = i[12]
elif i[1] == '已上发审会,暂缓表决':
all_data[i[0]]['发审会']['已上发审会,暂缓表决'] = i[12]
elif i[1] == '中止审查':
all_data[i[0]]['发审会']['中止审查'] = i[12]
if all_data[i[0]]['注册地'] == '' and i[3] != '':
all_data[i[0]]['注册地'] = i[3]
if all_data[i[0]]['所属行业'] == '' and i[11] != '':
all_data[i[0]]['所属行业'] = i[11]
if all_data[i[0]]['保荐代表人'] == '' and i[5] != '':
all_data[i[0]]['保荐代表人'] = i[5]
if all_data[i[0]]['签字律师'] == '' and i[7] != '':
all_data[i[0]]['签字律师'] = i[7]
if all_data[i[0]]['签字会计师'] == '' and i[9] != '':
all_data[i[0]]['签字会计师'] = i[9]
    # merge in the review-meeting records
ekk2 = meetingInfo_df.values.tolist()
error_set = {}
for i in ekk2:
i[0] = i[0].replace(r'股份有限公司', '')
if i[0] not in all_data:
print("Error: Cannot find ", i[0])
error_set.update({i[0]: i[5]})
continue
if i[1] == '上会未通过':
all_data[i[0]]['发审会']['上会未通过'] = i[5]
elif i[1] == '取消审核':
all_data[i[0]]['发审会']['取消审核'] = i[5]
elif i[1] == '上会通过':
all_data[i[0]]['发审会']['已通过发审会'] = i[5]
if i[7] != '':
all_data[i[0]]['上市时间'] = i[7]
all_data[i[0]]['发行信息']['拟发行数量'] = "{:.2f}".format(int(i[3]) / 10000)
all_data[i[0]]['发行信息']['发行前总股本'] = "{:.2f}".format(int(i[11]) / 10000)
all_data[i[0]]['发行信息']['发行后总股本'] = "{:.2f}".format(int(i[12]) / 10000)
    # merge in the termination-of-review records
ekk3 = zzsc_df.values.tolist()
for i in ekk3:
name = i[0].replace(r'股份有限公司', '')
if name not in all_data:
print("Error: Cannot find in zzsc", i[0])
error_set.update({name: i[1]})
continue
all_data[name]['终止审查'] = i[1]
save_pickle(all_data, zb_zxb_stocksInfo_path)
return all_data
# def update_all():
# try:
# with open('','rb') as file:
# zb_zxb_dict = pickle.load(file)
# _,temp = update_eastmoneyData()
# for i in temp:
# if i not in zb_zxb_dict:
# pass
# else:
# # columns = [
# # '会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
# # '是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
# # ]
# i[]
def update_stockInfo():
try:
allStocksInfo = load_pickle(zb_zxb_stocksInfo_path)
except:
east_money_df = eastmoney_cleanUP()
        meetingInfo_df = get_meetingData(update_date())
        zzsc_df = update_zzscData()
        allStocksInfo = gen_finalData(east_money_df, meetingInfo_df, zzsc_df)
    return allStocksInfo
if __name__ == '__main__':
# newDate = update_date()
# # update_eastmoneyData(newDate)
# east_money_df = eastmoney_cleanUP()
# meetingInfo_df = get_meetingData(newDate)
# zzsc_df = update_zzscData(newDate)
# # dateList = date_gen()
# # get_eastmoneyData(dateList)
# # east_money_df = eastmoney_cleanUP()
# # east_money_df = pd.read_csv('./EastMoney/easymoney_data_new.csv',keep_default_na=False)
# # meetingInfo_df = pd.read_csv('./EastMoney/eastmoney_data_meeting.csv',keep_default_na=False)
# # meetingInfo_df = get_meetingData()
# # zzsc_df = pd.read_csv('./EastMoney/zzsc.csv')
# all_data,_,_ = gen_finalData(east_money_df,meetingInfo_df,zzsc_df)
# print('Complete!')
eastmoney_cleanUP() | [
"[email protected]"
] | |
61e71bab3030183eea612146af02e8cf41f6e23b | af7ab3c9d189caf4a22b2a83a03da4560dba6166 | /generated/administration_data/OrderStateNVL.py | c1789a2ccf2eca631fc260554e312bc933aef3d5 | [] | no_license | Eggwise/unit4_python_api | 43418d16d84abb73ddd843b8b268883f02ff996b | 421195392e408bd9e14bda0851817c5ab835ebaf | refs/heads/master | 2021-01-15T13:43:21.486918 | 2016-09-19T14:10:02 | 2016-09-19T14:10:02 | 68,611,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import requests, json
from generated.base.unit4_base import Unit4Base
class OrderStateNVL(Unit4Base):
def get_orderStateNVL(self, database, ):
request_args = locals()
url_template = 'api/{database}/OrderStateNVL'
url = url_template.format(**request_args)
#print(url)
url = self.authorize(url)
response = requests.get(url=url)
print(response.text)
return json.loads(response.text)
| [
"[email protected]"
] | |
0df5002081686537d5ff5e42c6673d79d491c180 | 473fc28d466ddbe9758ca49c7d4fb42e7d82586e | /app/src/main/java/com/syd/source/aosp/external/nanopb-c/tests/package_name/SConscript | 8f1b9021d3f9cf30f232d3b3a4fab1b5ba771c6a | [
"Zlib"
] | permissive | lz-purple/Source | a7788070623f2965a8caa3264778f48d17372bab | e2745b756317aac3c7a27a4c10bdfe0921a82a1c | refs/heads/master | 2020-12-23T17:03:12.412572 | 2020-01-31T01:54:37 | 2020-01-31T01:54:37 | 237,205,127 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | # Check that alltypes test case works also when the .proto file defines
# a package name.
Import("env")
# Build a modified alltypes.proto
def modify_proto(target, source, env):
'''Add a "package test.package;" directive to the beginning of the .proto file.'''
data = open(str(source[0]), 'r').read()
open(str(target[0]), 'w').write("package test.package;\n\n" + data)
return 0
env.Command("alltypes.proto", "#alltypes/alltypes.proto", modify_proto)
env.Command("alltypes.options", "#alltypes/alltypes.options", Copy("$TARGET", "$SOURCE"))
env.NanopbProto(["alltypes", "alltypes.options"])
# Build a modified encode_alltypes.c
def modify_c(target, source, env):
'''Add package name to type names in .c file.'''
data = open(str(source[0]), 'r').read()
type_names = ['AllTypes', 'MyEnum', 'HugeEnum']
for name in type_names:
data = data.replace(name, 'test_package_' + name)
open(str(target[0]), 'w').write(data)
return 0
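# e.g. every occurrence of 'AllTypes' in the copied source becomes
# 'test_package_AllTypes' after this rewrite.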
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", modify_c)
# Encode and compare results to original alltypes testcase
enc = env.Program(["encode_alltypes.c", "alltypes.pb.c", "$COMMON/pb_encode.o"])
refdec = "$BUILD/alltypes/decode_alltypes$PROGSUFFIX"
env.RunTest(enc)
env.Compare(["encode_alltypes.output", "$BUILD/alltypes/encode_alltypes.output"])
| [
"[email protected]"
] | ||
cfe3d298f48a17667eeabcf5f110cf65a8e926b9 | 52b79e4cd1e26969a3ebb3bca8620519071bea98 | /answers/05_basic_scripts/task_5_2a.py | 83e4f23de538e9e128e63d8b8691d6ce8a62b918 | [] | no_license | hariram32/pyneng-answers-en | 631bc149b8a219a2de86de82681ffba3d1ff30ee | 84b7240b00d3a4ab9011952db662f716d1cd31b8 | refs/heads/main | 2023-03-16T00:12:38.954431 | 2021-03-09T15:40:10 | 2021-03-09T15:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | # -*- coding: utf-8 -*-
"""
Task 5.2a
Copy and modify the script from task 5.2 so that, if the user entered a host address
rather than a network address, convert the host address to a network address
and print the network address and mask, as in task 5.2.
An example of a network address (all host bits are equal to zero):
* 10.0.1.0/24
* 190.1.0.0/16
Host address example:
* 10.0.1.1/24 - host from network 10.0.1.0/24
* 10.0.5.195/28 - host from network 10.0.5.192/28
If the user entered the address 10.0.1.1/24, the output should look like this:
Network:
10 0 1 0
00001010 00000000 00000001 00000000
Mask:
/24
255 255 255 0
11111111 11111111 11111111 00000000
Check the script work on different host/mask combinations, for example:
10.0.5.195/28, 10.0.1.1/24
Hint:
The network address can be calculated from the binary host address and the netmask.
If the mask is 28, then the network address is the first 28 bits host addresses + 4 zeros.
For example, the host address 10.1.1.195/28 in binary will be:
bin_ip = "00001010000000010000000111000011"
Then the network address will be the first 28 characters from bin_ip + 0000
(4 because in total there can be 32 bits in the address, and 32 - 28 = 4)
00001010000000010000000111000000
Restriction: All tasks must be done using the topics covered in this and previous chapters.
"""
network = input("Enter network address: ")
ip, mask = network.split("/")
ip_list = ip.split(".")
mask = int(mask)
oct1, oct2, oct3, oct4 = [
int(ip_list[0]),
int(ip_list[1]),
int(ip_list[2]),
int(ip_list[3]),
]
bin_ip_str = "{:08b}{:08b}{:08b}{:08b}".format(oct1, oct2, oct3, oct4)
bin_network_str = bin_ip_str[:mask] + "0" * (32 - mask)
net1, net2, net3, net4 = [
int(bin_network_str[0:8], 2),
int(bin_network_str[8:16], 2),
int(bin_network_str[16:24], 2),
int(bin_network_str[24:32], 2),
]
bin_mask = "1" * mask + "0" * (32 - mask)
m1, m2, m3, m4 = [
int(bin_mask[0:8], 2),
int(bin_mask[8:16], 2),
int(bin_mask[16:24], 2),
int(bin_mask[24:32], 2),
]
ip_output = """
Network:
{0:<8} {1:<8} {2:<8} {3:<8}
{0:08b} {1:08b} {2:08b} {3:08b}"""
mask_output = """
Mask:
/{0}
{1:<8} {2:<8} {3:<8} {4:<8}
{1:08b} {2:08b} {3:08b} {4:08b}
"""
print(ip_output.format(net1, net2, net3, net4))
print(mask_output.format(mask, m1, m2, m3, m4))
| [
"[email protected]"
] | |
43559fe498959af1ed9d3b2c78c4f80b28e8a436 | f384d811159201c1d375cc36b2402c643b7cd66c | /bughipster/website/login.py | cff3e38fd64a121e83e8073cb6831e46721eaad6 | [
"BSD-3-Clause"
] | permissive | pombredanne/django-bug-hipster | f23202a78a99ef0487fd05e7cae7882b1696a1ad | 5e9cfe1efd22494b8c82176a5d7f145f899f2ed2 | refs/heads/master | 2021-01-15T11:33:43.420214 | 2015-06-10T20:21:19 | 2015-06-10T20:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,144 | py | """
bughipster.website.login
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Xavier Ordoquy, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import http
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import (
authenticate, get_user_model, login as auth_login)
from django.utils.text import capfirst
# Basically, this AuthenticationForm is django.contrib.auth.forms.AuthenticationForm
# which has been changed to match the Bugzilla field names.
# The AuthenticationForm class is under Copyright (c) Django Software Foundation and
# individual contributors.
class AuthenticationForm(forms.Form):
Bugzilla_login = forms.CharField(max_length=254)
Bugzilla_password = forms.CharField(
label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(
UserModel.USERNAME_FIELD)
if self.fields['Bugzilla_login'].label is None:
self.fields['Bugzilla_login'].label = capfirst(
self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('Bugzilla_login')
password = self.cleaned_data.get('Bugzilla_password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
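# Hedged usage sketch (not part of the original module): a view could
# authenticate a Bugzilla-style POST roughly like
#
#     form = AuthenticationForm(request, data=request.POST)
#     if form.is_valid():
#         auth_login(request, form.get_user())
#
# LoginMixin below implements this flow for class-based views.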
class LoginMixin(object):
def post(self, request, *args, **kwargs):
if 'GoAheadAndLogIn' in request.POST:
login_form = AuthenticationForm(data=request.POST or None)
if login_form.is_valid():
auth_login(request, login_form.get_user())
return http.HttpResponseRedirect(request.get_full_path())
# We failed to login. Warn the user to go back ala Bugzilla style
context = self.get_context_data(
title="Invalid Username Or Password", **kwargs)
return self.response_class(
request=self.request,
template="login-failed.html",
context=context,
using=self.template_engine)
# By default, just call the parent class
return super(LoginMixin, self).post(request, *args, **kwargs)
| [
"[email protected]"
] | |
79606a8a3e7d6b7f8105eb9cda5e6d30f178bd77 | 17660b97a12343c177d766377afbd16787762fa7 | /14/00/2.py | 739793fd98237fcbbefe6a9afb49a27303ac16d0 | [
"CC0-1.0"
] | permissive | pylangstudy/201706 | a39caac815f65f226a6b34743f0a0a4eac33ec8e | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | refs/heads/master | 2023-01-07T21:25:12.756348 | 2017-06-30T00:35:24 | 2017-06-30T00:35:24 | 93,048,112 | 0 | 1 | null | 2022-12-21T12:10:46 | 2017-06-01T10:59:59 | Python | UTF-8 | Python | false | false | 102 | py | try:
    f = open('some.txt', mode='r', encoding='utf-8')
    print(f.read())
finally:
    if 'f' in locals():  # open() may have failed before f was bound
        f.close()
| [
"[email protected]"
] | |
873ce238a7dfd0ef0948e5a9922e5e08f39636e5 | 39225163672910ad704e730e20d21a54c8e3be0f | /examples/demo_skyview.py | 36fa93ae3c25f1dcce418ccc803ded48a25244ea | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | astrofrog/pywcsgrid2 | 4f422962523a928b4bacf259e7241e1eadcb50cc | d861e5a4987848a2ba3bed6b0d1278457589071a | refs/heads/master | 2021-01-21T00:52:49.615248 | 2012-04-16T15:06:50 | 2012-04-16T15:06:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | import pyfits
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.axes_grid import AxesGrid
#from pywcsgrid2.axes_wcs import GridHelperWcs, AxesWcs
import pywcsgrid2
# read in the first image
xray_name="pspc_skyview.fits"
f_xray = pyfits.open(xray_name)
header_xray = f_xray[0].header
# the second image
radio_name="radio_21cm.fits"
f_radio = pyfits.open(radio_name)
header_radio = f_radio[0].header
# grid helper
grid_helper = pywcsgrid2.GridHelper(wcs=header_xray)
# AxesGrid to display tow images side-by-side
fig = plt.figure(1, (6,3.5))
grid = AxesGrid(fig, (0.15, 0.15, 0.8, 0.75), nrows_ncols=(1, 2),
axes_pad=0.1, share_all=True,
cbar_mode="each", cbar_location="top", cbar_pad=0,
axes_class=(pywcsgrid2.Axes, dict(grid_helper=grid_helper)))
ax1 = grid[0]
# use imshow for a simple image display.
im = ax1.imshow(f_xray[0].data, origin="lower", vmin=0., cmap=cm.gray_r,
interpolation="nearest")
im.set_clim(4.e-05, 0.00018)
ticklocs = [6, 9, 12, 15]
cax1 = grid.cbar_axes[0]
cbar1 = cax1.colorbar(im)
cax1.toggle_label(True)
cax1.set_xticks([t*1.e-5 for t in ticklocs])
cax1.set_xticklabels(["$%d$" % t for t in ticklocs])
#cax1.xaxis.get_major_formatter().set_offset_string(r"$\times 10^{-5}$")
cax1.annotate(r"$\times 10^{-5}$",
xy=(1,1), xycoords="axes fraction",
xytext=(0, 15), textcoords="offset points",
va="bottom", ha="right", size="small")
ax2 = grid[1]
d = f_radio[0].data
# The second image has a different wcs. While imshow works, it will
# interpolate the second image into the image coordinate of the first
# image. You may use pcolormesh when the pixel size of the second
# image is larger than that of the first image. Or you may use
# imshow_affine.
#im2 = ax2[header_radio].pcolormesh(d, cmap=cm.gray_r)
im2 = ax2[header_radio].imshow_affine(d,
cmap=cm.gray_r, origin="lower")
grid.cbar_axes[1].colorbar(im2)
grid.cbar_axes[1].toggle_label(True)
# draw contour. The data points of the contour lines are created in
# the image coordinate of the second image and then are transformed to
# the image coordinate of the first image.
cont = ax2[header_radio].contour(d, colors="k")
# draw contour of the second image in the first axes.
cont2 = ax1[header_radio].contour(d, colors="k")
ax1.add_inner_title("X-ray", loc=2)
ax2.add_inner_title("Radio", loc=2)
ax1.locator_params("both", nbins=2) # since ax1 and ax2 shares a
# grid_helper, it affects not only
# ax1 but also ax2.
plt.show()
| [
"[email protected]"
] | |
13d8e1126016f032ec40167184632c2550e1b5fa | ec99f2c09b2c9a3860a2e5fdea061089cd147482 | /webprogrammering/docs/eks4/eks4.py | 1c9c60da03a7b5752964059290b3911bfa5a2618 | [] | no_license | sprotg/2019_3d | 752d3cc19cbff99effeccc9207d5ca26de4ad97b | 0250f9cd8045272ca6bf58dc59981adf28371c51 | refs/heads/master | 2020-07-05T16:03:03.465870 | 2020-03-16T13:49:47 | 2020-03-16T13:49:47 | 202,692,753 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)
@app.route("/")
@app.route('/index')
def index():
return render_template('formular.html')
@app.route("/modtag_data", methods=['POST'])
def modtag():
modtaget_navn = request.form['navn']
return render_template("vis.html", navn = modtaget_navn)
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
ee2addb23de8f4b619906a7926cf93adef98483b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03681/s217062643.py | 4121026c13621b021fe355d631d8f4c15cb0ffe3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import math
N_inu, M_saru = map(int, input().split())
d = abs(N_inu - M_saru)
if (d > 1):
ans = 0
else:
if (d == 1):
ans = math.factorial(N_inu) * math.factorial(M_saru)
else:
ans = 2 * math.factorial(N_inu) * math.factorial(M_saru)
ans = ans % (10 ** 9 + 7)
print(ans) | [
"[email protected]"
] | |
917f40447e4de7c7a4b51c43d671f0a910974707 | 85fc4f742f16befdd1cea2bc0dcfb57f1f8107d8 | /tests/cli/test_detect_embargo.py | 8c129519d22286d8f54c8825cd5d2281aeb09b28 | [
"LGPL-3.0-only",
"Apache-2.0"
] | permissive | sdodson/doozer | fcd01e6775478dc02c3703774197a41b755ce033 | 1522990fa987c6f1b4eaa500c63772ac0e0768b5 | refs/heads/master | 2023-01-19T16:06:26.801202 | 2020-08-25T20:10:38 | 2020-08-25T20:10:38 | 291,060,114 | 0 | 0 | Apache-2.0 | 2020-08-28T13:57:32 | 2020-08-28T13:57:31 | null | UTF-8 | Python | false | false | 6,526 | py | import io
import json
from unittest import TestCase
from unittest.mock import MagicMock, patch
import yaml
from doozerlib.cli import detect_embargo
class TestDetectEmbargoCli(TestCase):
def test_detect_embargoes_in_nvrs(self):
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
nvrs = [b["nvr"] for b in builds]
expected = [builds[1]]
with patch("doozerlib.brew.get_build_objects", return_value=builds), \
patch("doozerlib.embargo_detector.EmbargoDetector.find_embargoed_builds", return_value=[2]):
actual = detect_embargo.detect_embargoes_in_nvrs(MagicMock(), nvrs)
self.assertListEqual(actual, expected)
def test_detect_embargoes_in_tags(self):
included_tags = ["a-candidate", "b-candidate"]
included_builds = [
[{"id": 11, "nvr": "foo11-1.2.3-1.p0"}, {"id": 12, "nvr": "foo12-1.2.3-1.p1"}, {"id": 13, "nvr": "foo13-1.2.3-1.p1"}],
[{"id": 21, "nvr": "foo21-1.2.3-1.p0"}, {"id": 22, "nvr": "foo22-1.2.3-1.p1"}, {"id": 23, "nvr": "foo23-1.2.3-1.p1"}],
]
excluded_tags = ["a", "b"]
excluded_builds = [
[{"id": 12, "nvr": "foo12-1.2.3-1.p1"}],
[{"id": 22, "nvr": "foo22-1.2.3-1.p1"}],
]
builds_to_detect = [b for builds in included_builds for b in builds if b["id"] in {11, 13, 21, 23}]
event_id = 42
expected = [b for builds in included_builds for b in builds if b["id"] in {13, 23}]
with patch("doozerlib.brew.get_latest_builds", return_value=included_builds), \
patch("doozerlib.brew.get_tagged_builds", return_value=excluded_builds), \
patch("doozerlib.embargo_detector.EmbargoDetector.find_embargoed_builds", return_value=[13, 23]) as find_embargoed_builds:
actual = detect_embargo.detect_embargoes_in_tags(MagicMock(), "all", included_tags, excluded_tags, event_id)
find_embargoed_builds.assert_called_once_with(builds_to_detect)
self.assertEqual(actual, expected)
def test_detect_embargoes_in_pullspecs(self):
pullspecs = ["example.com/repo:foo", "example.com/repo:bar"]
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
nvrs = [("foo", "1.2.3", "1.p0"), ("bar", "1.2.3", "1.p1")]
expected = ([pullspecs[1]], [builds[1]])
fake_runtime = MagicMock()
fake_runtime.parallel_exec.return_value.get.return_value = nvrs
with patch("doozerlib.cli.detect_embargo.detect_embargoes_in_nvrs", return_value=[builds[1]]) as detect_embargoes_in_nvrs:
actual = detect_embargo.detect_embargoes_in_pullspecs(fake_runtime, pullspecs)
detect_embargoes_in_nvrs.assert_called_once_with(fake_runtime, [f"{n}-{v}-{r}" for n, v, r in nvrs])
self.assertEqual(actual, expected)
def test_detect_embargoes_in_releases(self):
releases = ["a", "b"]
release_pullspecs = {
"a": ["example.com/repo:dead", "example.com/repo:beef"],
"b": ["example.com/repo:foo", "example.com/repo:bar"],
}
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
expected = ([releases[1]], [release_pullspecs["b"][1]], [builds[1]])
fake_runtime = MagicMock()
fake_runtime.parallel_exec.return_value.get.return_value = [release_pullspecs[k] for k in releases]
with patch("doozerlib.cli.detect_embargo.detect_embargoes_in_pullspecs") as detect_embargoes_in_pullspecs:
detect_embargoes_in_pullspecs.side_effect = lambda _, pullspecs: (["example.com/repo:bar"], [builds[1]]) if "example.com/repo:bar" in pullspecs else ([], [])
actual = detect_embargo.detect_embargoes_in_releases(fake_runtime, releases)
detect_embargoes_in_pullspecs.assert_called()
detect_embargoes_in_pullspecs.reset_mock()
self.assertEqual(actual, expected)
@patch("doozerlib.exectools.cmd_assert")
def test_get_nvr_by_pullspec(self, fake_cmd_assert):
pullspec = "registry-proxy.engineering.redhat.com/rh-osbs/openshift-ose-cluster-autoscaler:v4.3.25-202006081335"
expected = ("atomic-openshift-cluster-autoscaler-container", "v4.3.25", "202006081335")
fake_cmd_assert.return_value = ("""
{"config":{"Labels": {"com.redhat.component":"atomic-openshift-cluster-autoscaler-container", "version":"v4.3.25", "release":"202006081335"}}}
""", "")
actual = detect_embargo.get_nvr_by_pullspec(pullspec)
self.assertEqual(actual, expected)
@patch("doozerlib.exectools.cmd_assert")
def test_get_image_pullspecs_from_release_payload(self, fake_cmd_assert):
fake_cmd_assert.return_value = ("""
{"references":{"spec":{"tags":[{"name":"foo","from":{"name":"registry.example.com/foo:abc"}}, {"name":"bar","from":{"name":"registry.example.com/bar:def"}}]}}}
""", "")
actual = list(detect_embargo.get_image_pullspecs_from_release_payload("doesn't matter"))
expected = ["registry.example.com/foo:abc", "registry.example.com/bar:def"]
self.assertListEqual(actual, expected)
@patch("builtins.exit")
@patch('sys.stdout', new_callable=io.StringIO)
def test_print_result_and_exit(self, mock_stdout, mock_exit):
embargoed_builds = [{"id": 1}, {"id": 2}]
embargoed_pullspecs = ["a", "b"]
embargoed_releases = ["d", "e"]
expected = {
"has_embargoes": True,
"builds": embargoed_builds,
"pullspecs": embargoed_pullspecs,
"releases": embargoed_releases
}
detect_embargo.print_result_and_exit(embargoed_builds, embargoed_pullspecs, embargoed_releases, True, False)
mock_exit.assert_called_once_with(0)
actual = yaml.safe_load(mock_stdout.getvalue())
self.assertEqual(actual, expected)
mock_exit.reset_mock()
mock_stdout.truncate(0)
mock_stdout.seek(0)
detect_embargo.print_result_and_exit(embargoed_builds, embargoed_pullspecs, embargoed_releases, False, True)
mock_exit.assert_called_once_with(0)
actual = json.loads(mock_stdout.getvalue())
self.assertEqual(actual, expected)
mock_exit.reset_mock()
detect_embargo.print_result_and_exit(None, None, None, False, False)
mock_exit.assert_called_once_with(2)
| [
"[email protected]"
] | |
7e2eb39af0422a8717078f0128efb39342ff9ce9 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/macle_20220726/a/main.py | 4f52c6d938d8e5669ac0da85c6db79449fbe884d | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
def main():
from collections import Counter
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
c = Counter(a + b)
ans = list()
for key, value in c.items():
if value == 1:
ans.append(key)
print(*sorted(ans))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5d6d47498a0f1f651904fa28ec48d7fc3776e771 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/fnkgar002/question1.py | 4131eddc1868a0edb6d9606e520abe1fb271fe61 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | height = eval(input("Enter the height of the rectangle: \n"))
width = eval(input("Enter the width of the rectangle: \n"))
for i in range(height):
print(width*"*") | [
"[email protected]"
] | |
d93528fdd7836b06e0a1f4ca52add943561c062d | a37c93503ecb205b16f860664629a41b7c48250e | /planning/forms/plan.py | 2a4aba07d5e85ce78233ae171ac740a56fb976b1 | [] | no_license | samuelitwaru/PMS | f86681eaec2e34142447c3e66ab8d0939f4e0dd0 | 7bf7c9c511dd727479020540eef2a86ef561369e | refs/heads/master | 2023-01-10T04:35:16.852447 | 2020-10-22T09:03:48 | 2020-10-22T09:03:48 | 290,486,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,597 | py | from django import forms
from django.contrib import messages
from models import Expense, Profile, ConsolidationGroup, Funder, ProcurementType
from utils import get_pdu_head, get_user_department
from ..utils import create_new_funder
from templatetags.app_tags import currency
class CreatePlanForm(forms.Form):
subject_of_procurement = forms.CharField(initial="Supply of Computers")
expense = forms.ChoiceField()
quantity = forms.IntegerField(initial=1)
unit_of_measure = forms.CharField(initial='Months')
estimated_unit_cost = forms.IntegerField(label="Estimated Unit Cost", initial=1000000)
source_of_funding = forms.CharField(widget=forms.RadioSelect(attrs={"class":"source_of_funding_radio"}))
# other_funder = forms.CharField(label="Specify other Funder", max_length=64, required=False, widget=forms.TextInput())
date_required_q1 = forms.BooleanField(label="Quarter 1", required=False)
date_required_q2 = forms.BooleanField(label="Quarter 2", required=False)
date_required_q3 = forms.BooleanField(label="Quarter 3", required=False)
date_required_q4 = forms.BooleanField(label="Quarter 4", required=False)
def __init__(self, request=None, procurement_type=None, user=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
self.request = request
self.procurement_type = procurement_type
self.fields["source_of_funding"].widget.choices = self.get_source_of_funding_choices()
self.fields["expense"].choices = self.get_expense_choices()
def get_expense_choices(self):
return [(expense.id, f"{expense.name}") for expense in self.procurement_type.expense_set.all()]
def get_source_of_funding_choices(self):
return [(funder.id, funder.name) for funder in Funder.objects.all()]
def clean(self):
cleaned_data = super().clean()
q1 = cleaned_data.get("date_required_q1")
q2 = cleaned_data.get("date_required_q2")
q3 = cleaned_data.get("date_required_q3")
q4 = cleaned_data.get("date_required_q4")
estimated_unit_cost = cleaned_data.get("estimated_unit_cost")
quantity = cleaned_data.get("quantity")
total_estimated_cost = estimated_unit_cost * quantity
user_department = get_user_department(self.user)
budget_sealing = user_department.budget_sealing
total_estimated_departmental_plan_cost = total_estimated_cost + user_department.total_estimated_plan_cost()
if total_estimated_departmental_plan_cost > budget_sealing:
messages.error(self.request, f"You are exceeding the budget limit ({currency(budget_sealing)})", extra_tags="danger")
self.add_error('estimated_unit_cost', f"You are exceeding the budget limit ({currency(budget_sealing)})")
if not (q1 or q2 or q3 or q4):
self.add_error('date_required_q4', "Select at least 1 Quarter")
source_of_funding = cleaned_data.get("source_of_funding")
funder = Funder.objects.filter(id=source_of_funding).first()
# if not funder:
# other_funder = cleaned_data.get("other_funder")
# if other_funder:
# funder = create_new_funder(other_funder)
# else:
# self.add_error('other_funder', "Funder must be specified.")
cleaned_data["source_of_funding"] = funder
expense = Expense.objects.get(id=(cleaned_data.get("expense")))
cleaned_data["chart_of_account"] = expense
cleaned_data["procurement_type"] = expense.procurement_type
class SelectPlanProcurementTypeForm(forms.Form):
procurement_type = forms.CharField(widget=forms.RadioSelect())
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["procurement_type"].widget.choices = [(proc_type.id, proc_type.name) for proc_type in ProcurementType.objects.all()]
    def clean(self):
        cleaned_data = super().clean()
        procurement_type = cleaned_data.get("procurement_type")
        if procurement_type:
            # resolve the submitted id to a model instance, as the other forms do
            cleaned_data["procurement_type"] = ProcurementType.objects.get(id=procurement_type)
        return cleaned_data
class UpdatePlanForm(forms.Form):
id = forms.IntegerField()
expense = forms.ChoiceField()
subject_of_procurement = forms.CharField()
quantity = forms.IntegerField()
unit_of_measure = forms.CharField()
estimated_unit_cost = forms.IntegerField()
source_of_funding = forms.CharField(widget=forms.RadioSelect(attrs={"class":"source_of_funding_radio"}))
# other_funder = forms.CharField(label="Specify other Funder", max_length=64, required=False, widget=forms.TextInput())
date_required_q1 = forms.BooleanField(required=False)
date_required_q2 = forms.BooleanField(required=False)
date_required_q3 = forms.BooleanField(required=False)
date_required_q4 = forms.BooleanField(required=False)
def __init__(self, plan=None, user=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
self.fields["source_of_funding"].widget.choices = self.get_source_of_funding_choices()
self.plan = plan
self.fields["expense"].choices = self.get_expense_choices()
def get_expense_choices(self):
return [(expense.id, f"{expense.name}") for expense in self.plan.procurement_type.expense_set.all()]
def get_source_of_funding_choices(self):
return [(funder.id, funder.name) for funder in Funder.objects.all()] + [("0", "Other")]
def clean(self):
cleaned_data = super().clean()
q1 = cleaned_data.get("date_required_q1")
q2 = cleaned_data.get("date_required_q2")
q3 = cleaned_data.get("date_required_q3")
q4 = cleaned_data.get("date_required_q4")
if not (q1 or q2 or q3 or q4):
            self.add_error('date_required_q4', "Select at least 1 Quarter")
plan_id = cleaned_data.get("id")
estimated_unit_cost = cleaned_data.get("estimated_unit_cost")
quantity = cleaned_data.get("quantity")
total_estimated_cost = estimated_unit_cost * quantity
user_department = get_user_department(self.user)
budget_sealing = user_department.budget_sealing
total_estimated_departmental_plan_cost = total_estimated_cost + user_department.total_estimated_plan_cost(exclude_id=plan_id)
if total_estimated_departmental_plan_cost > budget_sealing:
self.add_error('estimated_unit_cost', f"You are exceeding the budget limit ({currency(budget_sealing)})")
raise forms.ValidationError(f"You have exceeding the budget limit ({budget_sealing})")
source_of_funding = cleaned_data.get("source_of_funding")
funder = Funder.objects.filter(id=source_of_funding).first()
# if not funder:
# other_funder = cleaned_data.get("other_funder")
# if other_funder:
# funder = create_new_funder(other_funder)
# else:
# self.add_error('other_funder', "Funder must be specified.")
cleaned_data["source_of_funding"] = funder
expense = Expense.objects.get(id=(cleaned_data.get("expense")))
cleaned_data["expense"] = expense
cleaned_data["procurement_type"] = expense.procurement_type
class DeletePlanForm(forms.Form):
id = forms.IntegerField()
class SendPlanToPDUMemberForm(forms.Form):
    id = forms.IntegerField(widget=forms.HiddenInput)
    pdu_member = forms.ChoiceField()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # wire the choices callable to the field, mirroring ConsolidatePlanForm;
        # without this the ChoiceField has no choices and always fails validation
        self.fields["pdu_member"].choices = self.get_pdu_member_choices
def get_pdu_member_choices(self):
return [(profile.id, profile.display_name) for profile in Profile.objects.filter(is_in_pdu=True).exclude(user=get_pdu_head())]
def clean(self):
cleaned_data = super().clean()
pdu_member = cleaned_data.get('pdu_member')
if pdu_member:
cleaned_data['pdu_member'] = Profile.objects.get(id=pdu_member)
class ConsolidatePlanForm(forms.Form):
id = forms.IntegerField(widget=forms.HiddenInput)
consolidation_group = forms.ChoiceField()
def __init__(self, plan=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["consolidation_group"].choices = self.get_consolidation_group_choices
self.plan = plan
def get_consolidation_group_choices(self):
return [(group.id, f"{group.subject_of_procurement}") for group in ConsolidationGroup.objects.filter(procurement_type=self.plan.procurement_type).all()]
def clean(self):
cleaned_data = super().clean()
consolidation_group = cleaned_data.get('consolidation_group')
if consolidation_group:
cleaned_data['consolidation_group'] = ConsolidationGroup.objects.get(id=consolidation_group)
| [
"[email protected]"
] | |
d5c8b24693ea9229cea030cafd7ed4a4cd3d7633 | 5ac726f23d0490d3d43819578cca590b62d0ff02 | /wise_intern/Tracker/views.py | 5b6bec7f3785285577f5079f1593baeedcb6eba6 | [] | no_license | youssriaboelseod/Software-HumanResources | 52ab324bf43479d8bea20690f71690615c68ef25 | 821fdf195915af3f6b6ec16ef2fb6d9f70d986f7 | refs/heads/master | 2022-12-28T01:29:52.310176 | 2020-10-10T05:05:22 | 2020-10-10T05:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Tracker
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, DetailView, UpdateView, DeleteView
from Job_openings.models import Jobb
from Candidate.models import Candidate
from django.db.models import Q
# Create your views here.
@login_required
def tracker(request):
if request.user.is_superuser:
tracker = Tracker.objects.all().order_by('-date_posted')
else:
tracker = Tracker.objects.filter(user=request.user).order_by('-date_posted')
search_term = ''
if 'search' in request.GET:
search_term = request.GET['search']
tracker = tracker.filter(
Q(company_applied__company_name__icontains=search_term) |
Q(candidate_name__candidate_name__icontains=search_term) |
Q(position_applied__position__icontains=search_term) |
Q(phone__icontains=search_term)
)
context = {
'tracker': tracker, 'search_term': search_term, 'tracker_page': 'active',
}
return render(request, 'Tracker/tracker.html', context)
class TrackerCreateView(LoginRequiredMixin, CreateView):
model = Tracker
fields = ['current_CTC', 'expected_CTC', 'vendor', 'notice_period', 'email','user','phone', 'company_applied', 'position_applied', 'candidate_status', 'relevant_experience', 'total_experience', 'candidate_name']
def get_initial(self):
candidate_id = self.request.GET.get('candidate_id')
if candidate_id:
try:
candidate = Candidate.objects.get(id=candidate_id)
except Candidate.DoesNotExist:
return super().get_initial()
return {'candidate_name': candidate,
'phone': candidate.phone,
'email': candidate.email,
'user':self.request.user,
}
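    # Hedged example (URL pattern assumed, not shown here): opening the
    # create view as .../tracker/new/?candidate_id=42 pre-fills
    # candidate_name, phone, email and user from Candidate #42; without
    # the parameter the default initial data is used.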
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class TrackerDetailView(LoginRequiredMixin, DetailView):
model = Tracker
class TrackerUpdateView(LoginRequiredMixin, UpdateView):
model = Tracker
fields = ['current_CTC', 'expected_CTC', 'notice_period', 'vendor', 'company_applied', 'phone','user', 'email', 'position_applied', 'candidate_status', 'relevant_experience', 'total_experience', 'candidate_name']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class TrackerDeleteView(LoginRequiredMixin, DeleteView):
model = Tracker
success_url = '/dashboard/tracker/'
| [
"[email protected]"
] | |
fdc8c637b2a2360cfe62203c066d247991a0ccea | 02dde23ab7414142d9df823b873b5b2c87a691b9 | /cinemago_app/app/__init__.py | 3ac0b56ad4140b6fe00e3b6625258701d241f878 | [] | no_license | andrei-papou/cinemago | a4094b80a18dcec0072a1a20a5b8aa51c6da522f | 580c71f0737de1070a0332d9c154100acbb22303 | refs/heads/master | 2021-05-30T04:51:16.196188 | 2016-01-18T19:37:19 | 2016-01-18T19:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | from schematics.exceptions import ValidationError
from itsdangerous import BadSignature, SignatureExpired
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.mail import Mail
from flask.ext.restful import Api
from config import config
from .exceptions import (
BadRequest,
Unauthorized,
Forbidden,
NotFound,
ScrapyServerError,
bad_request_handler,
bad_signature_handler,
unauthorized_handler,
signature_expired_handler,
forbidden_handler,
not_found_handler,
validation_error_handler,
scrapy_server_error_handler,
)
db = SQLAlchemy()
mail = Mail()
api = Api()
def create_app(config_mode):
app = Flask(__name__)
app.config.from_object(config[config_mode])
# url import
from . import routes
db.init_app(app)
mail.init_app(app)
api.init_app(app)
# blueprints here
from .seanses import seanses as seanses_blueprint
app.register_blueprint(seanses_blueprint)
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint)
from .scrapy_layer import scrapy_layer as scrapy_layer_blueprint
app.register_blueprint(scrapy_layer_blueprint)
# exception handlers registration
app.errorhandler(BadRequest)(bad_request_handler)
app.errorhandler(Unauthorized)(unauthorized_handler)
app.errorhandler(Forbidden)(forbidden_handler)
app.errorhandler(NotFound)(not_found_handler)
app.errorhandler(ValidationError)(validation_error_handler)
app.errorhandler(BadSignature)(bad_signature_handler)
app.errorhandler(SignatureExpired)(signature_expired_handler)
app.errorhandler(ScrapyServerError)(scrapy_server_error_handler)
return app
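# Hedged usage sketch (the exact config keys live in config.py, which is not
# shown here):
#
#     app = create_app('development')
#     app.run()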
| [
"[email protected]"
] | |
a64f634024c805d7142b15712548433cb9621863 | e299ad494a144cc6cfebcd45b10ddcc8efab54a9 | /test/python_api/default-constructor/sb_breakpoint.py | 2bdc539a001dcdeae2d958de621b5ef705e55df6 | [
"NCSA"
] | permissive | apple-oss-distributions/lldb | 3dbd2fea5ce826b2bebec2fe88fadbca771efbdf | 10de1840defe0dff10b42b9c56971dbc17c1f18c | refs/heads/main | 2023-08-02T21:31:38.525968 | 2014-04-11T21:20:22 | 2021-10-06T05:26:12 | 413,590,587 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | """
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import sys
import lldb
def fuzz_obj(obj):
obj.GetID()
obj.ClearAllBreakpointSites()
obj.FindLocationByAddress(sys.maxint)
obj.FindLocationIDByAddress(sys.maxint)
obj.FindLocationByID(0)
obj.GetLocationAtIndex(0)
obj.SetEnabled(True)
obj.IsEnabled()
obj.GetHitCount()
obj.SetIgnoreCount(1)
obj.GetIgnoreCount()
obj.SetCondition("i >= 10")
obj.GetCondition()
obj.SetThreadID(0)
obj.GetThreadID()
obj.SetThreadIndex(0)
obj.GetThreadIndex()
obj.SetThreadName("worker thread")
obj.GetThreadName()
obj.SetQueueName("my queue")
obj.GetQueueName()
obj.SetCallback(None, None)
obj.GetNumResolvedLocations()
obj.GetNumLocations()
obj.GetDescription(lldb.SBStream())
for bp_loc in obj:
s = str(bp_loc)
| [
"[email protected]"
] | |
579959a5d4e3ef7cb1e17f81d9439f8a8a0d30d7 | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/distribTools/__init__.py | 8efdcba9816036b872c9cde5b2439ecd585cba74 | [] | no_license | 7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py |
#
# ========== licence begin LGPL
# Copyright (C) 2002 SAP AG
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ========== licence end
#
__all__ = [
'packBinaryToolsLib',
]
| [
"[email protected]"
] | |
18be60559ba6c0ac41c4a74f49b594fc1ad3161c | b7d1922311613b2dc47bc5c0284ff48bc5294f8c | /03-Lists_Basics/Exercises/More_exercises/1-Zeros_to_Back.py | bf646b60f3126a824475643c5c4c6ee8c218e616 | [
"MIT"
] | permissive | eclipse-ib/Software-University-Fundamentals_Module | c32bfa5f249f79de622016269a026d1114341e11 | 994ef75c70d1bae8e615dbb789aeffd6e0a42c34 | refs/heads/main | 2023-01-30T08:51:26.239640 | 2020-12-13T19:54:18 | 2020-12-13T19:54:18 | 306,145,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | numbers = input().split(", ")
new_numbers = []
for i in numbers:
    if int(i) != 0:
        new_numbers.append(int(i))
for i in numbers:
if int(i) == 0:
new_numbers.append(0)
print(f"{new_numbers}")
| [
"[email protected]"
] | |
766c4da836e8e2e56ca4b04d73c586eff2605af9 | 54ab0f79f5d68f4732ca7d205f72ecef99862303 | /torch/jit/_monkeytype_config.py | 9957541ff25d17f5d68863e9405b366e5fcaa0e9 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | csarofeen/pytorch | a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc | e8557ec5e064608577f81e51ccfe7c36c917cb0f | refs/heads/devel | 2023-04-30T02:42:13.558738 | 2023-03-14T00:50:01 | 2023-03-14T00:50:01 | 88,071,101 | 35 | 10 | NOASSERTION | 2023-06-21T17:37:30 | 2017-04-12T16:02:31 | C++ | UTF-8 | Python | false | false | 7,129 | py |
import torch
import inspect
import typing
import pathlib
import sys
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
# Checks whether a class is defined in `torch.*` modules
def is_torch_native_class(cls):
if not hasattr(cls, '__module__'):
return False
parent_modules = cls.__module__.split('.')
if not parent_modules:
return False
root_module = sys.modules.get(parent_modules[0])
return root_module is torch
def get_type(type):
"""
    Helper function which converts the given type to a TorchScript-acceptable format.
"""
if isinstance(type, str):
return type
elif inspect.getmodule(type) == typing:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace `typing.`
# with a null string. This needs to be done since
# typing.List is not accepted by TorchScript.
type_to_string = str(type)
return type_to_string.replace(type.__module__ + '.', '')
elif is_torch_native_class(type):
# If the type is a subtype of torch module, then TorchScript expects a fully qualified name
# for the type which is obtained by combining the module name and type name.
return type.__module__ + '.' + type.__name__
else:
# For all other types use the name for the type.
return type.__name__
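# Hedged illustration of the branches above:
#   get_type(typing.List[int]) -> 'List[int]'    (the 'typing.' prefix is stripped)
#   get_type(torch.Tensor)     -> 'torch.Tensor' (fully qualified torch name)
#   get_type(int)              -> 'int'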
def get_optional_of_element_type(types):
"""
    Helper function that extracts the type of the element to be annotated as Optional
from the list of consolidated types and returns `Optional[element type]`.
TODO: To remove this check once Union support lands.
"""
elem_type = types[1] if type(None) == types[0] else types[0]
elem_type = get_type(elem_type)
# Optional type is internally converted to Union[type, NoneType], which
# is not supported yet in TorchScript. Hence, representing the optional type as string.
return 'Optional[' + elem_type + ']'
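# Hedged illustration: get_optional_of_element_type([type(None), int])
# returns 'Optional[int]'.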
def get_qualified_name(func):
return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
# If there are more types for an argument,
# then consolidate the type to `Any` and replace the entry
# by type `Any`.
for arg, types in all_args.items():
types = list(types)
type_length = len(types)
if type_length == 2 and type(None) in types:
                    # TODO: To remove this check once Union support in TorchScript lands.
all_args[arg] = get_optional_of_element_type(types)
elif type_length > 1:
all_args[arg] = 'Any'
elif type_length == 1:
all_args[arg] = get_type(types[0])
return all_args
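        # Illustration of the consolidation rules above (hedged):
        #   {int}            -> 'int'
        #   {int, NoneType}  -> 'Optional[int]'
        #   {int, str, bool} -> 'Any'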
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""
Returns a JitCallTraceStoreLogger that logs to the configured
trace store.
"""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return jit_code_filter
else:
# When MonkeyType is not installed, we provide dummy class definitions
# for the below classes.
class JitTypeTraceStoreLogger: # type: ignore[no-redef]
def __init__(self):
pass
class JitTypeTraceStore: # type: ignore[no-redef]
def __init__(self):
self.trace_records = None
class JitTypeTraceConfig: # type: ignore[no-redef]
def __init__(self):
pass
monkeytype_trace = None # noqa: F811
def jit_code_filter(code: CodeType) -> bool:
"""
Custom CodeFilter for Torchscript to trace forward calls.
The custom CodeFilter is required while scripting a FX Traced forward calls.
FX Traced forward calls have `code.co_filename` start with '<' which is used
to exclude tracing of stdlib and site-packages in the default code filter.
Since we need all forward calls to be traced, this custom code filter
checks for code.co_name to be 'forward' and enables tracing for all such calls.
The code filter is similar to default code filter for monkeytype and
excludes tracing of stdlib and site-packages.
"""
# Filter code without a source file and exclude this check for 'forward' calls.
if code.co_name != 'forward' and (not code.co_filename or code.co_filename[0] == '<'):
return False
filename = pathlib.Path(code.co_filename).resolve()
return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS)
| [
"[email protected]"
] | |
25d02e56b5cadf5ffa55237cedff1a496b5e5ecb | 0cd64f3f67c6a3b130a788906da84ffc3d15396a | /Library/lib/python3.9/site-packages/sympy/parsing/tests/test_c_parser.py | b6d86dbeae8c99af2852b000b01acf1a6d0696be | [
"MIT",
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Ryorama/codeapp | 32ef44a3e8058da9858924df211bf82f5f5018f1 | cf7f5753c6c4c3431d8209cbaacf5208c3c664fa | refs/heads/main | 2023-06-26T09:24:13.724462 | 2021-07-27T17:54:25 | 2021-07-27T17:54:25 | 388,520,626 | 0 | 0 | MIT | 2021-07-22T16:01:32 | 2021-07-22T16:01:32 | null | UTF-8 | Python | false | false | 154,616 | py | from sympy.parsing.sym_expr import SymPyExpression
from sympy.testing.pytest import raises, XFAIL
from sympy.external import import_module
cin = import_module('clang.cindex', import_kwargs = {'fromlist': ['cindex']})
if cin:
from sympy.codegen.ast import (Variable, String, Return,
FunctionDefinition, Integer, Float, Declaration, CodeBlock,
FunctionPrototype, FunctionCall, NoneToken, Assignment, Type,
IntBaseType, SignedIntType, UnsignedIntType, FloatType,
AddAugmentedAssignment, SubAugmentedAssignment,
MulAugmentedAssignment, DivAugmentedAssignment,
ModAugmentedAssignment, While)
from sympy.codegen.cnodes import (PreDecrement, PostDecrement,
PreIncrement, PostIncrement)
from sympy.core import (Add, Mul, Mod, Pow, Rational,
StrictLessThan, LessThan, StrictGreaterThan, GreaterThan,
Equality, Unequality)
from sympy.logic.boolalg import And, Not, Or
from sympy import Symbol, true, false
import os
def test_variable():
c_src1 = (
'int a;' + '\n' +
'int b;' + '\n'
)
c_src2 = (
'float a;' + '\n'
+ 'float b;' + '\n'
)
c_src3 = (
'int a;' + '\n' +
'float b;' + '\n' +
'int c;'
)
c_src4 = (
'int x = 1, y = 6.78;' + '\n' +
'float p = 2, q = 9.67;'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
assert res1[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
)
assert res1[1] == Declaration(
Variable(
Symbol('b'),
type=IntBaseType(String('intc'))
)
)
assert res2[0] == Declaration(
Variable(
Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
)
assert res2[1] == Declaration(
Variable(
Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
)
assert res3[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
)
assert res3[1] == Declaration(
Variable(
Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
)
assert res3[2] == Declaration(
Variable(
Symbol('c'),
type=IntBaseType(String('intc'))
)
)
assert res4[0] == Declaration(
Variable(
Symbol('x'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res4[1] == Declaration(
Variable(
Symbol('y'),
type=IntBaseType(String('intc')),
value=Integer(6)
)
)
assert res4[2] == Declaration(
Variable(
Symbol('p'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.0', precision=53)
)
)
assert res4[3] == Declaration(
Variable(
Symbol('q'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('9.67', precision=53)
)
)
@XFAIL
def test_int():
c_src1 = 'int a = 1;'
c_src2 = (
'int a = 1;' + '\n' +
'int b = 2;' + '\n'
)
c_src3 = 'int a = 2.345, b = 5.67;'
c_src4 = 'int p = 6, q = 23.45;'
c_src5 = "int x = '0', y = 'a';"
c_src6 = "int r = true, s = false;"
# cin.TypeKind.UCHAR
c_src_type1 = (
"signed char a = 1, b = 5.1;"
)
# cin.TypeKind.SHORT
c_src_type2 = (
"short a = 1, b = 5.1;"
"signed short c = 1, d = 5.1;"
"short int e = 1, f = 5.1;"
"signed short int g = 1, h = 5.1;"
)
# cin.TypeKind.INT
c_src_type3 = (
"signed int a = 1, b = 5.1;"
"int c = 1, d = 5.1;"
)
# cin.TypeKind.LONG
c_src_type4 = (
"long a = 1, b = 5.1;"
"long int c = 1, d = 5.1;"
)
# cin.TypeKind.UCHAR
c_src_type5 = "unsigned char a = 1, b = 5.1;"
# cin.TypeKind.USHORT
c_src_type6 = (
"unsigned short a = 1, b = 5.1;"
"unsigned short int c = 1, d = 5.1;"
)
# cin.TypeKind.UINT
c_src_type7 = "unsigned int a = 1, b = 5.1;"
# cin.TypeKind.ULONG
c_src_type8 = (
"unsigned long a = 1, b = 5.1;"
"unsigned long int c = 1, d = 5.1;"
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
res6 = SymPyExpression(c_src6, 'c').return_expr()
res_type1 = SymPyExpression(c_src_type1, 'c').return_expr()
res_type2 = SymPyExpression(c_src_type2, 'c').return_expr()
res_type3 = SymPyExpression(c_src_type3, 'c').return_expr()
res_type4 = SymPyExpression(c_src_type4, 'c').return_expr()
res_type5 = SymPyExpression(c_src_type5, 'c').return_expr()
res_type6 = SymPyExpression(c_src_type6, 'c').return_expr()
res_type7 = SymPyExpression(c_src_type7, 'c').return_expr()
res_type8 = SymPyExpression(c_src_type8, 'c').return_expr()
assert res1[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res2[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res2[1] == Declaration(
Variable(
Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res3[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res3[1] == Declaration(
Variable(
Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(5)
)
)
assert res4[0] == Declaration(
Variable(
Symbol('p'),
type=IntBaseType(String('intc')),
value=Integer(6)
)
)
assert res4[1] == Declaration(
Variable(
Symbol('q'),
type=IntBaseType(String('intc')),
value=Integer(23)
)
)
assert res5[0] == Declaration(
Variable(
Symbol('x'),
type=IntBaseType(String('intc')),
value=Integer(48)
)
)
assert res5[1] == Declaration(
Variable(
Symbol('y'),
type=IntBaseType(String('intc')),
value=Integer(97)
)
)
assert res6[0] == Declaration(
Variable(
Symbol('r'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res6[1] == Declaration(
Variable(
Symbol('s'),
type=IntBaseType(String('intc')),
value=Integer(0)
)
)
assert res_type1[0] == Declaration(
Variable(
Symbol('a'),
type=SignedIntType(
String('int8'),
nbits=Integer(8)
),
value=Integer(1)
)
)
assert res_type1[1] == Declaration(
Variable(
Symbol('b'),
type=SignedIntType(
String('int8'),
nbits=Integer(8)
),
value=Integer(5)
)
)
assert res_type2[0] == Declaration(
Variable(
Symbol('a'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type2[1] == Declaration(
Variable(
Symbol('b'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type2[2] == Declaration(
Variable(Symbol('c'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type2[3] == Declaration(
Variable(
Symbol('d'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type2[4] == Declaration(
Variable(
Symbol('e'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type2[5] == Declaration(
Variable(
Symbol('f'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type2[6] == Declaration(
Variable(
Symbol('g'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type2[7] == Declaration(
Variable(
Symbol('h'),
type=SignedIntType(
String('int16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type3[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res_type3[1] == Declaration(
Variable(
Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(5)
)
)
assert res_type3[2] == Declaration(
Variable(
Symbol('c'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res_type3[3] == Declaration(
Variable(
Symbol('d'),
type=IntBaseType(String('intc')),
value=Integer(5)
)
)
assert res_type4[0] == Declaration(
Variable(
Symbol('a'),
type=SignedIntType(
String('int64'),
nbits=Integer(64)
),
value=Integer(1)
)
)
assert res_type4[1] == Declaration(
Variable(
Symbol('b'),
type=SignedIntType(
String('int64'),
nbits=Integer(64)
),
value=Integer(5)
)
)
assert res_type4[2] == Declaration(
Variable(
Symbol('c'),
type=SignedIntType(
String('int64'),
nbits=Integer(64)
),
value=Integer(1)
)
)
assert res_type4[3] == Declaration(
Variable(
Symbol('d'),
type=SignedIntType(
String('int64'),
nbits=Integer(64)
),
value=Integer(5)
)
)
assert res_type5[0] == Declaration(
Variable(
Symbol('a'),
type=UnsignedIntType(
String('uint8'),
nbits=Integer(8)
),
value=Integer(1)
)
)
assert res_type5[1] == Declaration(
Variable(
Symbol('b'),
type=UnsignedIntType(
String('uint8'),
nbits=Integer(8)
),
value=Integer(5)
)
)
assert res_type6[0] == Declaration(
Variable(
Symbol('a'),
type=UnsignedIntType(
String('uint16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type6[1] == Declaration(
Variable(
Symbol('b'),
type=UnsignedIntType(
String('uint16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type6[2] == Declaration(
Variable(
Symbol('c'),
type=UnsignedIntType(
String('uint16'),
nbits=Integer(16)
),
value=Integer(1)
)
)
assert res_type6[3] == Declaration(
Variable(
Symbol('d'),
type=UnsignedIntType(
String('uint16'),
nbits=Integer(16)
),
value=Integer(5)
)
)
assert res_type7[0] == Declaration(
Variable(
Symbol('a'),
type=UnsignedIntType(
String('uint32'),
nbits=Integer(32)
),
value=Integer(1)
)
)
assert res_type7[1] == Declaration(
Variable(
Symbol('b'),
type=UnsignedIntType(
String('uint32'),
nbits=Integer(32)
),
value=Integer(5)
)
)
assert res_type8[0] == Declaration(
Variable(
Symbol('a'),
type=UnsignedIntType(
String('uint64'),
nbits=Integer(64)
),
value=Integer(1)
)
)
assert res_type8[1] == Declaration(
Variable(
Symbol('b'),
type=UnsignedIntType(
String('uint64'),
nbits=Integer(64)
),
value=Integer(5)
)
)
assert res_type8[2] == Declaration(
Variable(
Symbol('c'),
type=UnsignedIntType(
String('uint64'),
nbits=Integer(64)
),
value=Integer(1)
)
)
assert res_type8[3] == Declaration(
Variable(
Symbol('d'),
type=UnsignedIntType(
String('uint64'),
nbits=Integer(64)
),
value=Integer(5)
)
)
@XFAIL
def test_float():
c_src1 = 'float a = 1.0;'
c_src2 = (
'float a = 1.25;' + '\n' +
'float b = 2.39;' + '\n'
)
c_src3 = 'float x = 1, y = 2;'
c_src4 = 'float p = 5, e = 7.89;'
c_src5 = 'float r = true, s = false;'
# cin.TypeKind.FLOAT
c_src_type1 = 'float x = 1, y = 2.5;'
# cin.TypeKind.DOUBLE
c_src_type2 = 'double x = 1, y = 2.5;'
# cin.TypeKind.LONGDOUBLE
c_src_type3 = 'long double x = 1, y = 2.5;'
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
res_type1 = SymPyExpression(c_src_type1, 'c').return_expr()
res_type2 = SymPyExpression(c_src_type2, 'c').return_expr()
res_type3 = SymPyExpression(c_src_type3, 'c').return_expr()
assert res1[0] == Declaration(
Variable(
Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.0', precision=53)
)
)
assert res2[0] == Declaration(
Variable(
Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.25', precision=53)
)
)
assert res2[1] == Declaration(
Variable(
Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.3900000000000001', precision=53)
)
)
assert res3[0] == Declaration(
Variable(
Symbol('x'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.0', precision=53)
)
)
assert res3[1] == Declaration(
Variable(
Symbol('y'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.0', precision=53)
)
)
assert res4[0] == Declaration(
Variable(
Symbol('p'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('5.0', precision=53)
)
)
assert res4[1] == Declaration(
Variable(
Symbol('e'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('7.89', precision=53)
)
)
assert res5[0] == Declaration(
Variable(
Symbol('r'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.0', precision=53)
)
)
assert res5[1] == Declaration(
Variable(
Symbol('s'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('0.0', precision=53)
)
)
assert res_type1[0] == Declaration(
Variable(
Symbol('x'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.0', precision=53)
)
)
assert res_type1[1] == Declaration(
Variable(
Symbol('y'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.5', precision=53)
)
)
assert res_type2[0] == Declaration(
Variable(
Symbol('x'),
type=FloatType(
String('float64'),
nbits=Integer(64),
nmant=Integer(52),
nexp=Integer(11)
),
value=Float('1.0', precision=53)
)
)
assert res_type2[1] == Declaration(
Variable(
Symbol('y'),
type=FloatType(
String('float64'),
nbits=Integer(64),
nmant=Integer(52),
nexp=Integer(11)
),
value=Float('2.5', precision=53)
)
)
assert res_type3[0] == Declaration(
Variable(
Symbol('x'),
type=FloatType(
String('float80'),
nbits=Integer(80),
nmant=Integer(63),
nexp=Integer(15)
),
value=Float('1.0', precision=53)
)
)
assert res_type3[1] == Declaration(
Variable(
Symbol('y'),
type=FloatType(
String('float80'),
nbits=Integer(80),
nmant=Integer(63),
nexp=Integer(15)
),
value=Float('2.5', precision=53)
)
)
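

# Boolean declarations: integer and floating initializers collapse to the
# SymPy booleans true/false (any nonzero value counts as true).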
@XFAIL
def test_bool():
c_src1 = (
'bool a = true, b = false;'
)
c_src2 = (
'bool a = 1, b = 0;'
)
c_src3 = (
'bool a = 10, b = 20;'
)
c_src4 = (
'bool a = 19.1, b = 9.0, c = 0.0;'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
assert res1[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=true
)
)
assert res1[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=false
)
)
assert res2[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=true)
)
assert res2[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=false
)
)
assert res3[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=true
)
)
assert res3[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=true
)
)
assert res4[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=true)
)
assert res4[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=true
)
)
assert res4[2] == Declaration(
Variable(Symbol('c'),
type=Type(String('bool')),
value=false
)
)
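

# Function definitions become FunctionDefinition nodes with a CodeBlock body;
# a definition with an empty body comes back as a bare FunctionPrototype.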
def test_function():
c_src1 = (
'void fun1()' + '\n' +
'{' + '\n' +
'int a;' + '\n' +
'}'
)
c_src2 = (
'int fun2()' + '\n' +
'{'+ '\n' +
'int a;' + '\n' +
'return a;' + '\n' +
'}'
)
c_src3 = (
'float fun3()' + '\n' +
'{' + '\n' +
'float b;' + '\n' +
'return b;' + '\n' +
'}'
)
c_src4 = (
'float fun4()' + '\n' +
'{}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
assert res1[0] == FunctionDefinition(
NoneToken(),
name=String('fun1'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
)
)
)
assert res2[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun2'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Return('a')
)
)
assert res3[0] == FunctionDefinition(
FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
name=String('fun3'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Return('b')
)
)
assert res4[0] == FunctionPrototype(
FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
name=String('fun4'),
parameters=()
)
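

# Formal parameters are carried as Variable entries in the parameters tuple,
# typed the same way as ordinary declarations.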
def test_parameters():
c_src1 = (
'void fun1( int a)' + '\n' +
'{' + '\n' +
'int i;' + '\n' +
'}'
)
c_src2 = (
'int fun2(float x, float y)' + '\n' +
'{'+ '\n' +
'int a;' + '\n' +
'return a;' + '\n' +
'}'
)
c_src3 = (
'float fun3(int p, float q, int r)' + '\n' +
'{' + '\n' +
'float b;' + '\n' +
'return b;' + '\n' +
'}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
assert res1[0] == FunctionDefinition(
NoneToken(),
name=String('fun1'),
parameters=(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
),
),
body=CodeBlock(
Declaration(
Variable(
Symbol('i'),
type=IntBaseType(String('intc'))
)
)
)
)
assert res2[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun2'),
parameters=(
Variable(
Symbol('x'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
),
Variable(
Symbol('y'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
body=CodeBlock(
Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Return('a')
)
)
assert res3[0] == FunctionDefinition(
FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
name=String('fun3'),
parameters=(
Variable(
Symbol('p'),
type=IntBaseType(String('intc'))
),
Variable(
Symbol('q'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
),
Variable(
Symbol('r'),
type=IntBaseType(String('intc'))
)
),
body=CodeBlock(
Declaration(
Variable(
Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Return('b')
)
)
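

# Calls appearing in initializers become FunctionCall nodes; the arguments may
# be literals or previously declared symbols.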
def test_function_call():
c_src1 = (
'int fun1(int x)' + '\n' +
'{' + '\n' +
'return x;' + '\n' +
'}' + '\n' +
'void caller()' + '\n' +
'{' + '\n' +
'int x = fun1(2);' + '\n' +
'}'
)
c_src2 = (
'int fun2(int a, int b, int c)' + '\n' +
'{' + '\n' +
'return a;' + '\n' +
'}' + '\n' +
'void caller()' + '\n' +
'{' + '\n' +
'int y = fun2(2, 3, 4);' + '\n' +
'}'
)
c_src3 = (
'int fun3(int a, int b, int c)' + '\n' +
'{' + '\n' +
'return b;' + '\n' +
'}' + '\n' +
'void caller()' + '\n' +
'{' + '\n' +
'int p;' + '\n' +
'int q;' + '\n' +
'int r;' + '\n' +
'int z = fun3(p, q, r);' + '\n' +
'}'
)
c_src4 = (
'int fun4(float a, float b, int c)' + '\n' +
'{' + '\n' +
'return c;' + '\n' +
'}' + '\n' +
'void caller()' + '\n' +
'{' + '\n' +
'float x;' + '\n' +
'float y;' + '\n' +
'int z;' + '\n' +
        'int i = fun4(x, y, z);' + '\n' +
'}'
)
c_src5 = (
'int fun()' + '\n' +
'{' + '\n' +
'return 1;' + '\n' +
'}' + '\n' +
'void caller()' + '\n' +
'{' + '\n' +
        'int a = fun();' + '\n' +
'}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
assert res1[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun1'),
parameters=(Variable(Symbol('x'),
type=IntBaseType(String('intc'))
),
),
body=CodeBlock(
Return('x')
)
)
assert res1[1] == FunctionDefinition(
NoneToken(),
name=String('caller'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('x'),
value=FunctionCall(String('fun1'),
function_args=(
Integer(2),
)
)
)
)
)
)
assert res2[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun2'),
parameters=(Variable(Symbol('a'),
type=IntBaseType(String('intc'))
),
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
),
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
body=CodeBlock(
Return('a')
)
)
assert res2[1] == FunctionDefinition(
NoneToken(),
name=String('caller'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('y'),
value=FunctionCall(
String('fun2'),
function_args=(
Integer(2),
Integer(3),
Integer(4)
)
)
)
)
)
)
assert res3[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun3'),
parameters=(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
),
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
),
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
body=CodeBlock(
Return('b')
)
)
assert res3[1] == FunctionDefinition(
NoneToken(),
name=String('caller'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('p'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('q'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('r'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('z'),
value=FunctionCall(
String('fun3'),
function_args=(
Symbol('p'),
Symbol('q'),
Symbol('r')
)
)
)
)
)
)
assert res4[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun4'),
parameters=(Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
),
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
),
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
body=CodeBlock(
Return('c')
)
)
assert res4[1] == FunctionDefinition(
NoneToken(),
name=String('caller'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('x'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Declaration(
Variable(Symbol('y'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Declaration(
Variable(Symbol('z'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('i'),
value=FunctionCall(String('fun4'),
function_args=(
Symbol('x'),
Symbol('y'),
Symbol('z')
)
)
)
)
)
)
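    # Returning a bare integer literal currently comes back as an empty Return node.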
assert res5[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('fun'),
parameters=(),
body=CodeBlock(
Return('')
)
)
assert res5[1] == FunctionDefinition(
NoneToken(),
name=String('caller'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
value=FunctionCall(String('fun'),
function_args=()
)
)
)
)
)
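

# Same pipeline, but driven from header files on disk rather than source strings.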
def test_parse():
c_src1 = (
'int a;' + '\n' +
'int b;' + '\n'
)
c_src2 = (
'void fun1()' + '\n' +
'{' + '\n' +
'int a;' + '\n' +
'}'
)
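    # Write the snippets out as header files so the file-based entry point of
    # SymPyExpression is exercised.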
f1 = open('..a.h', 'w')
f2 = open('..b.h', 'w')
f1.write(c_src1)
    f2.write(c_src2)
f1.close()
f2.close()
res1 = SymPyExpression('..a.h', 'c').return_expr()
res2 = SymPyExpression('..b.h', 'c').return_expr()
os.remove('..a.h')
os.remove('..b.h')
assert res1[0] == Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
)
assert res1[1] == Declaration(
Variable(
Symbol('b'),
type=IntBaseType(String('intc'))
)
)
assert res2[0] == FunctionDefinition(
NoneToken(),
name=String('fun1'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc'))
)
)
)
)
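

# Arithmetic, modulo, relational and logical operators inside function bodies;
# constant subexpressions arrive already simplified by SymPy
# (e.g. 20 - 100 / 4 * 5 + 10 is stored as Integer(-95)).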
def test_binary_operators():
c_src1 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = 1;' + '\n' +
'}'
)
c_src2 = (
'void func()'+
'{' + '\n' +
'int a = 0;' + '\n' +
'a = a + 1;' + '\n' +
'a = 3*a - 10;' + '\n' +
'}'
)
c_src3 = (
'void func()'+
'{' + '\n' +
'int a = 10;' + '\n' +
'a = 1 + a - 3 * 6;' + '\n' +
'}'
)
c_src4 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'int b;' + '\n' +
'a = 100;' + '\n' +
'b = a*a + a*a + a + 19*a + 1 + 24;' + '\n' +
'}'
)
c_src5 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'int b;' + '\n' +
'int c;' + '\n' +
'int d;' + '\n' +
'a = 1;' + '\n' +
'b = 2;' + '\n' +
'c = b;' + '\n' +
'd = ((a+b)*(a+c))*((c-d)*(a+c));' + '\n' +
'}'
)
c_src6 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'int b;' + '\n' +
'int c;' + '\n' +
'int d;' + '\n' +
'a = 1;' + '\n' +
'b = 2;' + '\n' +
'c = 3;' + '\n' +
'd = (a*a*a*a + 3*b*b + b + b + c*d);' + '\n' +
'}'
)
c_src7 = (
'void func()'+
'{' + '\n' +
'float a;' + '\n' +
'a = 1.01;' + '\n' +
'}'
)
c_src8 = (
'void func()'+
'{' + '\n' +
'float a;' + '\n' +
'a = 10.0 + 2.5;' + '\n' +
'}'
)
c_src9 = (
'void func()'+
'{' + '\n' +
'float a;' + '\n' +
'a = 10.0 / 2.5;' + '\n' +
'}'
)
c_src10 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = 100 / 4;' + '\n' +
'}'
)
c_src11 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = 20 - 100 / 4 * 5 + 10;' + '\n' +
'}'
)
c_src12 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = (20 - 100) / 4 * (5 + 10);' + '\n' +
'}'
)
c_src13 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'int b;' + '\n' +
'float c;' + '\n' +
'c = b/a;' + '\n' +
'}'
)
c_src14 = (
'void func()'+
'{' + '\n' +
'int a = 2;' + '\n' +
'int d = 5;' + '\n' +
'int n = 10;' + '\n' +
'int s;' + '\n' +
's = (a/2)*(2*a + (n-1)*d);' + '\n' +
'}'
)
c_src15 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = 1 % 2;' + '\n' +
'}'
)
c_src16 = (
'void func()'+
'{' + '\n' +
'int a = 2;' + '\n' +
'int b;' + '\n' +
'b = a % 3;' + '\n' +
'}'
)
c_src17 = (
'void func()'+
'{' + '\n' +
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int c;' + '\n' +
'c = a % b;' + '\n' +
'}'
)
c_src18 = (
'void func()'+
'{' + '\n' +
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int mod = 1000000007;' + '\n' +
'int c;' + '\n' +
'c = (a + b * (100/a)) % mod;' + '\n' +
'}'
)
c_src19 = (
'void func()'+
'{' + '\n' +
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int mod = 1000000007;' + '\n' +
'int c;' + '\n' +
'c = ((a % mod + b % mod) % mod *(' \
'a % mod - b % mod) % mod) % mod;' + '\n' +
'}'
)
c_src20 = (
'void func()'+
'{' + '\n' +
        'bool a;' + '\n' +
'bool b;' + '\n' +
'a = 1 == 2;' + '\n' +
'b = 1 != 2;' + '\n' +
'}'
)
c_src21 = (
'void func()'+
'{' + '\n' +
'bool a;' + '\n' +
'bool b;' + '\n' +
'bool c;' + '\n' +
'bool d;' + '\n' +
'a = 1 == 2;' + '\n' +
'b = 1 <= 2;' + '\n' +
'c = 1 > 2;' + '\n' +
'd = 1 >= 2;' + '\n' +
'}'
)
c_src22 = (
'void func()'+
'{' + '\n' +
'int a = 1;' + '\n' +
'int b = 2;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'bool c7;' + '\n' +
'bool c8;' + '\n' +
'c1 = a == 1;' + '\n' +
'c2 = b == 2;' + '\n' +
'c3 = 1 != a;' + '\n' +
'c4 = 1 != b;' + '\n' +
'c5 = a < 0;' + '\n' +
'c6 = b <= 10;' + '\n' +
'c7 = a > 0;' + '\n' +
'c8 = b >= 11;' + '\n' +
'}'
)
c_src23 = (
'void func()'+
'{' + '\n' +
'int a = 3;' + '\n' +
'int b = 4;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'c1 = a == b;' + '\n' +
'c2 = a != b;' + '\n' +
'c3 = a < b;' + '\n' +
'c4 = a <= b;' + '\n' +
'c5 = a > b;' + '\n' +
'c6 = a >= b;' + '\n' +
'}'
)
c_src24 = (
'void func()'+
'{' + '\n' +
        'float a = 1.25;' + '\n' +
'float b = 2.5;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'c1 = a == 1.25;' + '\n' +
'c2 = b == 2.54;' + '\n' +
'c3 = 1.2 != a;' + '\n' +
'c4 = 1.5 != b;' + '\n' +
'}'
)
c_src25 = (
'void func()'+
'{' + '\n' +
        'float a = 1.25;' + '\n' +
'float b = 2.5;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'c1 = a == b;' + '\n' +
'c2 = a != b;' + '\n' +
'c3 = a < b;' + '\n' +
'c4 = a <= b;' + '\n' +
'c5 = a > b;' + '\n' +
'c6 = a >= b;' + '\n' +
'}'
)
c_src26 = (
'void func()'+
'{' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'c1 = true == true;' + '\n' +
'c2 = true == false;' + '\n' +
'c3 = false == false;' + '\n' +
'c4 = true != true;' + '\n' +
'c5 = true != false;' + '\n' +
'c6 = false != false;' + '\n' +
'}'
)
c_src27 = (
'void func()'+
'{' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'c1 = true && true;' + '\n' +
'c2 = true && false;' + '\n' +
'c3 = false && false;' + '\n' +
'c4 = true || true;' + '\n' +
'c5 = true || false;' + '\n' +
'c6 = false || false;' + '\n' +
'}'
)
c_src28 = (
'void func()'+
'{' + '\n' +
'bool a;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'c1 = a && true;' + '\n' +
'c2 = false && a;' + '\n' +
'c3 = true || a;' + '\n' +
'c4 = a || false;' + '\n' +
'}'
)
c_src29 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'c1 = a && 1;' + '\n' +
'c2 = a && 0;' + '\n' +
'c3 = a || 1;' + '\n' +
'c4 = 0 || a;' + '\n' +
'}'
)
c_src30 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'int b;' + '\n' +
'bool c;'+ '\n' +
'bool d;'+ '\n' +
'bool c1;' + '\n' +
'bool c2;' + '\n' +
'bool c3;' + '\n' +
'bool c4;' + '\n' +
'bool c5;' + '\n' +
'bool c6;' + '\n' +
'c1 = a && b;' + '\n' +
'c2 = a && c;' + '\n' +
'c3 = c && d;' + '\n' +
'c4 = a || b;' + '\n' +
'c5 = a || c;' + '\n' +
'c6 = c || d;' + '\n' +
'}'
)
c_src_raise1 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = -1;' + '\n' +
'}'
)
c_src_raise2 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = -+1;' + '\n' +
'}'
)
c_src_raise3 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = 2*-2;' + '\n' +
'}'
)
c_src_raise4 = (
'void func()'+
'{' + '\n' +
'int a;' + '\n' +
'a = (int)2.0;' + '\n' +
'}'
)
c_src_raise5 = (
'void func()'+
'{' + '\n' +
'int a=100;' + '\n' +
'a = (a==100)?(1):(0);' + '\n' +
'}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
res6 = SymPyExpression(c_src6, 'c').return_expr()
res7 = SymPyExpression(c_src7, 'c').return_expr()
res8 = SymPyExpression(c_src8, 'c').return_expr()
res9 = SymPyExpression(c_src9, 'c').return_expr()
res10 = SymPyExpression(c_src10, 'c').return_expr()
res11 = SymPyExpression(c_src11, 'c').return_expr()
res12 = SymPyExpression(c_src12, 'c').return_expr()
res13 = SymPyExpression(c_src13, 'c').return_expr()
res14 = SymPyExpression(c_src14, 'c').return_expr()
res15 = SymPyExpression(c_src15, 'c').return_expr()
res16 = SymPyExpression(c_src16, 'c').return_expr()
res17 = SymPyExpression(c_src17, 'c').return_expr()
res18 = SymPyExpression(c_src18, 'c').return_expr()
res19 = SymPyExpression(c_src19, 'c').return_expr()
res20 = SymPyExpression(c_src20, 'c').return_expr()
res21 = SymPyExpression(c_src21, 'c').return_expr()
res22 = SymPyExpression(c_src22, 'c').return_expr()
res23 = SymPyExpression(c_src23, 'c').return_expr()
res24 = SymPyExpression(c_src24, 'c').return_expr()
res25 = SymPyExpression(c_src25, 'c').return_expr()
res26 = SymPyExpression(c_src26, 'c').return_expr()
res27 = SymPyExpression(c_src27, 'c').return_expr()
res28 = SymPyExpression(c_src28, 'c').return_expr()
res29 = SymPyExpression(c_src29, 'c').return_expr()
res30 = SymPyExpression(c_src30, 'c').return_expr()
assert res1[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Assignment(Variable(Symbol('a')), Integer(1))
)
)
assert res2[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(0))),
Assignment(
Variable(Symbol('a')),
Add(Symbol('a'),
Integer(1))
),
Assignment(Variable(Symbol('a')),
Add(
Mul(
Integer(3),
Symbol('a')),
Integer(-10)
)
)
)
)
assert res3[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
),
Assignment(
Variable(Symbol('a')),
Add(
Symbol('a'),
Integer(-17)
)
)
)
)
assert res4[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(100)),
Assignment(
Variable(Symbol('b')),
Add(
Mul(
Integer(2),
Pow(
Symbol('a'),
Integer(2))
),
Mul(
Integer(20),
Symbol('a')),
Integer(25)
)
)
)
)
assert res5[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(1)),
Assignment(
Variable(Symbol('b')),
Integer(2)
),
Assignment(
Variable(Symbol('c')),
Symbol('b')),
Assignment(
Variable(Symbol('d')),
Mul(
Add(
Symbol('a'),
Symbol('b')),
Pow(
Add(
Symbol('a'),
Symbol('c')
),
Integer(2)
),
Add(
Symbol('c'),
Mul(
Integer(-1),
Symbol('d')
)
)
)
)
)
)
assert res6[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(1)
),
Assignment(
Variable(Symbol('b')),
Integer(2)
),
Assignment(
Variable(Symbol('c')),
Integer(3)
),
Assignment(
Variable(Symbol('d')),
Add(
Pow(
Symbol('a'),
Integer(4)
),
Mul(
Integer(3),
Pow(
Symbol('b'),
Integer(2)
)
),
Mul(
Integer(2),
Symbol('b')
),
Mul(
Symbol('c'),
Symbol('d')
)
)
)
)
)
assert res7[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Assignment(
Variable(Symbol('a')),
Float('1.01', precision=53)
)
)
)
assert res8[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Assignment(
Variable(Symbol('a')),
Float('12.5', precision=53)
)
)
)
assert res9[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Assignment(
Variable(Symbol('a')),
Float('4.0', precision=53)
)
)
)
assert res10[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(25)
)
)
)
assert res11[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(-95)
)
)
)
assert res12[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(-300)
)
)
)
assert res13[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('c'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
)
)
),
Assignment(
Variable(Symbol('c')),
Mul(
Pow(
Symbol('a'),
Integer(-1)
),
Symbol('b')
)
)
)
)
assert res14[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
),
Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc')),
value=Integer(5)
)
),
Declaration(
Variable(Symbol('n'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
),
Declaration(
Variable(Symbol('s'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('s')),
Mul(
Rational(1, 2),
Symbol('a'),
Add(
Mul(
Integer(2),
Symbol('a')
),
Mul(
Symbol('d'),
Add(
Symbol('n'),
Integer(-1)
)
)
)
)
)
)
)
assert res15[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('a')),
Integer(1)
)
)
)
assert res16[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('b')),
Mod(
Symbol('a'),
Integer(3)
)
)
)
)
assert res17[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('c')),
Mod(
Symbol('a'),
Symbol('b')
)
)
)
)
assert res18[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
),
Declaration(
Variable(Symbol('mod'),
type=IntBaseType(String('intc')),
value=Integer(1000000007)
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('c')),
Mod(
Add(
Symbol('a'),
Mul(
Integer(100),
Pow(
Symbol('a'),
Integer(-1)
),
Symbol('b')
)
),
Symbol('mod')
)
)
)
)
assert res19[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
),
Declaration(
Variable(Symbol('mod'),
type=IntBaseType(String('intc')),
value=Integer(1000000007)
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc'))
)
),
Assignment(
Variable(Symbol('c')),
Mod(
Mul(
Add(
Symbol('a'),
Mul(Integer(-1),
Symbol('b')
)
),
Add(
Symbol('a'),
Symbol('b')
)
),
Symbol('mod')
)
)
)
)
assert res20[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('b'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('a')),
false
),
Assignment(
Variable(Symbol('b')),
true
)
)
)
assert res21[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('b'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('d'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('a')),
false
),
Assignment(
Variable(Symbol('b')),
true
),
Assignment(
Variable(Symbol('c')),
false
),
Assignment(
Variable(Symbol('d')),
false
)
)
)
assert res22[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c7'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c8'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
Equality(
Symbol('a'),
Integer(1)
)
),
Assignment(
Variable(Symbol('c2')),
Equality(
Symbol('b'),
Integer(2)
)
),
Assignment(
Variable(Symbol('c3')),
Unequality(
Integer(1),
Symbol('a')
)
),
Assignment(
Variable(Symbol('c4')),
Unequality(
Integer(1),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c5')),
StrictLessThan(
Symbol('a'),
Integer(0)
)
),
Assignment(
Variable(Symbol('c6')),
LessThan(
Symbol('b'),
Integer(10)
)
),
Assignment(
Variable(Symbol('c7')),
StrictGreaterThan(
Symbol('a'),
Integer(0)
)
),
Assignment(
Variable(Symbol('c8')),
GreaterThan(
Symbol('b'),
Integer(11)
)
)
)
)
assert res23[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(4)
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
Equality(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c2')),
Unequality(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c3')),
StrictLessThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c4')),
LessThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c5')),
StrictGreaterThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c6')),
GreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
)
    assert res24[0] == FunctionDefinition(
        NoneToken(),
        name=String('func'),
        parameters=(),
        body=CodeBlock(
            Declaration(
                Variable(Symbol('a'),
                    type=FloatType(
                        String('float32'),
                        nbits=Integer(32),
                        nmant=Integer(23),
                        nexp=Integer(8)
                    ),
                    value=Float('1.25', precision=53)
                )
            ),
            Declaration(
                Variable(Symbol('b'),
                    type=FloatType(
                        String('float32'),
                        nbits=Integer(32),
                        nmant=Integer(23),
                        nexp=Integer(8)
                    ),
                    value=Float('2.5', precision=53)
                )
            ),
            Declaration(
                Variable(Symbol('c1'),
                    type=Type(String('bool'))
                )
            ),
            Declaration(
                Variable(Symbol('c2'),
                    type=Type(String('bool'))
                )
            ),
            Declaration(
                Variable(Symbol('c3'),
                    type=Type(String('bool'))
                )
            ),
            Declaration(
                Variable(Symbol('c4'),
                    type=Type(String('bool'))
                )
            ),
            Assignment(
                Variable(Symbol('c1')),
                Equality(
                    Symbol('a'),
                    Float('1.25', precision=53)
                )
            ),
            Assignment(
                Variable(Symbol('c2')),
                Equality(
                    Symbol('b'),
                    Float('2.54', precision=53)
                )
            ),
            Assignment(
                Variable(Symbol('c3')),
                Unequality(
                    Float('1.2', precision=53),
                    Symbol('a')
                )
            ),
            Assignment(
                Variable(Symbol('c4')),
                Unequality(
                    Float('1.5', precision=53),
                    Symbol('b')
                )
            )
        )
    )
assert res25[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.25', precision=53)
)
),
Declaration(
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.5', precision=53)
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
                    type=Type(String('bool'))
                )
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
Equality(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c2')),
Unequality(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c3')),
StrictLessThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c4')),
LessThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c5')),
StrictGreaterThan(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c6')),
GreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
)
assert res26[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
        parameters=(),
        body=CodeBlock(
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
true
),
Assignment(
Variable(Symbol('c2')),
false
),
Assignment(
Variable(Symbol('c3')),
true
),
Assignment(
Variable(Symbol('c4')),
false
),
Assignment(
Variable(Symbol('c5')),
true
),
Assignment(
Variable(Symbol('c6')),
false
)
)
)
assert res27[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
true
),
Assignment(
Variable(Symbol('c2')),
false
),
Assignment(
Variable(Symbol('c3')),
false
),
Assignment(
Variable(Symbol('c4')),
true
),
Assignment(
Variable(Symbol('c5')),
true
),
Assignment(
Variable(Symbol('c6')),
                false
            )
)
)
assert res28[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
Symbol('a')
),
Assignment(
Variable(Symbol('c2')),
false
),
Assignment(
Variable(Symbol('c3')),
true
),
Assignment(
Variable(Symbol('c4')),
Symbol('a')
)
)
)
assert res29[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
Symbol('a')
),
Assignment(
Variable(Symbol('c2')),
false
),
Assignment(
Variable(Symbol('c3')),
true
),
Assignment(
Variable(Symbol('c4')),
Symbol('a')
)
)
)
assert res30[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc'))
)
),
Declaration(
Variable(Symbol('c'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('d'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c1'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c2'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c3'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c4'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c5'),
type=Type(String('bool'))
)
),
Declaration(
Variable(Symbol('c6'),
type=Type(String('bool'))
)
),
Assignment(
Variable(Symbol('c1')),
And(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c2')),
And(
Symbol('a'),
Symbol('c')
)
),
Assignment(
Variable(Symbol('c3')),
And(
Symbol('c'),
Symbol('d')
)
),
Assignment(
Variable(Symbol('c4')),
Or(
Symbol('a'),
Symbol('b')
)
),
Assignment(
Variable(Symbol('c5')),
Or(
Symbol('a'),
Symbol('c')
)
),
Assignment(
Variable(Symbol('c6')),
Or(
Symbol('c'),
Symbol('d')
)
)
)
)
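    # Unary plus/minus, C-style casts and the ternary operator are not
    # supported and must raise.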
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise1, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise2, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise3, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise4, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise5, 'c'))
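

# The same operator coverage as above, this time in the initializers of
# file-scope declarations.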
@XFAIL
def test_var_decl():
c_src1 = (
'int b = 100;' + '\n' +
'int a = b;' + '\n'
)
c_src2 = (
'int a = 1;' + '\n' +
'int b = a + 1;' + '\n'
)
c_src3 = (
'float a = 10.0 + 2.5;' + '\n' +
'float b = a * 20.0;' + '\n'
)
c_src4 = (
'int a = 1 + 100 - 3 * 6;' + '\n'
)
c_src5 = (
'int a = (((1 + 100) * 12) - 3) * (6 - 10);' + '\n'
)
c_src6 = (
'int b = 2;' + '\n' +
'int c = 3;' + '\n' +
'int a = b + c * 4;' + '\n'
)
c_src7 = (
'int b = 1;' + '\n' +
'int c = b + 2;' + '\n' +
'int a = 10 * b * b * c;' + '\n'
)
c_src8 = (
'void func()'+
'{' + '\n' +
'int a = 1;' + '\n' +
'int b = 2;' + '\n' +
'int temp = a;' + '\n' +
'a = b;' + '\n' +
'b = temp;' + '\n' +
'}'
)
c_src9 = (
'int a = 1;' + '\n' +
'int b = 2;' + '\n' +
'int c = a;' + '\n' +
'int d = a + b + c;' + '\n' +
        'int e = a*a*a + 3*a*a*b + 3*a*b*b + b*b*b;' + '\n' +
'int f = (a + b + c) * (a + b - c);' + '\n' +
'int g = (a + b + c + d)*(a + b + c + d)*(a * (b - c));'
+ '\n'
)
c_src10 = (
'float a = 10.0;' + '\n' +
'float b = 2.5;' + '\n' +
'float c = a*a + 2*a*b + b*b;' + '\n'
)
c_src11 = (
'float a = 10.0 / 2.5;' + '\n'
)
c_src12 = (
'int a = 100 / 4;' + '\n'
)
c_src13 = (
'int a = 20 - 100 / 4 * 5 + 10;' + '\n'
)
c_src14 = (
'int a = (20 - 100) / 4 * (5 + 10);' + '\n'
)
c_src15 = (
'int a = 4;' + '\n' +
'int b = 2;' + '\n' +
'float c = b/a;' + '\n'
)
c_src16 = (
'int a = 2;' + '\n' +
'int d = 5;' + '\n' +
'int n = 10;' + '\n' +
'int s = (a/2)*(2*a + (n-1)*d);' + '\n'
)
c_src17 = (
'int a = 1 % 2;' + '\n'
)
c_src18 = (
'int a = 2;' + '\n' +
'int b = a % 3;' + '\n'
)
c_src19 = (
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int c = a % b;' + '\n'
)
c_src20 = (
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int mod = 1000000007;' + '\n' +
'int c = (a + b * (100/a)) % mod;' + '\n'
)
c_src21 = (
'int a = 100;' + '\n' +
'int b = 3;' + '\n' +
'int mod = 1000000007;' + '\n' +
'int c = ((a % mod + b % mod) % mod *(' \
'a % mod - b % mod) % mod) % mod;' + '\n'
)
c_src22 = (
'bool a = 1 == 2, b = 1 != 2;'
)
c_src23 = (
'bool a = 1 < 2, b = 1 <= 2, c = 1 > 2, d = 1 >= 2;'
)
c_src24 = (
'int a = 1, b = 2;' + '\n' +
'bool c1 = a == 1;' + '\n' +
'bool c2 = b == 2;' + '\n' +
'bool c3 = 1 != a;' + '\n' +
'bool c4 = 1 != b;' + '\n' +
'bool c5 = a < 0;' + '\n' +
'bool c6 = b <= 10;' + '\n' +
'bool c7 = a > 0;' + '\n' +
'bool c8 = b >= 11;'
)
c_src25 = (
'int a = 3, b = 4;' + '\n' +
'bool c1 = a == b;' + '\n' +
'bool c2 = a != b;' + '\n' +
'bool c3 = a < b;' + '\n' +
'bool c4 = a <= b;' + '\n' +
'bool c5 = a > b;' + '\n' +
'bool c6 = a >= b;'
)
c_src26 = (
'float a = 1.25, b = 2.5;' + '\n' +
'bool c1 = a == 1.25;' + '\n' +
'bool c2 = b == 2.54;' + '\n' +
'bool c3 = 1.2 != a;' + '\n' +
'bool c4 = 1.5 != b;'
)
c_src27 = (
'float a = 1.25, b = 2.5;' + '\n' +
'bool c1 = a == b;' + '\n' +
'bool c2 = a != b;' + '\n' +
'bool c3 = a < b;' + '\n' +
'bool c4 = a <= b;' + '\n' +
'bool c5 = a > b;' + '\n' +
'bool c6 = a >= b;'
)
c_src28 = (
'bool c1 = true == true;' + '\n' +
'bool c2 = true == false;' + '\n' +
'bool c3 = false == false;' + '\n' +
'bool c4 = true != true;' + '\n' +
'bool c5 = true != false;' + '\n' +
'bool c6 = false != false;'
)
c_src29 = (
'bool c1 = true && true;' + '\n' +
'bool c2 = true && false;' + '\n' +
'bool c3 = false && false;' + '\n' +
'bool c4 = true || true;' + '\n' +
'bool c5 = true || false;' + '\n' +
'bool c6 = false || false;'
)
c_src30 = (
'bool a = false;' + '\n' +
'bool c1 = a && true;' + '\n' +
'bool c2 = false && a;' + '\n' +
'bool c3 = true || a;' + '\n' +
'bool c4 = a || false;'
)
c_src31 = (
'int a = 1;' + '\n' +
'bool c1 = a && 1;' + '\n' +
'bool c2 = a && 0;' + '\n' +
'bool c3 = a || 1;' + '\n' +
'bool c4 = 0 || a;'
)
c_src32 = (
'int a = 1, b = 0;' + '\n' +
'bool c = false, d = true;'+ '\n' +
'bool c1 = a && b;' + '\n' +
'bool c2 = a && c;' + '\n' +
'bool c3 = c && d;' + '\n' +
'bool c4 = a || b;' + '\n' +
'bool c5 = a || c;' + '\n' +
'bool c6 = c || d;'
)
c_src_raise1 = (
"char a = 'b';"
)
c_src_raise2 = (
'int a[] = {10, 20};'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
res6 = SymPyExpression(c_src6, 'c').return_expr()
res7 = SymPyExpression(c_src7, 'c').return_expr()
res8 = SymPyExpression(c_src8, 'c').return_expr()
res9 = SymPyExpression(c_src9, 'c').return_expr()
res10 = SymPyExpression(c_src10, 'c').return_expr()
res11 = SymPyExpression(c_src11, 'c').return_expr()
res12 = SymPyExpression(c_src12, 'c').return_expr()
res13 = SymPyExpression(c_src13, 'c').return_expr()
res14 = SymPyExpression(c_src14, 'c').return_expr()
res15 = SymPyExpression(c_src15, 'c').return_expr()
res16 = SymPyExpression(c_src16, 'c').return_expr()
res17 = SymPyExpression(c_src17, 'c').return_expr()
res18 = SymPyExpression(c_src18, 'c').return_expr()
res19 = SymPyExpression(c_src19, 'c').return_expr()
res20 = SymPyExpression(c_src20, 'c').return_expr()
res21 = SymPyExpression(c_src21, 'c').return_expr()
res22 = SymPyExpression(c_src22, 'c').return_expr()
res23 = SymPyExpression(c_src23, 'c').return_expr()
res24 = SymPyExpression(c_src24, 'c').return_expr()
res25 = SymPyExpression(c_src25, 'c').return_expr()
res26 = SymPyExpression(c_src26, 'c').return_expr()
res27 = SymPyExpression(c_src27, 'c').return_expr()
res28 = SymPyExpression(c_src28, 'c').return_expr()
res29 = SymPyExpression(c_src29, 'c').return_expr()
res30 = SymPyExpression(c_src30, 'c').return_expr()
res31 = SymPyExpression(c_src31, 'c').return_expr()
res32 = SymPyExpression(c_src32, 'c').return_expr()
assert res1[0] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
)
assert res1[1] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Symbol('b')
)
)
assert res2[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res2[1] == Declaration(Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('a'),
Integer(1)
)
)
)
assert res3[0] == Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('12.5', precision=53)
)
)
assert res3[1] == Declaration(
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Mul(
Float('20.0', precision=53),
Symbol('a')
)
)
)
assert res4[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(83)
)
)
assert res5[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(-4836)
)
)
assert res6[0] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res6[1] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res6[2] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('b'),
Mul(
Integer(4),
Symbol('c')
)
)
)
)
assert res7[0] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res7[1] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('b'),
Integer(2)
)
)
)
assert res7[2] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Mul(
Integer(10),
Pow(
Symbol('b'),
Integer(2)
),
Symbol('c')
)
)
)
assert res8[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
),
Declaration(
Variable(Symbol('temp'),
type=IntBaseType(String('intc')),
value=Symbol('a')
)
),
Assignment(
Variable(Symbol('a')),
Symbol('b')
),
Assignment(
Variable(Symbol('b')),
Symbol('temp')
)
)
)
assert res9[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res9[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res9[2] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Symbol('a')
)
)
assert res9[3] == Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('a'),
Symbol('b'),
Symbol('c')
)
)
)
assert res9[4] == Declaration(
Variable(Symbol('e'),
type=IntBaseType(String('intc')),
value=Add(
Pow(
Symbol('a'),
Integer(3)
),
Mul(
Integer(3),
Pow(
Symbol('a'),
Integer(2)
),
Symbol('b')
),
Mul(
Integer(3),
Symbol('a'),
Pow(
Symbol('b'),
Integer(2)
)
),
Pow(
Symbol('b'),
Integer(3)
)
)
)
)
assert res9[5] == Declaration(
Variable(Symbol('f'),
type=IntBaseType(String('intc')),
value=Mul(
Add(
Symbol('a'),
Symbol('b'),
Mul(
Integer(-1),
Symbol('c')
)
),
Add(
Symbol('a'),
Symbol('b'),
Symbol('c')
)
)
)
)
assert res9[6] == Declaration(
Variable(Symbol('g'),
type=IntBaseType(String('intc')),
value=Mul(
Symbol('a'),
Add(
Symbol('b'),
Mul(
Integer(-1),
Symbol('c')
)
),
Pow(
Add(
Symbol('a'),
Symbol('b'),
Symbol('c'),
Symbol('d')
),
Integer(2)
)
)
)
)
assert res10[0] == Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('10.0', precision=53)
)
)
assert res10[1] == Declaration(
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.5', precision=53)
)
)
assert res10[2] == Declaration(
Variable(Symbol('c'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Add(
Pow(
Symbol('a'),
Integer(2)
),
Mul(
Integer(2),
Symbol('a'),
Symbol('b')
),
Pow(
Symbol('b'),
Integer(2)
)
)
)
)
assert res11[0] == Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('4.0', precision=53)
)
)
assert res12[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(25)
)
)
assert res13[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(-95)
)
)
assert res14[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(-300)
)
)
assert res15[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(4)
)
)
assert res15[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res15[2] == Declaration(
Variable(Symbol('c'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Mul(
Pow(
Symbol('a'),
Integer(-1)
),
Symbol('b')
)
)
)
assert res16[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res16[1] == Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc')),
value=Integer(5)
)
)
assert res16[2] == Declaration(
Variable(Symbol('n'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
)
assert res16[3] == Declaration(
Variable(Symbol('s'),
type=IntBaseType(String('intc')),
value=Mul(
Rational(1, 2),
Symbol('a'),
Add(
Mul(
Integer(2),
Symbol('a')
),
Mul(
Symbol('d'),
Add(
Symbol('n'),
Integer(-1)
)
)
)
)
)
)
assert res17[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res18[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res18[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Mod(
Symbol('a'),
Integer(3)
)
)
)
assert res19[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
)
assert res19[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res19[2] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Mod(
Symbol('a'),
Symbol('b')
)
)
)
assert res20[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
)
assert res20[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res20[2] == Declaration(
Variable(Symbol('mod'),
type=IntBaseType(String('intc')),
value=Integer(1000000007)
)
)
assert res20[3] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Mod(
Add(
Symbol('a'),
Mul(
Integer(100),
Pow(
Symbol('a'),
Integer(-1)
),
Symbol('b')
)
),
Symbol('mod')
)
)
)
assert res21[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
)
assert res21[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res21[2] == Declaration(
Variable(Symbol('mod'),
type=IntBaseType(String('intc')),
value=Integer(1000000007)
)
)
assert res21[3] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Mod(
Mul(
Add(
Symbol('a'),
Mul(
Integer(-1),
Symbol('b')
)
),
Add(
Symbol('a'),
Symbol('b')
)
),
Symbol('mod')
)
)
)
assert res22[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=false
)
)
assert res22[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=true
)
)
assert res23[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=true
)
)
assert res23[1] == Declaration(
Variable(Symbol('b'),
type=Type(String('bool')),
value=true
)
)
assert res23[2] == Declaration(
Variable(Symbol('c'),
type=Type(String('bool')),
value=false
)
)
assert res23[3] == Declaration(
Variable(Symbol('d'),
type=Type(String('bool')),
value=false
)
)
assert res24[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res24[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res24[2] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=Equality(
Symbol('a'),
Integer(1)
)
)
)
assert res24[3] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=Equality(
Symbol('b'),
Integer(2)
)
)
)
assert res24[4] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=Unequality(
Integer(1),
Symbol('a')
)
)
)
assert res24[5] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=Unequality(
Integer(1),
Symbol('b')
)
)
)
assert res24[6] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=StrictLessThan(Symbol('a'),
Integer(0)
)
)
)
assert res24[7] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=LessThan(
Symbol('b'),
Integer(10)
)
)
)
assert res24[8] == Declaration(
Variable(Symbol('c7'),
type=Type(String('bool')),
value=StrictGreaterThan(
Symbol('a'),
Integer(0)
)
)
)
assert res24[9] == Declaration(
Variable(Symbol('c8'),
type=Type(String('bool')),
value=GreaterThan(
Symbol('b'),
Integer(11)
)
)
)
assert res25[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res25[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(4)
)
)
assert res25[2] == Declaration(Variable(Symbol('c1'),
type=Type(String('bool')),
value=Equality(
Symbol('a'),
Symbol('b')
)
)
)
assert res25[3] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=Unequality(
Symbol('a'),
Symbol('b')
)
)
)
assert res25[4] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=StrictLessThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res25[5] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=LessThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res25[6] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=StrictGreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res25[7] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=GreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res26[0] == Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.25', precision=53)
)
)
assert res26[1] == Declaration(
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.5', precision=53)
)
)
assert res26[2] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=Equality(
Symbol('a'),
Float('1.25', precision=53)
)
)
)
assert res26[3] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=Equality(
Symbol('b'),
Float('2.54', precision=53)
)
)
)
assert res26[4] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=Unequality(
Float('1.2', precision=53),
Symbol('a')
)
)
)
assert res26[5] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=Unequality(
Float('1.5', precision=53),
Symbol('b')
)
)
)
assert res27[0] == Declaration(
Variable(Symbol('a'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('1.25', precision=53)
)
)
assert res27[1] == Declaration(
Variable(Symbol('b'),
type=FloatType(
String('float32'),
nbits=Integer(32),
nmant=Integer(23),
nexp=Integer(8)
),
value=Float('2.5', precision=53)
)
)
assert res27[2] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=Equality(
Symbol('a'),
Symbol('b')
)
)
)
assert res27[3] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=Unequality(
Symbol('a'),
Symbol('b')
)
)
)
assert res27[4] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=StrictLessThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res27[5] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=LessThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res27[6] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=StrictGreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res27[7] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=GreaterThan(
Symbol('a'),
Symbol('b')
)
)
)
assert res28[0] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=true
)
)
assert res28[1] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=false
)
)
assert res28[2] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=true
)
)
assert res28[3] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=false
)
)
assert res28[4] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=true
)
)
assert res28[5] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=false
)
)
assert res29[0] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=true
)
)
assert res29[1] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=false
)
)
assert res29[2] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=false
)
)
assert res29[3] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=true
)
)
assert res29[4] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=true
)
)
assert res29[5] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=false
)
)
assert res30[0] == Declaration(
Variable(Symbol('a'),
type=Type(String('bool')),
value=false
)
)
assert res30[1] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=Symbol('a')
)
)
assert res30[2] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=false
)
)
assert res30[3] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=true
)
)
assert res30[4] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=Symbol('a')
)
)
assert res31[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res31[1] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=Symbol('a')
)
)
assert res31[2] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=false
)
)
assert res31[3] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=true
)
)
assert res31[4] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=Symbol('a')
)
)
assert res32[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res32[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(0)
)
)
assert res32[2] == Declaration(
Variable(Symbol('c'),
type=Type(String('bool')),
value=false
)
)
assert res32[3] == Declaration(
Variable(Symbol('d'),
type=Type(String('bool')),
value=true
)
)
assert res32[4] == Declaration(
Variable(Symbol('c1'),
type=Type(String('bool')),
value=And(
Symbol('a'),
Symbol('b')
)
)
)
assert res32[5] == Declaration(
Variable(Symbol('c2'),
type=Type(String('bool')),
value=And(
Symbol('a'),
Symbol('c')
)
)
)
assert res32[6] == Declaration(
Variable(Symbol('c3'),
type=Type(String('bool')),
value=And(
Symbol('c'),
Symbol('d')
)
)
)
assert res32[7] == Declaration(
Variable(Symbol('c4'),
type=Type(String('bool')),
value=Or(
Symbol('a'),
Symbol('b')
)
)
)
assert res32[8] == Declaration(
Variable(Symbol('c5'),
type=Type(String('bool')),
value=Or(
Symbol('a'),
Symbol('c')
)
)
)
assert res32[9] == Declaration(
Variable(Symbol('c6'),
type=Type(String('bool')),
value=Or(
Symbol('c'),
Symbol('d')
)
)
)
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise1, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise2, 'c'))
def test_paren_expr():
c_src1 = (
'int a = (1);'
'int b = (1 + 2 * 3);'
)
c_src2 = (
'int a = 1, b = 2, c = 3;'
'int d = (a);'
'int e = (a + 1);'
'int f = (a + b * c - d / e);'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
assert res1[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res1[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(7)
)
)
assert res2[0] == Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(1)
)
)
assert res2[1] == Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(2)
)
)
assert res2[2] == Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Integer(3)
)
)
assert res2[3] == Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc')),
value=Symbol('a')
)
)
assert res2[4] == Declaration(
Variable(Symbol('e'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('a'),
Integer(1)
)
)
)
assert res2[5] == Declaration(
Variable(Symbol('f'),
type=IntBaseType(String('intc')),
value=Add(
Symbol('a'),
Mul(
Symbol('b'),
Symbol('c')
),
Mul(
Integer(-1),
Symbol('d'),
Pow(
Symbol('e'),
Integer(-1)
)
)
)
)
)
def test_unary_operators():
c_src1 = (
'void func()'+
'{' + '\n' +
'int a = 10;' + '\n' +
'int b = 20;' + '\n' +
'++a;' + '\n' +
'--b;' + '\n' +
'a++;' + '\n' +
'b--;' + '\n' +
'}'
)
c_src2 = (
'void func()'+
'{' + '\n' +
'int a = 10;' + '\n' +
'int b = -100;' + '\n' +
'int c = +19;' + '\n' +
'int d = ++a;' + '\n' +
'int e = --b;' + '\n' +
'int f = a++;' + '\n' +
'int g = b--;' + '\n' +
'bool h = !false;' + '\n' +
'bool i = !d;' + '\n' +
'bool j = !0;' + '\n' +
'bool k = !10.0;' + '\n' +
'}'
)
c_src_raise1 = (
'void func()'+
'{' + '\n' +
'int a = 10;' + '\n' +
'int b = ~a;' + '\n' +
'}'
)
c_src_raise2 = (
'void func()'+
'{' + '\n' +
'int a = 10;' + '\n' +
'int b = *&a;' + '\n' +
'}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
assert res1[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(20)
)
),
PreIncrement(Symbol('a')),
PreDecrement(Symbol('b')),
PostIncrement(Symbol('a')),
PostDecrement(Symbol('b'))
)
)
assert res2[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
),
Declaration(
Variable(Symbol('b'),
type=IntBaseType(String('intc')),
value=Integer(-100)
)
),
Declaration(
Variable(Symbol('c'),
type=IntBaseType(String('intc')),
value=Integer(19)
)
),
Declaration(
Variable(Symbol('d'),
type=IntBaseType(String('intc')),
value=PreIncrement(Symbol('a'))
)
),
Declaration(
Variable(Symbol('e'),
type=IntBaseType(String('intc')),
value=PreDecrement(Symbol('b'))
)
),
Declaration(
Variable(Symbol('f'),
type=IntBaseType(String('intc')),
value=PostIncrement(Symbol('a'))
)
),
Declaration(
Variable(Symbol('g'),
type=IntBaseType(String('intc')),
value=PostDecrement(Symbol('b'))
)
),
Declaration(
Variable(Symbol('h'),
type=Type(String('bool')),
value=true
)
),
Declaration(
Variable(Symbol('i'),
type=Type(String('bool')),
value=Not(Symbol('d'))
)
),
Declaration(
Variable(Symbol('j'),
type=Type(String('bool')),
value=true
)
),
Declaration(
Variable(Symbol('k'),
type=Type(String('bool')),
value=false
)
)
)
)
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise1, 'c'))
raises(NotImplementedError, lambda: SymPyExpression(c_src_raise2, 'c'))
def test_compound_assignment_operator():
c_src = (
'void func()'+
'{' + '\n' +
'int a = 100;' + '\n' +
'a += 10;' + '\n' +
'a -= 10;' + '\n' +
'a *= 10;' + '\n' +
'a /= 10;' + '\n' +
'a %= 10;' + '\n' +
'}'
)
res = SymPyExpression(c_src, 'c').return_expr()
assert res[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('a'),
type=IntBaseType(String('intc')),
value=Integer(100)
)
),
AddAugmentedAssignment(
Variable(Symbol('a')),
Integer(10)
),
SubAugmentedAssignment(
Variable(Symbol('a')),
Integer(10)
),
MulAugmentedAssignment(
Variable(Symbol('a')),
Integer(10)
),
DivAugmentedAssignment(
Variable(Symbol('a')),
Integer(10)
),
ModAugmentedAssignment(
Variable(Symbol('a')),
Integer(10)
)
)
)
def test_while_stmt():
c_src1 = (
'void func()'+
'{' + '\n' +
'int i = 0;' + '\n' +
'while(i < 10)' + '\n' +
'{' + '\n' +
'i++;' + '\n' +
'}'
'}'
)
c_src2 = (
'void func()'+
'{' + '\n' +
'int i = 0;' + '\n' +
'while(i < 10)' + '\n' +
'i++;' + '\n' +
'}'
)
c_src3 = (
'void func()'+
'{' + '\n' +
'int i = 10;' + '\n' +
'int cnt = 0;' + '\n' +
'while(i > 0)' + '\n' +
'{' + '\n' +
'i--;' + '\n' +
'cnt++;' + '\n' +
'}' + '\n' +
'}'
)
c_src4 = (
'int digit_sum(int n)'+
'{' + '\n' +
'int sum = 0;' + '\n' +
'while(n > 0)' + '\n' +
'{' + '\n' +
'sum += (n % 10);' + '\n' +
'n /= 10;' + '\n' +
'}' + '\n' +
'return sum;' + '\n' +
'}'
)
c_src5 = (
'void func()'+
'{' + '\n' +
'while(1);' + '\n' +
'}'
)
res1 = SymPyExpression(c_src1, 'c').return_expr()
res2 = SymPyExpression(c_src2, 'c').return_expr()
res3 = SymPyExpression(c_src3, 'c').return_expr()
res4 = SymPyExpression(c_src4, 'c').return_expr()
res5 = SymPyExpression(c_src5, 'c').return_expr()
assert res1[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(Symbol('i'),
type=IntBaseType(String('intc')),
value=Integer(0)
)
),
While(
StrictLessThan(
Symbol('i'),
Integer(10)
),
body=CodeBlock(
PostIncrement(
Symbol('i')
)
)
)
)
)
assert res2[0] == res1[0]
assert res3[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
Declaration(
Variable(
Symbol('i'),
type=IntBaseType(String('intc')),
value=Integer(10)
)
),
Declaration(
Variable(
Symbol('cnt'),
type=IntBaseType(String('intc')),
value=Integer(0)
)
),
While(
StrictGreaterThan(
Symbol('i'),
Integer(0)
),
body=CodeBlock(
PostDecrement(
Symbol('i')
),
PostIncrement(
Symbol('cnt')
)
)
)
)
)
assert res4[0] == FunctionDefinition(
IntBaseType(String('intc')),
name=String('digit_sum'),
parameters=(
Variable(
Symbol('n'),
type=IntBaseType(String('intc'))
),
),
body=CodeBlock(
Declaration(
Variable(
Symbol('sum'),
type=IntBaseType(String('intc')),
value=Integer(0)
)
),
While(
StrictGreaterThan(
Symbol('n'),
Integer(0)
),
body=CodeBlock(
AddAugmentedAssignment(
Variable(
Symbol('sum')
),
Mod(
Symbol('n'),
Integer(10)
)
),
DivAugmentedAssignment(
Variable(
Symbol('n')
),
Integer(10)
)
)
),
Return('sum')
)
)
assert res5[0] == FunctionDefinition(
NoneToken(),
name=String('func'),
parameters=(),
body=CodeBlock(
While(
Integer(1),
body=CodeBlock(
NoneToken()
)
)
)
)
else:
def test_raise():
from sympy.parsing.c.c_parser import CCodeConverter
raises(ImportError, lambda: CCodeConverter())
raises(ImportError, lambda: SymPyExpression(' ', mode = 'c'))
| [
"[email protected]"
] | |
c72d6c6b6adc5ad4348b71de4fe532736a5db64c | 26ca1e0906feece27896bd267a1f58882fcb0513 | /lessons/12.12.2019/zipfunction-demo.py | 253272c92304aed5021a4a824beef37c50137168 | [] | no_license | icecore2/python-training2019 | 092984c6dec1b05e70f9f899ee213d126c45ff63 | ee39f93adabab506c9eef68c5e686ddb59953de9 | refs/heads/master | 2020-09-02T21:19:27.959213 | 2020-04-23T20:06:08 | 2020-04-23T20:06:08 | 219,306,742 | 0 | 2 | null | 2020-01-17T15:07:06 | 2019-11-03T13:40:56 | Python | UTF-8 | Python | false | false | 181 | py | names = ["Carpetta", "Shalva", "Arona"]
prices = [990,1990,80]
data = zip(names, prices)
print(data)
print(type(data))
print("------------------")
for ob in data:
print(ob) | [
"[email protected]"
] | |
6b1f643b86225f77adeb9ea7b55566123779f3d2 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/파일입출력02_20200705144347.py | 8558bc1c70efc04967999b44f14cca489e0fd51c | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # score_file = open("score.txt", "r", encoding="utf8")
# while True:
# line = score_file.readline()
# if not line:
# break
# print(line, end="")
# score_file.close()
score_file = open("score.txt", "r", encoding="utf8")
d="")
score_file.close() | [
"[email protected]"
] | |
6d7b55b2264e8161c0ba4c4e3120b5ec27d882dd | 36bab4f5fd13efadd53e8a9eb5060945c36cf3fd | /src/utils/model_info.py | c25771bbfdf7d5c768b878e4a6b891aa5a55897f | [] | no_license | CheungBH/TimeSequenceProcess | a19530133a84518a472fd9693f2d13287eef632a | 8690cbc6c371bccc37c2e1c1ecd58cd5e69018b4 | refs/heads/master | 2022-07-16T10:52:13.885456 | 2020-08-07T13:37:58 | 2020-08-07T13:37:58 | 242,521,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,503 | py | # Code from https://github.com/simochen/model-tools.
import numpy as np
import time
import torch
import torchvision
from torch.autograd import Variable
from config.config import device
def print_model_param_nums(model, multiply_adds=True):
total = sum([param.nelement() for param in model.parameters()])
return total
def print_model_param_flops(model=None, input_height=224, input_width=224, multiply_adds=True):
prods = {}
def save_hook(name):
def hook_per(self, input, output):
prods[name] = np.prod(input[0].shape)
return hook_per
list_1=[]
def simple_hook(self, input, output):
list_1.append(np.prod(input[0].shape))
list_2={}
def simple_hook2(self, input, output):
list_2['names'] = np.prod(input[0].shape)
list_conv=[]
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling=[]
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
list_upsample=[]
# For bilinear upsample
def upsample_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
flops = output_height * output_width * output_channels * batch_size * 12
list_upsample.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d):
net.register_forward_hook(conv_hook)
if isinstance(net, torch.nn.Linear):
net.register_forward_hook(linear_hook)
if isinstance(net, torch.nn.BatchNorm2d):
net.register_forward_hook(bn_hook)
if isinstance(net, torch.nn.ReLU):
net.register_forward_hook(relu_hook)
if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
net.register_forward_hook(pooling_hook)
if isinstance(net, torch.nn.Upsample):
net.register_forward_hook(upsample_hook)
return
for c in childrens:
foo(c)
if model == None:
model = torchvision.models.alexnet()
foo(model)
if device != "cpu":
input = Variable(torch.rand(3, 3, input_width, input_height).cuda(), requires_grad = True)
else:
input = Variable(torch.rand(3, 3, input_width, input_height), requires_grad = True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling) + sum(list_upsample))
# print(' + Number of FLOPs: %.5fG' % (total_flops / 3 / 1e9))
return total_flops / 3
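# Hand-worked check of the conv hook above (illustrative sketch, not part of the script):
# for Conv2d(3, 16, kernel_size=5) on a 32x32 input producing 28x28 output maps,
# kernel_ops = 5*5*(3/1) = 75 and bias_ops = 1, so with multiply_adds=True the hook
# counts (75*2 + 1) * 16 * 28 * 28 * batch_size ~= 1.89M FLOPs per image.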
def get_inference_time(model, repeat=10, height=416, width=416):
model.eval()
start = time.time()
with torch.no_grad():
inp = torch.randn(1, 3, height, width)
if device != "cpu":
inp = inp.cuda()
for i in range(repeat):
output = model(inp)
avg_infer_time = (time.time() - start) / repeat
return round(avg_infer_time, 4)
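# Minimal usage sketch for the three helpers (assumes a torchvision model; the
# calls below are illustrative, not measured values):
#   model = torchvision.models.alexnet()
#   print(print_model_param_nums(model))              # total parameter count
#   print(print_model_param_flops(model, 224, 224))   # per-image FLOPs
#   print(get_inference_time(model, repeat=5))        # mean forward time (s)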
| [
"[email protected]"
] | |
53784f03ce7829e4fb56c21ce8b2f078c69eb7af | 6c21316d93c94766d4dbbe891643ceb0eca8630f | /appendix/1/keras/02_save_model_keras.py | 6aa98c4a8f3e279649dd76087868dba2b8d5ff21 | [] | no_license | takseki/deeplearning-tensorflow-keras | 39beef782a2026aaa5c8060f9f3cb955d3db1da2 | a6efd8df8408ddaac3ed52b1037a736aa70d44ff | refs/heads/master | 2021-05-15T04:05:51.143111 | 2018-01-29T12:08:10 | 2018-01-29T12:08:10 | 119,783,663 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | import os
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
np.random.seed(123)
'''
Model file settings
'''
MODEL_DIR = os.path.join(os.path.dirname(__file__), 'model')
if os.path.exists(MODEL_DIR) is False:
os.mkdir(MODEL_DIR)
'''
Data generation
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 30000  # use a subset of MNIST
N_train = 20000
N_validation = 4000
indices = np.random.permutation(range(n))[:N]  # randomly pick N samples
X = mnist.data[indices]
X = X / 255.0
X = X - X.mean(axis=1).reshape(len(X), 1)
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)]
X_train, X_test, Y_train, Y_test = \
train_test_split(X, Y, train_size=N_train)
X_train, X_validation, Y_train, Y_validation = \
train_test_split(X_train, Y_train, test_size=N_validation)
'''
Model configuration
'''
n_in = len(X[0]) # 784
n_hiddens = [200, 200, 200]
n_out = len(Y[0]) # 10
p_keep = 0.5
activation = 'relu'
checkpoint = ModelCheckpoint(
filepath=os.path.join(
MODEL_DIR,
'model_{epoch:02d}_vloss{val_loss:.2f}.hdf5'),
save_best_only=True)
model = Sequential()
for i, input_dim in enumerate(([n_in] + n_hiddens)[:-1]):
model.add(Dense(n_hiddens[i], input_dim=input_dim))
model.add(Activation(activation))
model.add(Dropout(p_keep))
model.add(Dense(n_out))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
metrics=['accuracy'])
'''
Model training
'''
epochs = 50
batch_size = 200
model.fit(X_train, Y_train, epochs=epochs,
batch_size=batch_size,
validation_data=(X_validation, Y_validation),
callbacks=[checkpoint])
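'''
Restoring the best checkpoint (illustrative sketch; the actual filename depends on
the epoch/val_loss values produced by a given run)
'''
# from keras.models import load_model
# best_model = load_model(os.path.join(MODEL_DIR, 'model_42_vloss0.25.hdf5'))
# print(best_model.evaluate(X_test, Y_test, batch_size=batch_size))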
| [
"[email protected]"
] | |
13860fa1eafedf68adeeb3a5c6820df45f2e07eb | 578bdcf2720805c1075ba348764983d99031911f | /Udacity/Project2/BS_first_and_last.py | 3f009075ede678361730daef255531c39bb073ec | [] | no_license | mrudula-pb/Python_Code | 994de4720289ded0a55017407d27b1d0f0b08c65 | 0dcdc6589d3c614bd1e6a03aa5c2b55664b9e6b2 | refs/heads/master | 2023-03-25T16:52:27.420925 | 2021-03-22T21:40:37 | 2021-03-22T21:40:37 | 350,476,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | # Given a sorted array that may have duplicate values, use binary search to find the first and last indexes of a given value.
#
# For example, if you have the array [0, 1, 2, 2, 3, 3, 3, 4, 5, 6] and the given value is 3, the answer will be [4, 6] (because the value 3 occurs first at index 4 and last at index 6 in the array).
#
# The expected complexity of the problem is O(log(n)).
#
def binary_search(target, source):
    if len(source) == 0:
        return None  # target not present
    center = (len(source) - 1) // 2
    if source[center] == target:
        return center
    elif source[center] < target:
        # recursing on a slice loses the offset into the full array, so add it back
        right = binary_search(target, source[center + 1:])
        return None if right is None else center + 1 + right
    else:
        return binary_search(target, source[:center])

def find_first_index(arr, number):
    index = binary_search(number, arr)
    if index is None:
        return None
    # walk left across duplicates to the first occurrence
    while index > 0 and arr[index - 1] == number:
        index -= 1
    return index

def find_last_index(arr, number):
    index = binary_search(number, arr)
    if index is None:
        return None
    # walk right across duplicates to the last occurrence
    while index < len(arr) - 1 and arr[index + 1] == number:
        index += 1
    return index
target = 2
arr = [0, 1, 2, 2, 3, 3, 3, 4, 5, 6]
print("First index of ", target, ":", find_first_index(arr, target))
print("Last index of ", target, ":", find_last_index(arr, target)) | [
"[email protected]"
] | |
b475d9acd2bf5edf86414b21cbef51f2b51fc0a5 | 6dde4d0af49bda344688d37cd9cdf3af837932a1 | /mwpersistence/errors.py | fd24209bdabacff5473d53f39c2da6662aed0da4 | [
"MIT"
] | permissive | leojoubert/python-mwpersistence | 876db2686c31fb85bd5f776234ab4e660342e9b1 | cbea234c9785aed471feab11712152c06e4b7fe5 | refs/heads/master | 2020-04-16T23:38:36.011563 | 2019-01-29T14:09:33 | 2019-01-29T14:09:33 | 166,019,714 | 0 | 0 | null | 2019-01-16T10:19:32 | 2019-01-16T10:19:32 | null | UTF-8 | Python | false | false | 44 | py | class FileTypeError(RuntimeError):
pass
| [
"[email protected]"
] | |
9b577efe3af43ad442b2ecbcdc2087ad0a041ca3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02713/s488912314.py | 5f097787063f1a8f1bf781024c3dc61745246b46 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import math
k=int(input())
n=0
for a in range(1,k+1):
for b in range(1,k+1):
d = math.gcd(a,b)
for c in range(1,k+1):
n+=math.gcd(d,c)
print(n) | [
"[email protected]"
] | |
f7470fd051d4593fdc5ae963ad11cbc24acc36fc | ba719722e890a7822a5533a8b6efd06cc776b17e | /Macricopa_County_30_08_2020/Phoenix_85014_Maricopa_AZ.py | b7e87b40911505a21c71c5d2057f2de94414ea26 | [] | no_license | devhadisov/python_selenium_zillow | 9c80566d829721dce952ab4d7a285d1fd970fe19 | e0b4f7243b548404912bdcdce4bcdf7168413242 | refs/heads/master | 2022-12-24T17:14:32.903874 | 2020-09-24T20:20:25 | 2020-09-24T20:20:25 | 298,384,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,817 | py | import selenium
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import requests
import urllib.request
import json, csv, lxml, time, re
import datetime
import hashlib
from insertdatabase import InsertDB
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def wait(broswer, xpath):
WebDriverWait(driver, 100).until(
EC.visibility_of_element_located((By.XPATH, xpath)))
def main(htmlstring, driver):
table_name = "maricopa_30_08_2020"
header = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ko;q=0.8',
'cookie' : 'zguid=23|%2410ab80e6-80db-4e0a-9f70-2449ca972d74; _ga=GA1.2.759159145.1599348167; zjs_user_id=null; zjs_anonymous_id=%2210ab80e6-80db-4e0a-9f70-2449ca972d74%22; _gcl_au=1.1.607943717.1599348169; _pxvid=be9ff2f0-efce-11ea-9652-0242ac12000b; __gads=ID=cab593cad6cbce43:T=1599348200:S=ALNI_MaFYrYCZZvPIITKUEoEDXGvXSRYwQ; _gid=GA1.2.1287304564.1599556314; _pin_unauth=dWlkPU9EUXdZamxrTldJdE9ESTBNUzAwWXprMExXSXdNekl0TkdWak0yWTFNVEE1TldJeSZycD1abUZzYzJV; ki_r=; ki_s=; _fbp=fb.1.1599562363584.1440832488; g_state={"i_p":1599570378147,"i_l":1}; ki_t=1599556892885%3B1599556892885%3B1599563330503%3B1%3B19; JSESSIONID=62F47C1DAFBF00B3DB7B301BEA3E6586; zgsession=1|8840c1ee-f8a6-43d7-9a7b-3169df33c987; _pxff_cc=U2FtZVNpdGU9TGF4Ow==; _pxff_rf=1; _pxff_fp=1; _pxff_bsco=1; _px3=6d722620cec81d0df86c8eff4b631bdd93cef163fb0a14808e80f81013747454:M7trNae6CpAztMArZT97P3Vy9jFLz9FuEZ5p2efYpXeqOJC7Bw+xzsVGxArAYe+PM+vQKNuEI3qytjutx2UEXg==:1000:M1Vo/kdU1lI8Zqky6jJnuwSu45xHxX8ueCLKUiW6KX8rNR+VWAORLQi+1ns4dhilOU7gSCJfJmToj1SeyKN49kHZQZIQ0wSFeFtn+txzkIo/fhFAr2Cq7WvjCVWw7GBx8F3JIjMqHf1BZAAFg0YXqy/IVuCFhvIioSyK35nkm4A=; _gat=1; KruxPixel=true; DoubleClickSession=true; _uetsid=f44fc66ca5c392a6859170ed776b6ae9; _uetvid=dc708dafb2b6d91ab6c6923ac1ae6673; AWSALB=3gLhoP6QCdmf4zskymQ7ej/kbqzRHNkv+QNQMFmS6Y7S9pENaOusdnQVhFHWm1W9z8/1Og/WmO8JK63ys0wmi6ZNwRc4SN8lf4pcoyrm+nj8lLAPLRDIqMaYAEte; AWSALBCORS=3gLhoP6QCdmf4zskymQ7ej/kbqzRHNkv+QNQMFmS6Y7S9pENaOusdnQVhFHWm1W9z8/1Og/WmO8JK63ys0wmi6ZNwRc4SN8lf4pcoyrm+nj8lLAPLRDIqMaYAEte; search=6|1602203173818%7Crb%3DMaricopa%252C-AZ%26rect%3D33.203401%252C-111.882231%252C32.788612%252C-112.512953%26disp%3Dmap%26mdm%3Dauto%26sort%3Ddays%26pt%3Dpmf%252Cpf%26fs%3D1%26fr%3D0%26rs%3D0%26ah%3D0%26singlestory%3D0%26abo%3D0%26garage%3D0%26pool%3D0%26ac%3D0%26waterfront%3D0%26finished%3D0%26unfinished%3D0%26cityview%3D0%26mountainview%3D0%26parkview%3D0%26waterview%3D0%26hoadata%3D1%26zillow-owned%3D0%263dhome%3D0%09%0932697%09%09%09%09%09%09',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
}
pagination = ""
usersSearchTerm = "85014"
west = "-112.11312814855958"
east = "-112.00377985144044"
south = "33.44773367209288"
north = "33.5671071512105"
regionId = "94730"
regionType = "7"
mapZoom = "13"
includeList = "true"
# https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={%22pagination%22:{},%22usersSearchTerm%22:%2285006%22,%22mapBounds%22:{%22west%22:-112.07973577801513,%22east%22:-112.01665022198486,%22south%22:33.43522122804253,%22north%22:33.494937169247144},%22regionSelection%22:[{%22regionId%22:94722,%22regionType%22:7}],%22isMapVisible%22:true,%22mapZoom%22:14,%22filterState%22:{%22sort%22:{%22value%22:%22globalrelevanceex%22}},%22isListVisible%22:true}&includeMap=false&includeList=true
    # NOTE: str.format() cannot be used with this template because the literal JSON
    # braces would be parsed as format fields; the concatenated URL below is used instead.
    default_first_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{0},"usersSearchTerm":"{1}","mapBounds":{"west":{2},"east":{3},"south":{4},"north":{5}},"regionSelection":[{"regionId":{6},"regionType":{7}}],"isMapVisible":true,"mapZoom":{8},"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList={9}'
first_case_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
# first_url = default_first_url.format(pagination, usersSearchTerm, west, east, south, north, regionId, regionType, mapZoom, includeList)
print(first_case_url)
# return
default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
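    # A tidier way to build these URLs would serialize the query state directly
    # (hedged sketch; the dict mirrors the fields concatenated above):
    #   import urllib.parse
    #   state = {"pagination": {}, "usersSearchTerm": usersSearchTerm,
    #            "mapBounds": {"west": float(west), "east": float(east),
    #                          "south": float(south), "north": float(north)},
    #            "regionSelection": [{"regionId": int(regionId), "regionType": int(regionType)}],
    #            "isMapVisible": True, "mapZoom": int(mapZoom),
    #            "filterState": {"sort": {"value": "globalrelevanceex"}}, "isListVisible": True}
    #   url = ('https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState='
    #          + urllib.parse.quote(json.dumps(state)) + '&includeMap=false&includeList=true')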
counts = 1
for page in range(1, 4):
default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + str(page) + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
if page == 1:
url = first_case_url
else:
url = default_page_url
response = requests.get(url, headers=header)
result = response.json()
properties_infos = result["searchResults"]["listResults"]
print(len(properties_infos))
for i in range(0, len(properties_infos)):
data_base = []
property_url = properties_infos[i]["detailUrl"]
status_text = properties_infos[i]["statusText"]
print(status_text, counts)
counts += 1
try:
street_add = properties_infos[i]["hdpData"]["homeInfo"]["streetAddress"]
except:
street_add = ""
try:
city = properties_infos[i]["hdpData"]["homeInfo"]["city"]
except:
city = ""
try:
state = properties_infos[i]["hdpData"]["homeInfo"]["state"]
except:
state = ""
try:
zipcode = properties_infos[i]["hdpData"]["homeInfo"]["zipcode"]
except:
zipcode = ""
property_address = street_add + ", " + city + ", " + state + " " + zipcode
try:
bathrooms = properties_infos[i]["hdpData"]["homeInfo"]["bathrooms"]
except:
bathrooms = ""
try:
bedrooms = properties_infos[i]["hdpData"]["homeInfo"]["bedrooms"]
except:
bedrooms = ""
try:
tax_assessed_value = properties_infos[i]["hdpData"]["homeInfo"]["taxAssessedValue"]
except:
tax_assessed_value = ""
try:
zestimate = properties_infos[i]["hdpData"]["homeInfo"]["zestimate"]
except:
zestimate = ""
try:
rent_zestimate = properties_infos[i]["hdpData"]["homeInfo"]["rentZestimate"]
except:
rent_zestimate = ""
try:
home_type = properties_infos[i]["hdpData"]["homeInfo"]["homeType"]
except:
home_type = ""
if "by owner" in status_text:
print("--------------------------------------------------> : ", i + 1)
driver.get(property_url)
time.sleep(10)
try:
wait(driver, "//ul[@class='ds-home-fact-list']")
except:
print("There is no xpath")
# street_add = driver.find_element_by_xpath("//h1[@class='ds-address-container']/span[1]").text
# property_address = street_add + ", " + city + ", " + state + " " + zipcode
# phone_number = driver.find_element_by_xpath("//span[@class='listing-field']").text
phones = re.findall(r'[(][\d]{3}[)][ ]?[\d]{3}-[\d]{4}', driver.page_source)
for phone in range(1, len(phones) + 1):
phone_number = phones[phone - 1]
features_labels = driver.find_elements_by_xpath("//ul[@class='ds-home-fact-list']//span[contains(@class, 'ds-standard-label') and contains(@class, 'ds-home-fact-label')]")
features_infos = driver.find_elements_by_xpath("//ul[@class='ds-home-fact-list']//span[contains(@class, 'ds-body') and contains(@class, 'ds-home-fact-value')]")
parking = ""
year_built = ""
hoa = ""
heating = ""
lot = ""
cooling = ""
price_sqft = ""
for feature_label, feature_info in zip(features_labels, features_infos):
feature_label_txt = feature_label.text
if 'Parking' in feature_label_txt:
parking = feature_info.text
elif 'Year built' in feature_label_txt:
year_built = feature_info.text
elif 'HOA' in feature_label_txt:
hoa = feature_info.text
elif 'Heating' in feature_label_txt:
heating = feature_info.text
elif 'Lot' in feature_label_txt:
lot = feature_info.text
elif 'Cooling' in feature_label_txt:
cooling = feature_info.text
elif 'Price/' in feature_label_txt:
price_sqft = feature_info.text
print("Property Address--------------------> : ", property_address)
print("Property Url------------------------> : ", property_url)
print("Property Status---------------------> : ", status_text)
print("Owner Phone Number------------------> : ", phone_number)
print("BathRooms---------------------------> : ", bathrooms)
print("BedRooms----------------------------> : ", bedrooms)
print("Tax Assessed Value------------------> : ", tax_assessed_value)
print("Zestimate---------------------------> : ", zestimate)
print("Rent Zestimate----------------------> : ", rent_zestimate)
print("Home Type---------------------------> : ", home_type)
print("Parking-----------------------------> : ", parking)
print("Year Built--------------------------> : ", year_built)
print("HOA---------------------------------> : ", hoa)
print("Heating-----------------------------> : ", heating)
print("Lot---------------------------------> : ", lot)
print("Cooling-----------------------------> : ", cooling)
print("Price Sqft--------------------------> : ", price_sqft)
string_id = property_address + status_text + phone_number
m = hashlib.md5()
m.update(string_id.encode('utf8'))
identifier = m.hexdigest()
print("hash-------------------->", identifier)
create_time = str(datetime.datetime.now())
update_time = ""
insertdb = InsertDB()
data_base.append((property_address, street_add, city, state, zipcode, status_text, phone_number, bathrooms, bedrooms, tax_assessed_value, zestimate, rent_zestimate, home_type, parking, year_built, hoa, heating, lot, cooling, price_sqft, identifier, create_time, update_time))
insertdb.insert_document(data_base, table_name)
if __name__ == "__main__":
print("-----------------start---------------")
options = Options()
options.binary_location = "C:\Program Files\Google\Chrome\Application\chrome.exe"
path = "driver\\chromedriver.exe"
driver = Chrome(executable_path=path, chrome_options = options)
driver.get("https://www.zillow.com/")
time.sleep(2)
driver.maximize_window()
main(driver.page_source, driver) | [
"[email protected]"
] | |
33e02699afe888c26836bdf9d19c9af1238c53ad | 88ce0028ffaa5dc2829fab54f95ada301da4e7b0 | /Extras/CIFAR_Torch.py | 99fd198602c39aa73ccd315b1a47a04548714740 | [] | no_license | anhnguyendepocen/BayesianCNN | 29f56122a97ab54e6415c8845ed817e2694ab49e | fb776d2bef189f76de41f7d2e86222c9d59eebfe | refs/heads/master | 2023-07-15T07:52:31.634301 | 2021-08-20T05:56:15 | 2021-08-20T05:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,103 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import os
import torchvision.transforms as transforms
import copy
import multiprocessing
import os
import sys
import gc
import numpy as np
import random
import time
import operator
import math
import matplotlib as mpl
mpl.use('agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import argparse
torch.backends.cudnn.enabled = False
device = 'cpu'
# Hyper-Parameters
input_size = 320 # Junk
hidden_size = 50 # Junk
num_layers = 2 # Junk
num_classes = 10
batch_size = 32
batch_Size = batch_size
def data_load(data='train'):
if data == 'test':
samples = torchvision.datasets.CIFAR10(root = './CIFAR',train=False,download=True,transform=torchvision.transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))]))
size = 1024 # Test
a,_ = torch.utils.data.random_split(samples, [size,len(samples)-size])
else:
samples = torchvision.datasets.CIFAR10(root = './CIFAR',train=True,download=True,transform=torchvision.transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))]))
size = 2048 # Train
a,_ = torch.utils.data.random_split(samples,[size,len(samples)-size])
data_loader = torch.utils.data.DataLoader(a, batch_size=batch_Size, shuffle=True)
return data_loader
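# Quick sanity check for the loader above (illustrative sketch; not executed here):
#   loader = data_load('train')
#   images, labels = next(iter(loader))
#   print(images.shape, labels.shape)  # torch.Size([32, 3, 32, 32]) torch.Size([32])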
# Initialise and parse command-line inputs
parser = argparse.ArgumentParser(description='PT MCMC CNN')
parser.add_argument('-s', '--samples', help='Number of samples', default=1000, dest="samples", type=int)
parser.add_argument('-r', '--replicas', help='Number of chains/replicas, best to have one per available core/cpu',
                    default=10, dest="num_chains", type=int)
parser.add_argument('-lr', '--learning_rate', help='Learning Rate for Model', dest="learning_rate",
default=0.01, type=float)
parser.add_argument('-swap', '--swap', help='Swap Ratio', dest="swap_ratio", default=0.1, type=float)
parser.add_argument('-b', '--burn', help='How many samples to discard before determing posteriors', dest="burn_in",
default=0.50, type=float)
parser.add_argument('-pt', '--ptsamples', help='Ratio of PT vs straight MCMC samples to run', dest="pt_samples",
default=0.50, type=float)
parser.add_argument('-step', '--step_size', help='Step size for proposals (0.02, 0.05, 0.1 etc)', dest="step_size",
default=0.005, type=float)
parser.add_argument('-t', '--temperature', help='Denominator to determine Max Temperature of chains (MT=no.chains*t) ',
                    default=2, dest="mt_val", type=int)  # Junk
parser.add_argument('-n', '--net', help='Choose rnn net, "1" for RNN, "2" for GRU, "3" for LSTM', default=4, dest="net",
type=int) # Junk
args = parser.parse_args()
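# Example invocation (hypothetical values; every flag is defined above):
#   python CIFAR_Torch.py --samples 2000 --replicas 8 --learning_rate 0.01 \
#                         --swap 0.1 --burn 0.5 --step_size 0.005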
def f(): raise Exception("Found exit()")
# CNN model defined using pytorch
class Model(nn.Module):
def __init__(self, topo, lrate, batch_size, rnn_net='CNN'):
super(Model, self).__init__()
if rnn_net == 'CNN':
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
#self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.fc1 = nn.Linear(800, 10)
# self.fc2 = nn.Linear(128, 10)
self.batch_size = batch_size
self.sigmoid = nn.Sigmoid()
self.topo = topo
self.los = 0
self.softmax = nn.Softmax(dim=1)
self.criterion = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.parameters(), lr=lrate)
# Sequence of execution for the model layers
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = F.relu(x)
x = self.fc1(x)
return x
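    # Shape bookkeeping behind fc1's in_features of 800 (for 3x32x32 CIFAR inputs):
    #   conv1 (5x5, no padding): 3x32x32 -> 16x28x28; max_pool2d(2) -> 16x14x14
    #   conv2 (5x5, no padding): 16x14x14 -> 32x10x10; max_pool2d(2) -> 32x5x5
    #   flatten: 32 * 5 * 5 = 800 features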
# Used to apply softmax and obtain loss value
def evaluate_proposal(self, data, w=None):
self.los = 0
if w is not None:
self.loadparameters(w)
y_pred = torch.zeros((len(data), self.batch_size))
prob = torch.zeros((len(data), self.batch_size, self.topo[2]))
for i, sample in enumerate(data, 0):
inputs, labels = sample
a = copy.deepcopy(self.forward(inputs).detach())
_, predicted = torch.max(a.data, 1)
y_pred[i] = predicted
b = copy.deepcopy(a)
prob[i] = self.softmax(b)
loss = self.criterion(a, labels)
self.los += loss
return y_pred, prob
    # Applies a Langevin-gradient update to obtain a new weight proposal
def langevin_gradient(self, x, w=None):
if w is not None:
self.loadparameters(w)
self.los = 0
for i, sample in enumerate(x, 0):
inputs, labels = sample
outputs = self.forward(inputs)
_, predicted = torch.max(outputs.data, 1)
loss = self.criterion(outputs, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# if (i % 50 == 0):
# print(loss.item(), ' is loss', i)
self.los += copy.deepcopy(loss.item())
return copy.deepcopy(self.state_dict())
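    # Pairing langevin_gradient with addnoiseandcopy realises the proposal
    #   w* ~ N(g(w), step_w),  where g(.) is the one-epoch SGD update above.
    # The asymmetric-proposal (Metropolis-Hastings) correction computed in
    # ptReplica.run is, in the notation of that method,
    #   diff_prop = log q(w | w*) - log q(w* | w)
    #             = [ -||w - g(w*)||^2 + ||w* - g(w)||^2 ] / (2 * step_w)
    # (a sketch of the intent; see the first/second terms in run() for the code).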
# Obtain a list of the model parameters (weights and biases)
def getparameters(self, w=None):
l = np.array([1, 2])
dic = {}
if w is None:
dic = self.state_dict()
else:
dic = copy.deepcopy(w)
for name in sorted(dic.keys()):
l = np.concatenate((l, np.array(copy.deepcopy(dic[name])).reshape(-1)), axis=None)
l = l[2:]
return l
# Loads the model parameters
def loadparameters(self, param):
self.load_state_dict(param)
# Converting list of model parameters to pytorch dictionary form
def dictfromlist(self, param):
dic = {}
i = 0
for name in sorted(self.state_dict().keys()):
dic[name] = torch.FloatTensor(param[i:i + (self.state_dict()[name]).view(-1).shape[0]]).view(
self.state_dict()[name].shape)
i += (self.state_dict()[name]).view(-1).shape[0]
return dic
# Adds random noise to weights to create new weight proposal
def addnoiseandcopy(self, mea, std_dev):
dic = {}
w = self.state_dict()
for name in (w.keys()):
dic[name] = copy.deepcopy(w[name]) + torch.zeros(w[name].size()).normal_(mean=mea, std=std_dev)
self.loadparameters(dic)
return dic
# Each instance of the class is one parallel chain
class ptReplica(multiprocessing.Process):
def __init__(self, use_langevin_gradients, learn_rate, w, minlim_param, maxlim_param, samples, traindata, testdata,
topology, burn_in, temperature, swap_interval, path, parameter_queue, main_process, event, batch_size,
rnn_net, step_size):
self.rnn = Model(topology, learn_rate, batch_size, rnn_net=rnn_net)
multiprocessing.Process.__init__(self)
self.processID = temperature
self.parameter_queue = parameter_queue
self.signal_main = main_process
self.event = event
self.batch_size = batch_size
self.temperature = temperature
self.adapttemp = temperature
self.swap_interval = swap_interval
self.path = path
self.burn_in = burn_in
self.samples = samples
self.topology = topology
self.traindata = traindata
self.testdata = testdata
self.w = w
self.minY = np.zeros((1, 1))
self.maxY = np.zeros((1, 1))
self.minlim_param = minlim_param
self.maxlim_param = maxlim_param
self.use_langevin_gradients = use_langevin_gradients
self.sgd_depth = 1 # Keep as 1
self.learn_rate = learn_rate
self.l_prob = 0.7 # Ratio of langevin based proposals, higher value leads to more computation time, evaluate for different problems
self.step_size = step_size
# Returns loss value
def rmse(self, pred, actual):
return self.rnn.los.item()
# Computes the accuracy value for the model run
def accuracy(self, data):
correct = 0
total = 0
for images, labels in data:
labels = labels.to(device)
outputs = self.rnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return 100 * correct / total
# Calculates likelihood value, change based on problem
def likelihood_func(self, rnn, data, w=None):
y = torch.zeros((len(data), self.batch_size))
for i, dat in enumerate(data, 0):
inputs, labels = dat
y[i] = labels
if w is not None:
fx, prob = rnn.evaluate_proposal(data, w)
else:
fx, prob = rnn.evaluate_proposal(data)
# rmse = self.rmse(fx,y)
rmse = copy.deepcopy(self.rnn.los) / len(data)
lhood = 0
for i in range(len(data)):
for j in range(self.batch_size):
for k in range(self.topology[2]):
if k == y[i][j]:
if prob[i, j, k] == 0:
lhood += 0
else:
lhood += np.log(prob[i, j, k])
return [lhood / self.adapttemp, fx, rmse]
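    # The triple loop accumulates the multinomial log-likelihood of the true labels,
    #   log L(w) = sum_i log p(y_i | x_i, w),
    # and returns it tempered as log L / T (self.adapttemp). A vectorized equivalent
    # (sketch; same result up to the zero-probability guard) would be:
    #   idx = y.long().unsqueeze(2)
    #   lhood = torch.log(prob.gather(2, idx).clamp_min(1e-12)).sum().item()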
# Calculates prior value, change based on problem
def prior_likelihood(self, sigma_squared, w_list):
part1 = -1 * ((len(w_list)) / 2) * np.log(sigma_squared)
part2 = 1 / (2 * sigma_squared) * (sum(np.square(w_list)))
log_loss = part1 - part2
return log_loss
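    # i.e. an isotropic Gaussian prior over all weights:
    #   log p(w) = -(d/2) * log(sigma^2) - (1/(2*sigma^2)) * sum_j w_j^2  (+ const)
    # with d = len(w_list) and sigma^2 = sigma_squared (25 in run()).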
# MCMC sampling function, saving of results to text files
def run(self):
samples = self.samples
rnn = self.rnn
# Random Initialisation of weights
w = rnn.state_dict()
w_size = len(rnn.getparameters(w))
step_w = self.step_size
rmse_train = np.zeros(samples)
rmse_test = np.zeros(samples)
acc_train = np.zeros(samples)
acc_test = np.zeros(samples)
weight_array = np.zeros(samples)
weight_array1 = np.zeros(samples)
weight_array2 = np.zeros(samples)
weight_array3 = np.zeros(samples)
weight_array4 = np.zeros(samples)
sum_value_array = np.zeros(samples)
learn_rate = self.learn_rate
eta = 0 # Junk variable
w_proposal = np.random.randn(w_size)
w_proposal = rnn.dictfromlist(w_proposal)
train = self.traindata
test = self.testdata
sigma_squared = 25
prior_current = self.prior_likelihood(sigma_squared, rnn.getparameters(w)) # takes care of the gradients
# Evaluate Likelihoods
[likelihood, pred_train, rmsetrain] = self.likelihood_func(rnn, train)
#print("Hi")
[_, pred_test, rmsetest] = self.likelihood_func(rnn, test)
#print("Bye")
# Beginning sampling using MCMC
y_test = torch.zeros((len(test), self.batch_size))
for i, dat in enumerate(test, 0):
inputs, labels = dat
y_test[i] = copy.deepcopy(labels)
y_train = torch.zeros((len(train), self.batch_size))
for i, dat in enumerate(train, 0):
inputs, labels = dat
y_train[i] = copy.deepcopy(labels)
num_accepted = 0
langevin_count = 0
pt_samples = samples * 0.6 # PT in canonical form with adaptive temp will work till assigned limit
init_count = 0
rmse_train[0] = rmsetrain
rmse_test[0] = rmsetest
acc_train[0] = self.accuracy(train)
acc_test[0] = self.accuracy(test)
weight_array[0] = 0
weight_array1[0] = 0
weight_array2[0] = 0
weight_array3[0] = 0
weight_array4[0] = 0
sum_value_array[0] = 0
        for i in range(samples):  # Begin sampling --------------------------------------------------------------------------
ratio = ((samples - i) / (samples * 1.0))
if i < pt_samples:
self.adapttemp = self.temperature # T1=T/log(k+1);
if i == pt_samples and init_count == 0: # Move to canonical MCMC
self.adapttemp = 1
[likelihood, pred_train, rmsetrain] = self.likelihood_func(rnn, train, w)
[_, pred_test, rmsetest] = self.likelihood_func(rnn, test, w)
init_count = 1
lx = np.random.uniform(0, 1, 1)
old_w = rnn.state_dict()
if (self.use_langevin_gradients is True) and (lx < self.l_prob):
w_gd = rnn.langevin_gradient(train)
w_proposal = rnn.addnoiseandcopy(0, step_w)
w_prop_gd = rnn.langevin_gradient(train)
wc_delta = (rnn.getparameters(w) - rnn.getparameters(w_prop_gd))
wp_delta = (rnn.getparameters(w_proposal) - rnn.getparameters(w_gd))
sigma_sq = step_w
first = -0.5 * np.sum(wc_delta * wc_delta) / sigma_sq
second = -0.5 * np.sum(wp_delta * wp_delta) / sigma_sq
diff_prop = first - second
diff_prop = diff_prop / self.adapttemp
langevin_count = langevin_count + 1
else:
diff_prop = 0
w_proposal = rnn.addnoiseandcopy(0, step_w)
[likelihood_proposal, pred_train, rmsetrain] = self.likelihood_func(rnn, train)
[likelihood_ignore, pred_test, rmsetest] = self.likelihood_func(rnn, test)
prior_prop = self.prior_likelihood(sigma_squared, rnn.getparameters(w_proposal))
diff_likelihood = likelihood_proposal - likelihood
diff_prior = prior_prop - prior_current
"""
try:
mh_prob = min(1, math.exp(diff_likelihood + diff_prior + diff_prop))
except OverflowError as e:
mh_prob = 1
"""
sum_value = diff_likelihood + diff_prior + diff_prop
sum_value_array[i] = sum_value
u = np.log(random.uniform(0, 1))
if u < sum_value:
num_accepted = num_accepted + 1
likelihood = likelihood_proposal
prior_current = prior_prop
w = copy.deepcopy(w_proposal) # rnn.getparameters(w_proposal)
acc_train1 = self.accuracy(train)
acc_test1 = self.accuracy(test)
print(i, rmsetrain, rmsetest, acc_train1, acc_test1, 'Accepted')
rmse_train[i] = rmsetrain
rmse_test[i] = rmsetest
acc_train[i,] = acc_train1
acc_test[i,] = acc_test1
else:
w = old_w
rnn.loadparameters(w)
acc_train1 = self.accuracy(train)
acc_test1 = self.accuracy(test)
print(i, rmsetrain, rmsetest, acc_train1, acc_test1, 'Rejected')
rmse_train[i,] = rmse_train[i - 1,]
rmse_test[i,] = rmse_test[i - 1,]
acc_train[i,] = acc_train[i - 1,]
acc_test[i,] = acc_test[i - 1,]
ll = rnn.getparameters()
#print(ll.shape)
weight_array[i] = ll[0]
weight_array1[i] = ll[100]
weight_array2[i] = ll[3000]
weight_array3[i] = ll[10000]
weight_array4[i] = ll[20000]
if (i + 1) % self.swap_interval == 0:
param = np.concatenate([np.asarray([rnn.getparameters(w)]).reshape(-1), np.asarray([eta]).reshape(-1),
np.asarray([likelihood]), np.asarray([self.temperature]), np.asarray([i])])
self.parameter_queue.put(param)
self.signal_main.set()
self.event.clear()
self.event.wait()
result = self.parameter_queue.get()
w = rnn.dictfromlist(result[0:w_size])
eta = result[w_size]
if i % 100 == 0:
print(i, rmsetrain, rmsetest, 'Iteration Number and RMSE Train & Test')
"""
big_data=data_load1()
final_test_acc=self.accuracy(big_data)
print(final_test_acc)
"""
param = np.concatenate(
[np.asarray([rnn.getparameters(w)]).reshape(-1), np.asarray([eta]).reshape(-1), np.asarray([likelihood]),
np.asarray([self.temperature]), np.asarray([i])])
# print('SWAPPED PARAM',self.temperature,param)
# self.parameter_queue.put(param)
self.signal_main.set()
# param = np.concatenate([s_pos_w[i-self.surrogate_interval:i,:],lhood_list[i-self.surrogate_interval:i,:]],axis=1)
# self.surrogate_parameterqueue.put(param)
print((num_accepted * 100 / (samples * 1.0)), '% was Accepted')
accept_ratio = num_accepted / (samples * 1.0) * 100
print((langevin_count * 100 / (samples * 1.0)), '% was Langevin')
langevin_ratio = langevin_count / (samples * 1.0) * 100
print('Exiting the Thread', self.temperature)
file_name = self.path + '/predictions/sum_value_' + str(self.temperature) + '.txt'
np.savetxt(file_name, sum_value_array, fmt='%1.2f')
file_name = self.path + '/predictions/weight[0]_' + str(self.temperature) + '.txt'
np.savetxt(file_name, weight_array, fmt='%1.2f')
file_name = self.path + '/predictions/weight[100]_' + str(self.temperature) + '.txt'
np.savetxt(file_name, weight_array1, fmt='%1.2f')
file_name = self.path + '/predictions/weight[50000]_' + str(self.temperature) + '.txt'
np.savetxt(file_name, weight_array2, fmt='%1.2f')
file_name = self.path + '/predictions/weight[40000]_' + str(self.temperature) + '.txt'
np.savetxt(file_name, weight_array3, fmt='%1.2f')
file_name = self.path + '/predictions/weight[60000]_' + str(self.temperature) + '.txt'
np.savetxt(file_name, weight_array4, fmt='%1.2f')
file_name = self.path + '/predictions/rmse_test_chain_' + str(self.temperature) + '.txt'
np.savetxt(file_name, rmse_test, fmt='%1.2f')
file_name = self.path + '/predictions/rmse_train_chain_' + str(self.temperature) + '.txt'
np.savetxt(file_name, rmse_train, fmt='%1.2f')
file_name = self.path + '/predictions/acc_test_chain_' + str(self.temperature) + '.txt'
np.savetxt(file_name, acc_test, fmt='%1.2f')
file_name = self.path + '/predictions/acc_train_chain_' + str(self.temperature) + '.txt'
np.savetxt(file_name, acc_train, fmt='%1.2f')
file_name = self.path + '/predictions/accept_percentage' + str(self.temperature) + '.txt'
with open(file_name, 'w') as f:
f.write('%d' % accept_ratio)
# Manages the parallel tempering, initialises and executes the parallel chains
class ParallelTempering:
def __init__(self, use_langevin_gradients, learn_rate, topology, num_chains, maxtemp, NumSample, swap_interval,
path, batch_size, bi, rnn_net, step_size):
rnn = Model(topology, learn_rate, batch_size, rnn_net=rnn_net)
self.rnn = rnn
self.rnn_net = rnn_net
self.traindata = data_load(data='train')
self.testdata = data_load(data='test')
self.topology = topology
self.num_param = len(rnn.getparameters(
rnn.state_dict())) # (topology[0] * topology[1]) + (topology[1] * topology[2]) + topology[1] + topology[2]
# Parallel Tempering variables
self.swap_interval = swap_interval
self.path = path
self.maxtemp = maxtemp
self.num_swap = 0
self.total_swap_proposals = 0
self.num_chains = num_chains
self.chains = []
self.temperatures = []
self.NumSamples = int(NumSample / self.num_chains)
self.sub_sample_size = max(1, int(0.05 * self.NumSamples))
# create queues for transfer of parameters between process chain
self.parameter_queue = [multiprocessing.Queue() for i in range(num_chains)]
self.chain_queue = multiprocessing.JoinableQueue()
self.wait_chain = [multiprocessing.Event() for i in range(self.num_chains)]
self.event = [multiprocessing.Event() for i in range(self.num_chains)]
self.all_param = None
self.geometric = True # True (geometric) False (Linear)
self.minlim_param = 0.0
self.maxlim_param = 0.0
self.minY = np.zeros((1, 1))
self.maxY = np.ones((1, 1))
self.model_signature = 0.0
self.learn_rate = learn_rate
self.use_langevin_gradients = use_langevin_gradients
self.batch_size = batch_size
self.masternumsample = NumSample
self.burni = bi
self.step_size = step_size
def default_beta_ladder(self, ndim, ntemps,
Tmax): # https://github.com/konqr/ptemcee/blob/master/ptemcee/sampler.py
"""
Returns a ladder of :math:`\beta \equiv 1/T` under a geometric spacing that is determined by the
arguments ``ntemps`` and ``Tmax``. The temperature selection algorithm works as follows:
Ideally, ``Tmax`` should be specified such that the tempered posterior looks like the prior at
this temperature. If using adaptive parallel tempering, per `arXiv:1501.05823
<http://arxiv.org/abs/1501.05823>`_, choosing ``Tmax = inf`` is a safe bet, so long as
``ntemps`` is also specified.
"""
if type(ndim) != int or ndim < 1:
raise ValueError('Invalid number of dimensions specified.')
if ntemps is None and Tmax is None:
raise ValueError('Must specify one of ``ntemps`` and ``Tmax``.')
if Tmax is not None and Tmax <= 1:
raise ValueError('``Tmax`` must be greater than 1.')
if ntemps is not None and (type(ntemps) != int or ntemps < 1):
raise ValueError('Invalid number of temperatures specified.')
tstep = np.array([25.2741, 7., 4.47502, 3.5236, 3.0232,
2.71225, 2.49879, 2.34226, 2.22198, 2.12628,
2.04807, 1.98276, 1.92728, 1.87946, 1.83774,
1.80096, 1.76826, 1.73895, 1.7125, 1.68849,
1.66657, 1.64647, 1.62795, 1.61083, 1.59494,
1.58014, 1.56632, 1.55338, 1.54123, 1.5298,
1.51901, 1.50881, 1.49916, 1.49, 1.4813,
1.47302, 1.46512, 1.45759, 1.45039, 1.4435,
1.4369, 1.43056, 1.42448, 1.41864, 1.41302,
1.40761, 1.40239, 1.39736, 1.3925, 1.38781,
1.38327, 1.37888, 1.37463, 1.37051, 1.36652,
1.36265, 1.35889, 1.35524, 1.3517, 1.34825,
1.3449, 1.34164, 1.33847, 1.33538, 1.33236,
1.32943, 1.32656, 1.32377, 1.32104, 1.31838,
1.31578, 1.31325, 1.31076, 1.30834, 1.30596,
1.30364, 1.30137, 1.29915, 1.29697, 1.29484,
1.29275, 1.29071, 1.2887, 1.28673, 1.2848,
1.28291, 1.28106, 1.27923, 1.27745, 1.27569,
1.27397, 1.27227, 1.27061, 1.26898, 1.26737,
1.26579, 1.26424, 1.26271, 1.26121,
1.25973])
if ndim > tstep.shape[0]:
# An approximation to the temperature step at large
# dimension
tstep = 1.0 + 2.0 * np.sqrt(np.log(4.0)) / np.sqrt(ndim)
else:
tstep = tstep[ndim - 1]
appendInf = False
if Tmax == np.inf:
appendInf = True
Tmax = None
ntemps = ntemps - 1
if ntemps is not None:
if Tmax is None:
# Determine Tmax from ntemps.
Tmax = tstep ** (ntemps - 1)
else:
if Tmax is None:
raise ValueError('Must specify at least one of ``ntemps'' and '
'finite ``Tmax``.')
# Determine ntemps from Tmax.
ntemps = int(np.log(Tmax) / np.log(tstep) + 2)
betas = np.logspace(0, -np.log10(Tmax), ntemps)
if appendInf:
# Use a geometric spacing, but replace the top-most temperature with
# infinity.
betas = np.concatenate((betas, [0]))
return betas
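    # Worked example of the geometric ladder (hand-computed, for illustration):
    #   default_beta_ladder(2, ntemps=4, Tmax=8) evaluates
    #   np.logspace(0, -np.log10(8), 4), i.e. betas = [1, 8**(-1/3), 8**(-2/3), 1/8],
    #   giving temperatures 1/beta = [1.0, 2.0, 4.0, 8.0].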
def assign_temperatures(self):
if self.geometric == True:
betas = self.default_beta_ladder(2, ntemps=self.num_chains, Tmax=self.maxtemp)
for i in range(0, self.num_chains):
self.temperatures.append(np.inf if betas[i] == 0 else 1.0 / betas[i])
# print (self.temperatures[i])
else:
tmpr_rate = (self.maxtemp / self.num_chains)
temp = 1
for i in range(0, self.num_chains):
self.temperatures.append(temp)
temp += tmpr_rate
# print(self.temperatures[i])
def initialize_chains(self, burn_in):
self.burn_in = burn_in
self.assign_temperatures()
self.minlim_param = np.repeat([-100], self.num_param) # priors for nn weights
self.maxlim_param = np.repeat([100], self.num_param)
for i in range(0, self.num_chains):
w = np.random.randn(self.num_param)
w = self.rnn.dictfromlist(w)
self.chains.append(
ptReplica(self.use_langevin_gradients, self.learn_rate, w, self.minlim_param, self.maxlim_param,
self.NumSamples, self.traindata, self.testdata, self.topology, self.burn_in,
self.temperatures[i], self.swap_interval, self.path, self.parameter_queue[i],
self.wait_chain[i], self.event[i], self.batch_size, self.rnn_net, self.step_size))
def surr_procedure(self, queue):
if queue.empty() is False:
return queue.get()
else:
return
def swap_procedure(self, parameter_queue_1, parameter_queue_2):
# if parameter_queue_2.empty() is False and parameter_queue_1.empty() is False:
param1 = parameter_queue_1.get()
param2 = parameter_queue_2.get()
w1 = param1[0:self.num_param]
eta1 = param1[self.num_param]
lhood1 = param1[self.num_param + 1]
T1 = param1[self.num_param + 2]
w2 = param2[0:self.num_param]
eta2 = param2[self.num_param]
lhood2 = param2[self.num_param + 1]
T2 = param2[self.num_param + 2]
# print('yo')
# SWAPPING PROBABILITIES
try:
swap_proposal = min(1, 0.5 * np.exp(lhood2 - lhood1))
except OverflowError:
swap_proposal = 1
u = np.random.uniform(0, 1)
if u < swap_proposal:
swapped = True
self.total_swap_proposals += 1
self.num_swap += 1
param_temp = param1
param1 = param2
param2 = param_temp
else:
swapped = False
self.total_swap_proposals += 1
return param1, param2, swapped
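    # Note: canonical replica exchange accepts a swap with probability
    #   min(1, exp((1/T1 - 1/T2) * (logL2 - logL1)))  on untempered log-likelihoods.
    # Here lhood1/lhood2 arrive already divided by their chain temperatures
    # (see likelihood_func), so the code uses the simplified heuristic
    # min(1, 0.5 * exp(lhood2 - lhood1)) instead.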
def run_chains(self):
# only adjacent chains can be swapped therefore, the number of proposals is ONE less num_chains
# swap_proposal = np.ones(self.num_chains-1)
# create parameter holders for paramaters that will be swapped
# replica_param = np.zeros((self.num_chains, self.num_param))
# lhood = np.zeros(self.num_chains)
# Define the starting and ending of MCMC Chains
start = 0
end = self.NumSamples - 1
# number_exchange = np.zeros(self.num_chains)
# filen = open(self.path + '/num_exchange.txt', 'a')
# RUN MCMC CHAINS
for l in range(0, self.num_chains):
self.chains[l].start_chain = start
self.chains[l].end = end
for j in range(0, self.num_chains):
self.wait_chain[j].clear()
self.event[j].clear()
self.chains[j].start()
# SWAP PROCEDURE
swaps_affected_main = 0
total_swaps = 0
for i in range(int(self.NumSamples / self.swap_interval)):
# print(i,int(self.NumSamples/self.swap_interval), 'Counting')
count = 0
for index in range(self.num_chains):
if not self.chains[index].is_alive():
count += 1
self.wait_chain[index].set()
# print(str(self.chains[index].temperature) + " Dead" + str(index))
if count == self.num_chains:
break
# print(count,'Is the Count')
timeout_count = 0
for index in range(0, self.num_chains):
# print("Waiting for chain: {}".format(index+1))
flag = self.wait_chain[index].wait()
if flag:
# print("Signal from chain: {}".format(index+1))
timeout_count += 1
if timeout_count != self.num_chains:
# print("Skipping the Swap!")
continue
# print("Event Occured")
for index in range(0, self.num_chains - 1):
# print('Starting Swap')
swapped = False
param_1, param_2, swapped = self.swap_procedure(self.parameter_queue[index],
self.parameter_queue[index + 1])
self.parameter_queue[index].put(param_1)
self.parameter_queue[index + 1].put(param_2)
if index == 0:
if swapped:
swaps_affected_main += 1
total_swaps += 1
for index in range(self.num_chains):
self.wait_chain[index].clear()
self.event[index].set()
print("Joining Processes")
# JOIN THEM TO MAIN PROCESS
for index in range(0, self.num_chains):
print('Waiting to Join ', index, self.num_chains)
print(self.chains[index].is_alive())
self.chains[index].join()
print(index, 'Chain Joined')
self.chain_queue.join()
# pos_w, fx_train, fx_test, rmse_train, rmse_test, acc_train, acc_test, likelihood_vec, accept_vec, accept = self.show_results()
rmse_train, rmse_test, acc_train, acc_test, apal = self.show_results()
print("NUMBER OF SWAPS = ", self.num_swap)
swap_perc = self.num_swap * 100 / self.total_swap_proposals
# return pos_w, fx_train, fx_test, rmse_train, rmse_test, acc_train, acc_test, likelihood_vec, swap_perc, accept_vec, accept
return rmse_train, rmse_test, acc_train, acc_test, apal, swap_perc
def show_results(self):
burnin = int(self.NumSamples * self.burn_in)
mcmc_samples = int(self.NumSamples * 0.25)
# likelihood_rep = np.zeros((self.num_chains, self.NumSamples - burnin,2)) # index 1 for likelihood posterior and index 0 for Likelihood proposals. Note all likilihood proposals plotted only
# accept_percent = np.zeros((self.num_chains, 1))
# accept_list = np.zeros((self.num_chains, self.NumSamples))
# pos_w = np.zeros((self.num_chains, self.NumSamples - burnin, self.num_param))
# fx_train_all = np.zeros((self.num_chains, self.NumSamples - burnin, len(self.traindata)))
rmse_train = np.zeros((self.num_chains, self.NumSamples))
acc_train = np.zeros((self.num_chains, self.NumSamples))
# fx_test_all = np.zeros((self.num_chains, self.NumSamples - burnin, len(self.testdata)))
rmse_test = np.zeros((self.num_chains, self.NumSamples))
acc_test = np.zeros((self.num_chains, self.NumSamples))
sum_val_array = np.zeros((self.num_chains, self.NumSamples))
weight_ar = np.zeros((self.num_chains, self.NumSamples))
weight_ar1 = np.zeros((self.num_chains, self.NumSamples))
weight_ar2 = np.zeros((self.num_chains, self.NumSamples))
weight_ar3 = np.zeros((self.num_chains, self.NumSamples))
weight_ar4 = np.zeros((self.num_chains, self.NumSamples))
accept_percentage_all_chains = np.zeros(self.num_chains)
for i in range(self.num_chains):
# file_name = self.path + '/posterior/pos_w/' + 'chain_' + str(self.temperatures[i]) + '.txt'
# print(self.path)
# print(file_name)
# dat = np.loadtxt(file_name)
# pos_w[i, :, :] = dat[burnin:, :]
# file_name = self.path + '/posterior/pos_likelihood/' + 'chain_' + str(self.temperatures[i]) + '.txt'
# dat = np.loadtxt(file_name)
# likelihood_rep[i, :] = dat[burnin:]
# file_name = self.path + '/posterior/accept_list/' + 'chain_' + str(self.temperatures[i]) + '.txt'
# dat = np.loadtxt(file_name)
# accept_list[i, :] = dat
file_name = self.path + '/predictions/rmse_test_chain_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
rmse_test[i, :] = dat
file_name = self.path + '/predictions/rmse_train_chain_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
rmse_train[i, :] = dat
file_name = self.path + '/predictions/acc_test_chain_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
acc_test[i, :] = dat
file_name = self.path + '/predictions/acc_train_chain_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
acc_train[i, :] = dat
file_name = self.path + '/predictions/sum_value_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
sum_val_array[i, :] = dat
file_name = self.path + '/predictions/weight[0]_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
weight_ar[i, :] = dat
file_name = self.path + '/predictions/weight[100]_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
weight_ar1[i, :] = dat
file_name = self.path + '/predictions/weight[50000]_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
weight_ar2[i, :] = dat
file_name = self.path + '/predictions/weight[40000]_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
weight_ar3[i, :] = dat
file_name = self.path + '/predictions/weight[60000]_' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
weight_ar4[i, :] = dat
file_name = self.path + '/predictions/accept_percentage' + str(self.temperatures[i]) + '.txt'
dat = np.loadtxt(file_name)
accept_percentage_all_chains[i] = dat
rmse_train_single_chain_plot = rmse_train[0, :]
rmse_test_single_chain_plot = rmse_test[0, :]
acc_train_single_chain_plot = acc_train[0, :]
acc_test_single_chain_plot = acc_test[0, :]
sum_val_array_single_chain_plot = sum_val_array[0]
path = 'cifar_torch/CNN/graphs'
x2 = np.linspace(0, self.NumSamples, num=self.NumSamples)
plt.plot(x2, sum_val_array_single_chain_plot, label='Sum Value')
plt.legend(loc='upper right')
plt.title("Sum Value Single Chain")
plt.savefig(path + '/sum_value_single_chain.png')
plt.clf()
color = 'tab:red'
plt.plot(x2, acc_train_single_chain_plot, label="Train", color=color)
color = 'tab:blue'
plt.plot(x2, acc_test_single_chain_plot, label="Test", color=color)
plt.xlabel('Samples')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig(path + '/superimposed_acc_single_chain.png')
plt.clf()
color = 'tab:red'
plt.plot(x2, rmse_train_single_chain_plot, label="Train", color=color)
color = 'tab:blue'
plt.plot(x2, rmse_test_single_chain_plot, label="Test", color=color)
plt.xlabel('Samples')
plt.ylabel('RMSE')
plt.legend()
plt.savefig(path + '/superimposed_rmse_single_chain.png')
plt.clf()
"""
fig2, ax7 = plt.subplots()
color = 'tab:red'
ax7.set_xlabel('Samples')
ax7.set_ylabel('Accuracy Train Single Chain', color=color)
ax7.plot(x2, acc_train_single_chain_plot, color=color)
ax7.tick_params(axis='y', labelcolor=color)
ax8 = ax7.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax8.set_ylabel('Accuracy Test Single Chain', color=color) # we already handled the x-label with ax1
ax8.plot(x2, acc_test_single_chain_plot, color=color)
ax8.tick_params(axis='y', labelcolor=color)
fig2.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig(path + '/superimposed_acc_single_chain.png')
plt.clf()
fig3, ax9 = plt.subplots()
color = 'tab:red'
ax9.set_xlabel('Samples')
ax9.set_ylabel('RMSE Train Single Chain', color=color)
ax9.plot(x2, rmse_train_single_chain_plot, color=color)
ax9.tick_params(axis='y', labelcolor=color)
ax10 = ax9.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax10.set_ylabel('RMSE Test Single Chain', color=color) # we already handled the x-label with ax1
ax10.plot(x2, rmse_test_single_chain_plot, color=color)
ax10.tick_params(axis='y', labelcolor=color)
fig3.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig(path + '/superimposed_rmse_single_chain.png')
plt.clf()
"""
rmse_train = rmse_train.reshape((self.num_chains * self.NumSamples), 1)
acc_train = acc_train.reshape((self.num_chains * self.NumSamples), 1)
rmse_test = rmse_test.reshape((self.num_chains * self.NumSamples), 1)
acc_test = acc_test.reshape((self.num_chains * self.NumSamples), 1)
sum_val_array = sum_val_array.reshape((self.num_chains * self.NumSamples), 1)
weight_ar = weight_ar.reshape((self.num_chains * self.NumSamples), 1)
weight_ar1 = weight_ar1.reshape((self.num_chains * self.NumSamples), 1)
weight_ar2 = weight_ar2.reshape((self.num_chains * self.NumSamples), 1)
weight_ar3 = weight_ar3.reshape((self.num_chains * self.NumSamples), 1)
weight_ar4 = weight_ar4.reshape((self.num_chains * self.NumSamples), 1)
        x = np.linspace(0, int(self.masternumsample - self.masternumsample * self.burn_in),
                        num=int(self.masternumsample - self.masternumsample * self.burn_in))
x1 = np.linspace(0, self.masternumsample, num=self.masternumsample)
plt.plot(x1, weight_ar, label='Weight[0]')
plt.legend(loc='upper right')
plt.title("Weight[0] Trace")
plt.savefig(path + '/weight[0]_samples.png')
plt.clf()
plt.hist(weight_ar, bins=20, color="blue", alpha=0.7)
plt.ylabel('Frequency')
plt.xlabel('Parameter Values')
plt.savefig(path + '/weight[0]_hist.png')
plt.clf()
plt.plot(x1, weight_ar1, label='Weight[100]')
plt.legend(loc='upper right')
plt.title("Weight[100] Trace")
plt.savefig(path + '/weight[100]_samples.png')
plt.clf()
plt.hist(weight_ar1, bins=20, color="blue", alpha=0.7)
plt.ylabel('Frequency')
plt.xlabel('Parameter Values')
plt.savefig(path + '/weight[100]_hist.png')
plt.clf()
plt.plot(x1, weight_ar2, label='Weight[50000]')
plt.legend(loc='upper right')
plt.title("Weight[50000] Trace")
plt.savefig(path + '/weight[50000]_samples.png')
plt.clf()
plt.hist(weight_ar2, bins=20, color="blue", alpha=0.7)
plt.ylabel('Frequency')
plt.xlabel('Parameter Values')
plt.savefig(path + '/weight[50000]_hist.png')
plt.clf()
plt.plot(x1, weight_ar3, label='Weight[40000]')
plt.legend(loc='upper right')
plt.title("Weight[40000] Trace")
plt.savefig(path + '/weight[40000]_samples.png')
plt.clf()
plt.hist(weight_ar3, bins=20, color="blue", alpha=0.7)
plt.ylabel('Frequency')
plt.xlabel('Parameter Values')
plt.savefig(path + '/weight[40000]_hist.png')
plt.clf()
plt.plot(x1, weight_ar4, label='Weight[60000]')
plt.legend(loc='upper right')
plt.title("Weight[60000] Trace")
plt.savefig(path + '/weight[60000]_samples.png')
plt.clf()
plt.hist(weight_ar4, bins=20, color="blue", alpha=0.7)
plt.ylabel('Frequency')
plt.xlabel('Parameter Values')
plt.savefig(path + '/weight[60000]_hist.png')
plt.clf()
plt.plot(x1, sum_val_array, label='Sum_Value')
plt.legend(loc='upper right')
plt.title("Sum Value Over Samples")
plt.savefig(path + '/sum_value_samples.png')
plt.clf()
# plt.plot(x, acc_train, label='Train')
# plt.legend(loc='upper right')
# plt.title("Accuracy Train Values Over Samples")
# plt.savefig('cifar_torch_single_chain' + '/accuracy_samples.png')
# plt.clf()
color = 'tab:red'
plt.plot(x1, acc_train, label="Train", color=color)
color = 'tab:blue'
plt.plot(x1, acc_test, label="Test", color=color)
plt.xlabel('Samples')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig(path + '/superimposed_acc.png')
plt.clf()
color = 'tab:red'
plt.plot(x1, rmse_train, label="Train", color=color)
color = 'tab:blue'
plt.plot(x1, rmse_test, label="Test", color=color)
plt.xlabel('Samples')
        plt.ylabel('RMSE')
plt.legend()
plt.savefig(path + '/superimposed_rmse.png')
plt.clf()
"""
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Samples')
ax1.set_ylabel('Accuracy Train', color=color)
ax1.plot(x1, acc_train, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Accuracy Test', color=color) # we already handled the x-label with ax1
ax2.plot(x1, acc_test, color=color)
ax2.tick_params(axis='y', labelcolor=color)
# ax3=ax1.twinx()
# color = 'tab:green'
# ax3.set_ylabel('Accuracy Test', color=color) # we already handled the x-label with ax1
# ax3.plot(x, acc_test, color=color)
# ax3.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig(path + '/superimposed_acc.png')
plt.clf()
fig1, ax4 = plt.subplots()
color = 'tab:red'
ax4.set_xlabel('Samples')
ax4.set_ylabel('RMSE Train', color=color)
ax4.plot(x1, rmse_train, color=color)
ax4.tick_params(axis='y', labelcolor=color)
ax5 = ax4.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax5.set_ylabel('RMSE Test', color=color) # we already handled the x-label with ax1
ax5.plot(x1, rmse_test, color=color)
ax5.tick_params(axis='y', labelcolor=color)
# ax6 = ax4.twinx()
# color = 'tab:green'
# ax6.set_ylabel('RMSE Test', color=color) # we already handled the x-label with ax1
# ax6.plot(x, rmse_test, color=color)
# ax6.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig(path + '/superimposed_rmse.png')
plt.clf()
"""
'''rmse_train = rmse_train.reshape(self.num_chains*(mcmc_samples), 1)
acc_train = acc_train.reshape(self.num_chains*(mcmc_samples), 1)
rmse_test = rmse_test.reshape(self.num_chains*(mcmc_samples), 1)
acc_test = acc_test.reshape(self.num_chains*(mcmc_samples), 1)
rmse_train = np.append(rmse_train, chain1_rmsetrain)
rmse_test = np.append(rmse_test, chain1_rmsetest)
acc_train = np.append(acc_train, chain1_acctrain)
acc_test = np.append(acc_test, chain1_acctest) '''
# accept_vec = accept_list
# accept = np.sum(accept_percent) / self.num_chains
# np.savetxt(self.path + '/pos_param.txt', posterior.T) # tcoment to save space
# np.savetxt(self.path + '/likelihood.txt', likelihood_vec.T, fmt='%1.5f')
# np.savetxt(self.path + '/accept_list.txt', accept_list, fmt='%1.2f')
# np.savetxt(self.path + '/acceptpercent.txt', [accept], fmt='%1.2f')
# return posterior, fx_train_all, fx_test_all, rmse_train, rmse_test, acc_train, acc_test, likelihood_vec.T, accept_vec, accept
return rmse_train, rmse_test, acc_train, acc_test, accept_percentage_all_chains
def make_directory(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
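    @staticmethod
    def _make_directory_sketch(directory):
        # sketch only (an addition for illustration, not called anywhere): on
        # Python 3 the exists-check-then-create guard above collapses to a
        # single call.
        os.makedirs(directory, exist_ok=True)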
def main():
topology = [input_size, hidden_size, num_classes]
net1 = 'CNN'
numSamples = args.samples
batch_size = batch_Size
num_chains = args.num_chains
swap_ratio = args.swap_ratio
burn_in = args.burn_in
learning_rate = args.learning_rate
step_size = args.step_size
maxtemp = 2
use_langevin_gradients = True # False leaves it as Random-walk proposals. Note that Langevin gradients will take a bit more time computationally
bi = burn_in
swap_interval = int(
        swap_ratio * numSamples / num_chains)  # how often you swap neighbours; note if swap_interval exceeds NumSamples, swapping is effectively off
# learn_rate = 0.01 # in case langevin gradients are used. Can select other values, we found small value is ok.
problemfolder = 'cifar_torch/' + net1 # change this to your directory for results output - produces large datasets
name = ""
filename = ""
if not os.path.exists(problemfolder + name):
os.makedirs(problemfolder + name)
path = (problemfolder + name)
timer = time.time()
pt = ParallelTempering(use_langevin_gradients, learning_rate, topology, num_chains, maxtemp, numSamples,
swap_interval, path, batch_size, bi, net1, step_size)
directories = [path + '/predictions/', path + '/graphs/']
for d in directories:
        pt.make_directory(filename + d)
pt.initialize_chains(burn_in)
# pos_w, fx_train, fx_test, rmse_train, rmse_test, acc_train, acc_test, likelihood_rep, swap_perc, accept_vec, accept = pt.run_chains()
rmse_train, rmse_test, acc_train, acc_test, accept_percent_all, sp = pt.run_chains()
timer2 = time.time()
# list_end = accept_vec.shape[1]
# accept_ratio = accept_vec[:, list_end-1:list_end]/list_end
# accept_per = np.mean(accept_ratio) * 100
# print(accept_per, ' accept_per')
timetotal = (timer2 - timer) / 60
"""
# #PLOTS
acc_tr = np.mean(acc_train [:])
acctr_std = np.std(acc_train[:])
acctr_max = np.amax(acc_train[:])
acc_tes = np.mean(acc_test[:])
acctest_std = np.std(acc_test[:])
acctes_max = np.amax(acc_test[:])
rmse_tr = np.mean(rmse_train[:])
rmsetr_std = np.std(rmse_train[:])
rmsetr_max = np.amax(acc_train[:])
rmse_tes = np.mean(rmse_test[:])
rmsetest_std = np.std(rmse_test[:])
rmsetes_max = np.amax(rmse_test[:])
"""
    burnin = burn_in
    # note: acc_train etc. were flattened across chains in show_results, so
    # this slice only trims the first int(numSamples * burnin) rows of the stack
    acc_tr = np.mean(acc_train[int(numSamples * burnin):])
acctr_std = np.std(acc_train[int(numSamples * burnin):])
acctr_max = np.amax(acc_train[int(numSamples * burnin):])
acc_tes = np.mean(acc_test[int(numSamples * burnin):])
acctest_std = np.std(acc_test[int(numSamples * burnin):])
acctes_max = np.amax(acc_test[int(numSamples * burnin):])
rmse_tr = np.mean(rmse_train[int(numSamples * burnin):])
rmsetr_std = np.std(rmse_train[int(numSamples * burnin):])
rmsetr_max = np.amax(rmse_train[int(numSamples * burnin):])
rmse_tes = np.mean(rmse_test[int(numSamples * burnin):])
rmsetest_std = np.std(rmse_test[int(numSamples * burnin):])
rmsetes_max = np.amax(rmse_test[int(numSamples * burnin):])
accept_percent_mean = np.mean(accept_percent_all)
# outres = open(path+'/result.txt', "a+")
# outres_db = open(path_db+'/result.txt', "a+")
# resultingfile = open(problemfolder+'/master_result_file.txt','a+')
# resultingfile_db = open( problemfolder_db+'/master_result_file.txt','a+')
# xv = name+'_'+ str(run_nb)
print("\n\n\n\n")
print("Train Acc (Mean, Max, Std)")
print(acc_tr, acctr_max, acctr_std)
print("\n")
print("Test Acc (Mean, Max, Std)")
print(acc_tes, acctes_max, acctest_std)
print("\n")
print("Train RMSE (Mean, Max, Std)")
print(rmse_tr, rmsetr_max, rmsetr_std)
print("\n")
print("Test RMSE (Mean, Max, Std)")
print(rmse_tes, rmsetes_max, rmsetest_std)
print("\n")
print("Acceptance Percentage Mean")
print(accept_percent_mean)
print("\n")
print("Swap Percentage")
print(sp)
print("\n")
print("Time (Minutes)")
print(timetotal)
if __name__ == "__main__": main()
| [
"[email protected]"
] | |
2d948670ff31eb0aacdf2b5fada3761978d73a49 | 02fc83e331257882b7701d347fe30bbc934fbc1e | /tk.py | fac1768ec8e270cdbf54c7e0185ff605fc4baaec | [
"Unlicense"
] | permissive | tankle/GoodTranslate | 991dc71190a1e909f48a91799ab5b141b46de8d8 | 5fec6791e6492cb1c181e4f866e1e0fb45e7ab29 | refs/heads/master | 2021-01-18T20:32:52.755140 | 2016-05-12T03:04:16 | 2016-05-12T03:04:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | import re
import sys
import ctypes
try:
import urllib2 as request
except ImportError:
from urllib import request
def get_d1():
req = request.Request(url='http://translate.google.com/', headers={'User-Agent': 'Mozilla/5.0'})
t = request.urlopen(req).read().decode('utf8')
a, b, h = re.search(r"TKK=eval\(\'\(\(function\(\){var a\\x3d(\-?\d+);var b\\x3d(\-?\d+);return (\d+)", t).groups()
return int(h), ctypes.c_int32(int(a) + int(b)).value
b, d1 = get_d1()
def RL(a, b):
    # apply the op string b in (op, direction, amount) triples: each triple
    # shifts the 32-bit value of a right ('+') or left ('-') by `amount`
    # (letters a-z encode 10-35), then adds ('+') or xors ('^') the result
    # back into a, emulating Google's JS token hash step
    for c in range(0, len(b)-2, 3):
        d = b[c+2]
        d = ord(d) - 87 if d >= 'a' else int(d)
        xa = ctypes.c_uint32(a).value
        d = xa >> d if b[c+1] == '+' else xa << d
        a = a + d & 4294967295 if b[c] == '+' else a ^ d
    return ctypes.c_int32(a).value
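def _rl_demo(a=1000):
    # sketch only (this helper is an addition, not part of the original
    # script): the op string "+-a^+6" used by calc_tk is the 32-bit step
    # 'a += a << 10' followed by 'a ^= a >> 6'.
    x = (a + ((a << 10) & 4294967295)) & 4294967295
    x = x ^ (x >> 6)
    return RL(a, "+-a^+6") == ctypes.c_int32(x).value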
def calc_tk(a):
if sys.version_info >= (3,):
d = a.encode('utf-8')
else:
d = map(ord, a)
a = b
for di in d:
a = RL(a + di, "+-a^+6")
a = RL(a, "+-3^+b+-f")
a = ctypes.c_int32(a ^ d1).value
a = a if a >= 0 else ((a & 2147483647) + 2147483648)
a %= pow(10, 6)
return '%d.%d' % (a, a ^ b)
if __name__ == '__main__':
text = ' '.join(sys.argv[1:])
print(calc_tk(text))
| [
"[email protected]"
] | |
db0528144723f98ed5d16e50c2774c3f5c79d4e0 | c3fc7865f163a43fbd3e832547005cf3c89547c5 | /bin/oldScripts/JobCommands/last_rnn_out_experiment_1_39.py | 18252ea2007256ee849d4654ef3580b69f088c4d | [] | no_license | ahmetrifaioglu/PyTorch | 936d5317ca90a147e147241cbf75ae0864477f13 | fff02012eb5f05cdd634b5128f8bdceb9ed3b2a7 | refs/heads/master | 2021-10-24T19:27:53.495478 | 2021-10-12T22:05:08 | 2021-10-12T22:05:08 | 165,693,778 | 0 | 0 | null | 2021-10-12T22:04:20 | 2019-01-14T16:24:32 | Jupyter Notebook | UTF-8 | Python | false | false | 8,444 | py | import subprocess
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 512 512 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 256 256 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 256 256 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 256 3 256 256 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 512 512 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 256 256 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 256 256 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 2 256 256 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 512 512 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 256 256 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 256 256 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 512 3 256 256 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 512 512 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 256 256 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 256 256 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 2 256 256 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 512 512 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 256 256 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 256 256 0.005 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 512_256 8000 512 400 1024 3 256 256 0.001 idg_comp_targ_uniq_inter_filtered.csv r 32", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 1024_512 8000 128 100 128 2 1024 1024 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 64", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 1024_512 8000 128 100 128 2 1024 1024 0.005 idg_comp_targ_uniq_inter_filtered.csv r 64", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 1024_512 8000 128 100 128 2 1024 1024 0.001 idg_comp_targ_uniq_inter_filtered.csv r 64", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 1024_512 8000 128 100 128 2 512 512 0.0001 idg_comp_targ_uniq_inter_filtered.csv r 64", shell=True)
subprocess.call("python rnn_playground.py ecfp4 trigramencodings1000 1024_512 8000 128 100 128 2 512 512 0.005 idg_comp_targ_uniq_inter_filtered.csv r 64", shell=True)
| [
"[email protected]"
] | |
3823899a91f91a13241e70151dc9f4af5ae1e358 | fab7b6e422b74424fb59398635f74faca9ff5a58 | /waimak_extended_boundry/model_and_NSMC_build/m_packages/wel_packages.py | e91f9669c683d6190f7bed5ee1833856e694ae25 | [] | no_license | hansonmcoombs/Waimakariri-Model-Ashley-to-Selwyn | c7a56a2ebd0d421c9679cb4a16ae319dfb2041b1 | c96c2663b010975ec08d42840fbc7970f3c2b085 | refs/heads/master | 2023-05-29T10:57:33.916912 | 2020-04-23T21:32:21 | 2020-04-23T21:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,230 | py | """
Author: matth
Date Created: 20/06/2017 11:57 AM
"""
from __future__ import division
import flopy_mh as flopy
from Waimak_modeling_non_extended.model_tools.well_values import get_race_data, get_nwai_wells
from waimak_extended_boundry import smt
import pandas as pd
import numpy as np
from env import sdp_required
#from core.ecan_io import rd_sql, sql_db
import os
import pickle
import geopandas as gpd
from copy import deepcopy
from warnings import warn
def create_wel_package(m, wel_version):
"""
create and add the well package
:param m: a flopy model instance
:param wel_version: which version of wells to use
:return:
"""
wel = flopy.modflow.mfwel.ModflowWel(m,
ipakcb=740,
stress_period_data={
0: smt.convert_well_data_to_stresspd(get_wel_spd(wel_version))},
options=['AUX IFACE'], # next time don't include this unless I use it
unitnumber=709)
def get_wel_spd(version, recalc=False):
"""
get the well data
:param version: which well version to use
    :param recalc: boolean whether to recalc (True) or load from pickle if available
    :return: pd.DataFrame
"""
if version == 1:
outdata = _get_wel_spd_v1(recalc, sub_version=1)
elif version == 0:
outdata = _get_wel_spd_v1(recalc, sub_version=0)
elif version == 3:
outdata = _get_wel_spd_v3(recalc)
else:
raise ValueError('unexpected version: {}'.format(version))
return outdata
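def _example_wel_spd_usage():
    """
    sketch only (an addition for illustration, not called anywhere): fetch the
    v3 well data and convert it to the stress period dict that
    create_wel_package hands to flopy
    :return: dict for ModflowWel stress_period_data
    """
    wells = get_wel_spd(version=3)
    return {0: smt.convert_well_data_to_stresspd(wells)}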
def _get_wel_spd_v1(recalc=False, sub_version=1):
"""
    version 1, which is deprecated but uses usage data and other estimates for the waimakariri zone
    :param recalc: boolean whether to recalc (True) or load from pickle if available
:param sub_version: passed to get swai wells
:return: pd.DataFrame
"""
    warn('v1 wells are deprecated in the newest iteration of the model')
pickle_path = '{}/well_spd.p'.format(smt.pickle_dir)
if os.path.exists(pickle_path) and not recalc and sub_version != 0:
well_data = pickle.load(open(pickle_path))
return well_data
races = get_race_data()
elv_db = smt.calc_elv_db()
for site in races.index:
races.loc[site, 'row'], races.loc[site, 'col'] = smt.convert_coords_to_matix(races.loc[site, 'x'],
races.loc[site, 'y'])
races['zone'] = 'n_wai'
races = races.set_index('well')
n_wai_wells = get_nwai_wells()
for site in n_wai_wells.index:
x, y, z = n_wai_wells.loc[site, ['x', 'y', 'z']]
temp = smt.convert_coords_to_matix(x, y, z, elv_db=elv_db)
n_wai_wells.loc[site, 'layer'], n_wai_wells.loc[site, 'row'], n_wai_wells.loc[site, 'col'] = temp
n_wai_wells['zone'] = 'n_wai'
n_wai_wells['cwms'] = 'waimak'
n_wai_wells = n_wai_wells.set_index('well')
s_wai_wells = _get_s_wai_wells(
        sub_version)  # there are some s_wai wells which do not have data in Wells, but do in the consents file; fix if bored
temp = smt.get_well_postions(np.array(s_wai_wells.index), one_val_per_well=True, raise_exct=False)
s_wai_wells['layer'], s_wai_wells['row'], s_wai_wells['col'] = temp
no_flow = smt.get_no_flow()
for i in s_wai_wells.index:
layer, row, col = s_wai_wells.loc[i, ['layer', 'row', 'col']]
if any(pd.isnull([layer, row, col])):
continue
if no_flow[layer, row, col] == 0: # get rid of non-active wells
s_wai_wells.loc[i, 'layer'] = np.nan
s_wai_wells = s_wai_wells.dropna(subset=['layer', 'row', 'col'])
s_wai_rivers = _get_s_wai_rivers().set_index('well')
all_wells = pd.concat((races, n_wai_wells, s_wai_wells, s_wai_rivers))
for i in all_wells.index:
row, col = all_wells.loc[i, ['row', 'col']]
x, y = smt.convert_matrix_to_coords(row, col)
all_wells.loc[i, 'mx'] = x
all_wells.loc[i, 'my'] = y
# check wells in correct aquifer
aq_to_layer = {'Avonside Formation': 0,
'Springston Formation': 0,
'Christchurch Formation': 0,
'Riccarton Gravel': 1,
'Bromley Formation': 2,
'Linwood Gravel': 3,
'Heathcote Formation': 4,
'Burwood Gravel': 5,
'Shirley Formation': 6,
'Wainoni Gravel': 7}
leapfrog_aq = gpd.read_file("{}/m_ex_bd_inputs/shp/layering/gis_aq_name_clipped.shp".format(smt.sdp))
leapfrog_aq = leapfrog_aq.set_index('well')
leapfrog_aq.loc[:, 'use_aq_name'] = leapfrog_aq.loc[:, 'aq_name']
leapfrog_aq.loc[leapfrog_aq.use_aq_name.isnull(), 'use_aq_name'] = leapfrog_aq.loc[
leapfrog_aq.use_aq_name.isnull(), 'aq_name_gi']
for well in all_wells.index:
try:
all_wells.loc[well, 'aquifer_in_confined'] = aq = leapfrog_aq.loc[well, 'use_aq_name']
all_wells.loc[well, 'layer_by_aq'] = aq_to_layer[aq]
except KeyError:
pass
all_wells.loc[:, 'layer_by_depth'] = all_wells.loc[:, 'layer']
all_wells.loc[all_wells.layer_by_aq.notnull(), 'layer'] = all_wells.loc[
all_wells.layer_by_aq.notnull(), 'layer_by_aq']
# move wells that fall on other boundry conditions north of waimak (or in constant head)
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2.shp".format(smt.sdp))
overlap = overlap.set_index('index')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_k']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2part2.shp".format(smt.sdp))
overlap = overlap.set_index('Field1')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_layer']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
# add little rakaia flux which will be parameterized via pest in two groups upper flux is north of SH1, lower is coastal of SH1
temp = smt.model_where(np.isfinite(
smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/little_rakaia_boundry_wells.shp".format(smt.sdp),
'Id', True)))
all_llrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
llrf = pd.DataFrame(index=['llrz_flux{:04d}'.format(e) for e in range(i * len(temp), (i + 1) * len(temp))],
columns=all_wells.keys())
llrf.loc[:, 'row'] = np.array(temp)[:, 0]
llrf.loc[:, 'col'] = np.array(temp)[:, 1]
llrf.loc[:, 'layer'] = i
llrf.loc[:, 'flux'] = -9999999 # identifier flux, parameterised in pest
llrf.loc[:, 'type'] = 'llr_boundry_flux'
llrf.loc[:, 'zone'] = 's_wai'
all_llrf = pd.concat((all_llrf, llrf))
up_temp = smt.model_where(
np.isfinite(smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/upper_lRZF.shp".format(smt.sdp),
'Id', True)))
all_ulrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
ulrf = pd.DataFrame(
index=['ulrz_flux{:04d}'.format(e) for e in range(i * len(up_temp), (i + 1) * len(up_temp))],
columns=all_wells.keys())
ulrf.loc[:, 'row'] = np.array(up_temp)[:, 0]
ulrf.loc[:, 'col'] = np.array(up_temp)[:, 1]
ulrf.loc[:, 'layer'] = i
ulrf.loc[:, 'flux'] = -8888888 # identifier flux, parameterised in pest
ulrf.loc[:, 'type'] = 'ulr_boundry_flux'
ulrf.loc[:, 'zone'] = 's_wai'
all_ulrf = pd.concat((all_ulrf, ulrf))
swai_races = get_s_wai_races()
all_wells = pd.concat((all_wells, swai_races, all_llrf, all_ulrf))
all_wells = all_wells.loc[~((all_wells.duplicated(subset=['row', 'col', 'layer'], keep=False)) &
(all_wells.type.str.contains('lr_boundry_flux')))]
all_wells = add_use_type(all_wells) # any well that has irrigation/stockwater in it's uses is considered irrigation
if sub_version != 0:
pickle.dump(all_wells, open(pickle_path, 'w'))
return all_wells
def _get_wel_spd_v3(recalc=False, sub_version=1):
"""
    all wells derived from Mike's usage estimates; I may pull down some of the WDC WS wells. This was used in the
    model as of 20/10/2017
    :param recalc: boolean whether to recalc (True) or load from pickle if available
:param sub_version: passed to get all wells
:return: pd.DataFrame
"""
pickle_path = '{}/well_spd_v3.p'.format(smt.pickle_dir)
if os.path.exists(pickle_path) and not recalc and sub_version != 0:
well_data = pickle.load(open(pickle_path))
return well_data
races = get_race_data()
elv_db = smt.calc_elv_db()
for site in races.index:
races.loc[site, 'row'], races.loc[site, 'col'] = smt.convert_coords_to_matix(races.loc[site, 'x'],
races.loc[site, 'y'])
races['zone'] = 'n_wai'
races = races.set_index('well')
all_wai_wells = _get_all_wai_wells()
temp = smt.get_well_postions(np.array(all_wai_wells.index), one_val_per_well=True, raise_exct=False)
all_wai_wells['layer'], all_wai_wells['row'], all_wai_wells['col'] = temp
no_flow = smt.get_no_flow()
for i in all_wai_wells.index:
layer, row, col = all_wai_wells.loc[i, ['layer', 'row', 'col']]
if any(pd.isnull([layer, row, col])):
continue
if no_flow[layer, row, col] == 0: # get rid of non-active wells
all_wai_wells.loc[i, 'layer'] = np.nan
all_wai_wells = all_wai_wells.dropna(subset=['layer', 'row', 'col'])
s_wai_rivers = _get_s_wai_rivers().set_index('well')
all_wells = pd.concat((races, all_wai_wells, s_wai_rivers))
for i in all_wells.index:
row, col = all_wells.loc[i, ['row', 'col']]
x, y = smt.convert_matrix_to_coords(row, col)
all_wells.loc[i, 'mx'] = x
all_wells.loc[i, 'my'] = y
# check wells in correct aquifer
aq_to_layer = {'Avonside Formation': 0,
'Springston Formation': 0,
'Christchurch Formation': 0,
'Riccarton Gravel': 1,
'Bromley Formation': 2,
'Linwood Gravel': 3,
'Heathcote Formation': 4,
'Burwood Gravel': 5,
'Shirley Formation': 6,
'Wainoni Gravel': 7}
leapfrog_aq = gpd.read_file("{}/m_ex_bd_inputs/shp/layering/gis_aq_name_clipped.shp".format(smt.sdp))
leapfrog_aq = leapfrog_aq.set_index('well')
leapfrog_aq.loc[:, 'use_aq_name'] = leapfrog_aq.loc[:, 'aq_name']
leapfrog_aq.loc[leapfrog_aq.use_aq_name.isnull(), 'use_aq_name'] = leapfrog_aq.loc[
leapfrog_aq.use_aq_name.isnull(), 'aq_name_gi']
for well in all_wells.index:
try:
all_wells.loc[well, 'aquifer_in_confined'] = aq = leapfrog_aq.loc[well, 'use_aq_name']
all_wells.loc[well, 'layer_by_aq'] = aq_to_layer[aq]
except KeyError:
pass
all_wells.loc[:, 'layer_by_depth'] = all_wells.loc[:, 'layer']
all_wells.loc[all_wells.layer_by_aq.notnull(), 'layer'] = all_wells.loc[
all_wells.layer_by_aq.notnull(), 'layer_by_aq']
# move wells that fall on other boundry conditions north of waimak (or in constant head)
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2.shp".format(smt.sdp))
overlap = overlap.set_index('index')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_k']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2part2.shp".format(smt.sdp))
overlap = overlap.set_index('Field1')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_layer']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustmentpart3.shp".format(smt.sdp))
overlap = overlap.set_index('Field1')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_layer']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
# note there are some overlaps remaining, but it's probably not a huge problem most are races
# add little rakaia flux which will be parameterized via pest in two groups upper flux is north of SH1, lower is coastal of SH1
temp = smt.model_where(np.isfinite(
smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/little_rakaia_boundry_wells.shp".format(smt.sdp),
'Id', True)))
all_llrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
llrf = pd.DataFrame(index=['llrz_flux{:04d}'.format(e) for e in range(i * len(temp), (i + 1) * len(temp))],
columns=all_wells.keys())
llrf.loc[:, 'row'] = np.array(temp)[:, 0]
llrf.loc[:, 'col'] = np.array(temp)[:, 1]
llrf.loc[:, 'layer'] = i
llrf.loc[:, 'flux'] = -9999999 # identifier flux, parameterised in pest
llrf.loc[:, 'type'] = 'llr_boundry_flux'
llrf.loc[:, 'zone'] = 's_wai'
all_llrf = pd.concat((all_llrf, llrf))
up_temp = smt.model_where(
np.isfinite(smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/upper_lRZF.shp".format(smt.sdp),
'Id', True)))
all_ulrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
ulrf = pd.DataFrame(
index=['ulrz_flux{:04d}'.format(e) for e in range(i * len(up_temp), (i + 1) * len(up_temp))],
columns=all_wells.keys())
ulrf.loc[:, 'row'] = np.array(up_temp)[:, 0]
ulrf.loc[:, 'col'] = np.array(up_temp)[:, 1]
ulrf.loc[:, 'layer'] = i
ulrf.loc[:, 'flux'] = -8888888 # identifier flux, parameterised in pest
ulrf.loc[:, 'type'] = 'ulr_boundry_flux'
ulrf.loc[:, 'zone'] = 's_wai'
all_ulrf = pd.concat((all_ulrf, ulrf))
swai_races = get_s_wai_races()
all_wells = pd.concat((all_wells, swai_races, all_llrf, all_ulrf))
all_wells = all_wells.loc[~((all_wells.duplicated(subset=['row', 'col', 'layer'], keep=False)) &
(all_wells.type.str.contains('lr_boundry_flux')))]
all_wells = add_use_type(all_wells) # any well that has irrigation/stockwater in it's uses is considered irrigation
if sub_version != 0:
pickle.dump(all_wells, open(pickle_path, 'w'))
return all_wells
def _get_2014_2015_waimak_usage():
"""
get the 2014-2015 usage for the waimakariri zone
:return: pd.DataFrame
"""
mike = pd.read_hdf("{}/m_ex_bd_inputs/sd_est_all_mon_vol.h5".format(smt.sdp))
mike = mike.loc[(mike.time >= pd.datetime(2014, 7, 1)) & (mike.take_type == 'Take Groundwater')]
mike.loc[:, 'd_in_m'] = mike.time.dt.daysinmonth
data = mike.groupby('wap').aggregate(
{'usage_est': np.sum, 'crc': ','.join, 'd_in_m': np.sum, 'mon_allo_m3': np.sum})
data.loc[:, 'flux'] = data.loc[:, 'usage_est'] / (mike.time.max() - pd.datetime(2014, 6, 30)).days
data.loc[:, 'cav_flux'] = data.loc[:, 'mon_allo_m3'] / (mike.time.max() - pd.datetime(2014, 6, 30)).days
well_details = rd_sql(**sql_db.wells_db.well_details)
well_details = well_details.set_index('WELL_NO')
out_data = pd.merge(data, pd.DataFrame(well_details.loc[:, 'WMCRZone']), left_index=True, right_index=True)
out_data = out_data.loc[np.in1d(out_data.WMCRZone, [4])]
out_data.loc[:, 'cwms'] = out_data.loc[:, 'WMCRZone'].replace({7: 'chch', 8: 'selwyn', 4: 'waimak'})
out_data = out_data.drop('WMCRZone', axis=1)
out_data['type'] = 'well'
out_data = add_use_type(out_data)
idx = (out_data.cwms == 'waimak') & (out_data.use_type == 'other')
out_data.loc[idx, 'flux'] = out_data.loc[
        idx, 'cav_flux'] * 0.25  # this comes from the average of WDC CAV vs usage made before my time; I also confirmed with Colin at WDC that this is about right
idx = out_data.flux > out_data.cav_flux
out_data.loc[idx, 'flux'] = out_data.loc[idx, 'cav_flux']
out_data.loc[:, 'flux'] *= -1
out_data['consent'] = [tuple(e.split(',')) for e in out_data.loc[:, 'crc']]
out_data = out_data.drop('crc', axis=1)
out_data = out_data.dropna()
out_data['zone'] = 'n_wai'
out_data.index.names = ['well']
return out_data
def _get_wel_spd_v2(recalc=False, sub_version=1):
"""
as version 1 but uses the 2014-2015 usage for the waimakariri zone used for the forward runs
    :param recalc: boolean whether to recalc (True) or load from pickle if available
:param sub_version: passed to get s wai wells
:return: pd.Dataframe
"""
warn('v2 pumping is for 2014 to 2015 period')
pickle_path = '{}/well_spd_v2.p'.format(smt.pickle_dir)
if os.path.exists(pickle_path) and not recalc and sub_version != 0:
well_data = pickle.load(open(pickle_path))
return well_data
races = get_race_data()
elv_db = smt.calc_elv_db()
for site in races.index:
races.loc[site, 'row'], races.loc[site, 'col'] = smt.convert_coords_to_matix(races.loc[site, 'x'],
races.loc[site, 'y'])
races['zone'] = 'n_wai'
races = races.set_index('well')
n_wai_wells = _get_2014_2015_waimak_usage()
s_wai_wells = _get_s_wai_wells(
sub_version) # there are some s_wai wells which do not have data in wells, but do in consents file fix if bored
ns_wells = pd.concat((n_wai_wells, s_wai_wells))
temp = smt.get_well_postions(np.array(ns_wells.index), one_val_per_well=True, raise_exct=False)
ns_wells['layer'], ns_wells['row'], ns_wells['col'] = temp
no_flow = smt.get_no_flow()
for i in ns_wells.index:
layer, row, col = ns_wells.loc[i, ['layer', 'row', 'col']]
if any(pd.isnull([layer, row, col])):
continue
if no_flow[layer, row, col] == 0: # get rid of non-active wells
ns_wells.loc[i, 'layer'] = np.nan
ns_wells = ns_wells.dropna(subset=['layer', 'row', 'col'])
s_wai_rivers = _get_s_wai_rivers().set_index('well')
all_wells = pd.concat((races, ns_wells, s_wai_rivers))
for i in all_wells.index:
row, col = all_wells.loc[i, ['row', 'col']]
x, y = smt.convert_matrix_to_coords(row, col)
all_wells.loc[i, 'mx'] = x
all_wells.loc[i, 'my'] = y
# check wells in correct aquifer
aq_to_layer = {'Avonside Formation': 0,
'Springston Formation': 0,
'Christchurch Formation': 0,
'Riccarton Gravel': 1,
'Bromley Formation': 2,
'Linwood Gravel': 3,
'Heathcote Formation': 4,
'Burwood Gravel': 5,
'Shirley Formation': 6,
'Wainoni Gravel': 7}
leapfrog_aq = gpd.read_file("{}/m_ex_bd_inputs/shp/layering/gis_aq_name_clipped.shp".format(smt.sdp))
leapfrog_aq = leapfrog_aq.set_index('well')
leapfrog_aq.loc[:, 'use_aq_name'] = leapfrog_aq.loc[:, 'aq_name']
leapfrog_aq.loc[leapfrog_aq.use_aq_name.isnull(), 'use_aq_name'] = leapfrog_aq.loc[
leapfrog_aq.use_aq_name.isnull(), 'aq_name_gi']
for well in all_wells.index:
try:
all_wells.loc[well, 'aquifer_in_confined'] = aq = leapfrog_aq.loc[well, 'use_aq_name']
all_wells.loc[well, 'layer_by_aq'] = aq_to_layer[aq]
except KeyError:
pass
all_wells.loc[:, 'layer_by_depth'] = all_wells.loc[:, 'layer']
all_wells.loc[all_wells.layer_by_aq.notnull(), 'layer'] = all_wells.loc[
all_wells.layer_by_aq.notnull(), 'layer_by_aq']
# move wells that fall on other boundry conditions north of waimak (or in constant head)
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2.shp".format(smt.sdp))
overlap = overlap.set_index('index')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_k']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
overlap = gpd.read_file("{}/m_ex_bd_inputs/shp/overlap_adjustment2part2.shp".format(smt.sdp))
overlap = overlap.set_index('Field1')
for well in all_wells.index:
if not well in overlap.index:
continue
all_wells.loc[well, 'layer'] += overlap.loc[well, 'add_layer']
all_wells.loc[well, 'row'] += overlap.loc[well, 'add_row']
all_wells.loc[well, 'col'] += overlap.loc[well, 'add_col']
# add little rakaia flux which will be parameterized via pest in two groups upper flux is north of SH1, lower is coastal of SH1
temp = smt.model_where(np.isfinite(
smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/little_rakaia_boundry_wells.shp".format(smt.sdp),
'Id', True)))
all_llrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
llrf = pd.DataFrame(index=['llrz_flux{:04d}'.format(e) for e in range(i * len(temp), (i + 1) * len(temp))],
columns=all_wells.keys())
llrf.loc[:, 'row'] = np.array(temp)[:, 0]
llrf.loc[:, 'col'] = np.array(temp)[:, 1]
llrf.loc[:, 'layer'] = i
llrf.loc[:, 'flux'] = -9999999 # identifier flux, parameterised in pest
llrf.loc[:, 'type'] = 'llr_boundry_flux'
llrf.loc[:, 'zone'] = 's_wai'
all_llrf = pd.concat((all_llrf, llrf))
up_temp = smt.model_where(
np.isfinite(smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/upper_lRZF.shp".format(smt.sdp),
'Id', True)))
all_ulrf = pd.DataFrame(columns=all_wells.keys())
for i in range(smt.layers):
ulrf = pd.DataFrame(
index=['ulrz_flux{:04d}'.format(e) for e in range(i * len(up_temp), (i + 1) * len(up_temp))],
columns=all_wells.keys())
ulrf.loc[:, 'row'] = np.array(up_temp)[:, 0]
ulrf.loc[:, 'col'] = np.array(up_temp)[:, 1]
ulrf.loc[:, 'layer'] = i
ulrf.loc[:, 'flux'] = -8888888 # identifier flux, parameterised in pest
ulrf.loc[:, 'type'] = 'ulr_boundry_flux'
ulrf.loc[:, 'zone'] = 's_wai'
all_ulrf = pd.concat((all_ulrf, ulrf))
swai_races = get_s_wai_races()
all_wells = pd.concat((all_wells, swai_races, all_llrf, all_ulrf))
all_wells = all_wells.loc[~((all_wells.duplicated(subset=['row', 'col', 'layer'], keep=False)) &
(all_wells.type.str.contains('lr_boundry_flux')))]
all_wells = add_use_type(all_wells) # any well that has irrigation/stockwater in it's uses is considered irrigation
if sub_version != 0:
pickle.dump(all_wells, open(pickle_path, 'w'))
return all_wells
def _get_s_wai_wells(subversion=1):
"""
get wells south of the river
:param subversion: if 0 use mike's allo data (depreciated, but held for histories sake)
if 1 use mike's usage estimate
:return: pd.DataFrame
"""
if subversion == 1:
mike = pd.read_hdf("{}/m_ex_bd_inputs/sd_est_all_mon_vol.h5".format(smt.sdp))
mike = mike.loc[(mike.time >= pd.datetime(2008, 1, 1)) & (mike.take_type == 'Take Groundwater')]
mike.loc[:, 'd_in_m'] = mike.time.dt.daysinmonth
data = mike.groupby('wap').aggregate({'usage_est': np.sum, 'crc': ','.join, 'd_in_m': np.sum})
data.loc[:, 'flux'] = data.loc[:, 'usage_est'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
well_details = rd_sql(**sql_db.wells_db.well_details)
well_details = well_details.set_index('WELL_NO')
out_data = pd.merge(data, pd.DataFrame(well_details.loc[:, 'WMCRZone']), left_index=True, right_index=True)
out_data = out_data.loc[np.in1d(out_data.WMCRZone, [7, 8])]
out_data.loc[:, 'cwms'] = out_data.loc[:, 'WMCRZone'].replace({7: 'chch', 8: 'selwyn'})
out_data = out_data.drop('WMCRZone', axis=1)
out_data.loc[:, 'flux'] *= -1
elif subversion == 0:
allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required), index_col='crc')
# option 2
end_time = pd.datetime(2016, 12, 31)
start_time = pd.datetime(2008, 1, 1)
allo2 = allo.loc[np.in1d(allo['cwms'], ['Selwyn - Waihora', 'Christchurch - West Melton']) &
(allo['take_type'] == 'Take Groundwater') &
(
(allo.status_details.str.contains('Terminated')) | allo.status_details.str.contains('Issued'))]
allo2.loc[:, 'to_date'] = pd.to_datetime(allo2.loc[:, 'to_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[:, 'from_date'] = pd.to_datetime(allo2.loc[:, 'from_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[allo2.loc[:, 'to_date'] > end_time, 'to_date'] = end_time
allo2.loc[allo2.loc[:, 'to_date'] < start_time, 'to_date'] = None
allo2.loc[allo2.loc[:, 'from_date'] < start_time, 'from_date'] = start_time
allo2.loc[allo2.loc[:, 'from_date'] > end_time, 'from_date'] = None
idx = (pd.notnull(allo2.loc[:, 'to_date']) & pd.notnull(allo2.loc[:, 'from_date']))
allo2.loc[idx, 'temp_days'] = (allo2.loc[idx, 'to_date'] - allo2.loc[idx, 'from_date'])
allo2.loc[:, 'days'] = [e.days for e in allo2.loc[:, 'temp_days']]
        # the below appear to be an internal consents marker and should not be included here, as a replacement consent
        # is active at the same time as the consent with a negative number of days
allo2.loc[allo2.loc[:, 'days'] < 0, 'days'] = 0
allo2.loc[:, 'flux'] = allo2.loc[:, 'cav'] / 365 * allo2.loc[:, 'days'] / (end_time - start_time).days * -1
allo2.loc[allo2.use_type == 'irrigation', 'flux'] *= 0.5
out_data = allo2.reset_index().groupby('wap').aggregate({'flux': np.sum, 'crc': ','.join})
out_data.loc[:, 'flux'] *= 0.50 # start with a 50% scaling factor from CAV come back if time
else:
raise ValueError('unexpected sub-version')
out_data['consent'] = [tuple(e.split(',')) for e in out_data.loc[:, 'crc']]
out_data = out_data.drop('crc', axis=1)
out_data = out_data.dropna()
out_data['type'] = 'well'
out_data['zone'] = 's_wai'
out_data.index.names = ['well']
return out_data
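def _example_cav_prorate(cav=100000.0, active_days=1825, period_days=3287):
    """
    sketch only (illustrative made-up values): the sub-version 0 branch above
    turns a consented annual volume (CAV, m3/yr) into a mean daily take by
    pro-rating over the days the consent was active in the 2008-2016 window,
    with pumping negative
    :return: flux in m3/day
    """
    return cav / 365 * active_days / period_days * -1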
def _get_all_wai_wells():
"""
get's well data from mike's usage for V3
:return: pd.DataFrame
"""
# there are some wells where the flux is greater than the CAV;
    # however these are rather minor, mostly in Selwyn, and most could be true.
mike = pd.read_hdf("{}/m_ex_bd_inputs/sd_est_all_mon_vol.h5".format(smt.sdp))
mike = mike.loc[(mike.time >= pd.datetime(2008, 1, 1)) & (mike.take_type == 'Take Groundwater')]
mike.loc[:, 'd_in_m'] = mike.time.dt.daysinmonth
data = mike.groupby('wap').aggregate(
{'usage_est': np.sum, 'crc': ','.join, 'd_in_m': np.sum, 'mon_allo_m3': np.sum})
data.loc[:, 'flux'] = data.loc[:, 'usage_est'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
data.loc[:, 'cav_flux'] = data.loc[:, 'mon_allo_m3'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
well_details = rd_sql(**sql_db.wells_db.well_details)
well_details = well_details.set_index('WELL_NO')
out_data = pd.merge(data, pd.DataFrame(well_details.loc[:, 'WMCRZone']), left_index=True, right_index=True)
out_data = out_data.loc[np.in1d(out_data.WMCRZone, [4, 7, 8])]
out_data.loc[:, 'cwms'] = out_data.loc[:, 'WMCRZone'].replace({7: 'chch', 8: 'selwyn', 4: 'waimak'})
out_data = out_data.drop('WMCRZone', axis=1)
out_data['type'] = 'well'
out_data = add_use_type(out_data)
    # set WDC (waimak and other usage) wells to 25% of CAV
idx = (out_data.cwms == 'waimak') & (out_data.use_type == 'other')
out_data.loc[idx, 'flux'] = out_data.loc[
        idx, 'cav_flux'] * 0.25  # this comes from the average of WDC CAV vs usage made before my time; I also confirmed with Colin at WDC that this is about right
out_data.loc[:, 'flux'] *= -1
out_data['consent'] = [tuple(e.split(',')) for e in out_data.loc[:, 'crc']]
out_data = out_data.drop('crc', axis=1)
out_data = out_data.dropna()
out_data.loc[out_data.cwms == 'waimak', 'zone'] = 'n_wai'
out_data.loc[~(out_data.cwms == 'waimak'), 'zone'] = 's_wai'
out_data.index.names = ['well']
return out_data
def _check_chch_wells():
"""
some well checks
:return:
"""
allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required), index_col='crc')
# option 2
end_time = pd.datetime(2016, 12, 31)
start_time = pd.datetime(2008, 1, 1)
allo2 = allo.loc[np.in1d(allo['cwms'], ['Christchurch - West Melton']) &
(allo['take_type'] == 'Take Groundwater') &
((allo.status_details.str.contains('Terminated')) | allo.status_details.str.contains('Issued'))]
allo2.loc[:, 'to_date'] = pd.to_datetime(allo2.loc[:, 'to_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[:, 'from_date'] = pd.to_datetime(allo2.loc[:, 'from_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[allo2.loc[:, 'to_date'] > end_time, 'to_date'] = end_time
allo2.loc[allo2.loc[:, 'to_date'] < start_time, 'to_date'] = None
allo2.loc[allo2.loc[:, 'from_date'] < start_time, 'from_date'] = start_time
allo2.loc[allo2.loc[:, 'from_date'] > end_time, 'from_date'] = None
idx = (pd.notnull(allo2.loc[:, 'to_date']) & pd.notnull(allo2.loc[:, 'from_date']))
allo2.loc[idx, 'temp_days'] = (allo2.loc[idx, 'to_date'] - allo2.loc[idx, 'from_date'])
allo2.loc[:, 'days'] = [e.days for e in allo2.loc[:, 'temp_days']]
    # the below appear to be an internal consents marker and should not be included here, as a replacement consent
    # is active at the same time as the consent with a negative number of days
allo2.loc[allo2.loc[:, 'days'] < 0, 'days'] = 0
allo2.loc[:, 'flux'] = allo2.loc[:, 'cav'] / 365 * allo2.loc[:, 'days'] / (end_time - start_time).days * -1
allo2.loc[allo2.use_type == 'irrigation', 'flux'] *= 0.5
out_data = allo2.reset_index().groupby('wap').aggregate({'flux': np.sum, 'crc': ','.join})
out_data['consent'] = [tuple(e.split(',')) for e in out_data.loc[:, 'crc']]
out_data = out_data.drop('crc', axis=1)
out_data = out_data.dropna()
out_data['type'] = 'well'
out_data['zone'] = 's_wai'
out_data.index.names = ['well']
out_data.loc[:, 'flux'] *= 0.50 # start with a 50% scaling factor from CAV come back if time
return out_data
def _check_waimak_wells():
"""
some well checks
:return:
"""
allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required), index_col='crc')
# option 2
end_time = pd.datetime(2016, 12, 31)
start_time = pd.datetime(2008, 1, 1)
allo2 = allo.loc[np.in1d(allo['cwms'], ['Waimakariri']) &
(allo['take_type'] == 'Take Groundwater') &
((allo.status_details.str.contains('Terminated')) | allo.status_details.str.contains('Issued'))]
allo2.loc[:, 'to_date'] = pd.to_datetime(allo2.loc[:, 'to_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[:, 'from_date'] = pd.to_datetime(allo2.loc[:, 'from_date'], format='%d/%m/%Y', errors='coerce')
allo2.loc[allo2.loc[:, 'to_date'] > end_time, 'to_date'] = end_time
allo2.loc[allo2.loc[:, 'to_date'] < start_time, 'to_date'] = None
allo2.loc[allo2.loc[:, 'from_date'] < start_time, 'from_date'] = start_time
allo2.loc[allo2.loc[:, 'from_date'] > end_time, 'from_date'] = None
idx = (pd.notnull(allo2.loc[:, 'to_date']) & pd.notnull(allo2.loc[:, 'from_date']))
allo2.loc[idx, 'temp_days'] = (allo2.loc[idx, 'to_date'] - allo2.loc[idx, 'from_date'])
allo2.loc[:, 'days'] = [e.days for e in allo2.loc[:, 'temp_days']]
    # the below appear to be an internal consents marker and should not be included here, as a replacement consent
    # is active at the same time as the consent with a negative number of days
allo2.loc[allo2.loc[:, 'days'] < 0, 'days'] = 0
allo2.loc[:, 'flux'] = allo2.loc[:, 'cav'] / 365 * allo2.loc[:, 'days'] / (end_time - start_time).days * -1
allo2.loc[allo2.use_type == 'irrigation', 'flux'] *= 0.5
out_data = allo2.reset_index().groupby('wap').aggregate({'flux': np.sum, 'crc': ','.join})
out_data['consent'] = [tuple(e.split(',')) for e in out_data.loc[:, 'crc']]
out_data = out_data.drop('crc', axis=1)
out_data = out_data.dropna()
out_data['type'] = 'well'
out_data['zone'] = 'n_wai'
out_data.index.names = ['well']
out_data.loc[:, 'flux'] *= 0.50 # start with a 50% scaling factor from CAV; revisit if time allows
return out_data
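# Illustrative refactor sketch (not part of the original module): the two zone-check
# functions above are identical apart from the CWMS filter and the output 'zone'
# label, so both could delegate to one hypothetical helper like the one below. It
# assumes pd, np and sdp_required from this module's imports.
def _check_zone_wells(cwms_names, zone_label):
    end_time = pd.datetime(2016, 12, 31)
    start_time = pd.datetime(2008, 1, 1)
    allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required), index_col='crc')
    allo2 = allo.loc[np.in1d(allo['cwms'], cwms_names) &
                     (allo['take_type'] == 'Take Groundwater') &
                     (allo.status_details.str.contains('Terminated') |
                      allo.status_details.str.contains('Issued'))]
    for col in ['to_date', 'from_date']:
        allo2.loc[:, col] = pd.to_datetime(allo2.loc[:, col], format='%d/%m/%Y', errors='coerce')
    allo2.loc[allo2['to_date'] > end_time, 'to_date'] = end_time
    allo2.loc[allo2['to_date'] < start_time, 'to_date'] = None
    allo2.loc[allo2['from_date'] < start_time, 'from_date'] = start_time
    allo2.loc[allo2['from_date'] > end_time, 'from_date'] = None
    idx = pd.notnull(allo2['to_date']) & pd.notnull(allo2['from_date'])
    allo2.loc[idx, 'days'] = (allo2.loc[idx, 'to_date'] - allo2.loc[idx, 'from_date']).dt.days
    # negative spans are internal replacement-consent markers, as noted above
    allo2.loc[allo2['days'] < 0, 'days'] = 0
    allo2.loc[:, 'flux'] = allo2['cav'] / 365 * allo2['days'] / (end_time - start_time).days * -1
    allo2.loc[allo2.use_type == 'irrigation', 'flux'] *= 0.5
    out = allo2.reset_index().groupby('wap').aggregate({'flux': np.sum, 'crc': ','.join})
    out['consent'] = [tuple(e.split(',')) for e in out['crc']]
    out = out.drop('crc', axis=1).dropna()
    out['type'] = 'well'
    out['zone'] = zone_label
    out.index.names = ['well']
    out.loc[:, 'flux'] *= 0.50  # same 50% CAV scaling as the functions above
    return out
# e.g. _check_zone_wells(['Waimakariri'], 'n_wai') reproduces _check_waimak_wells().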
def _get_s_wai_rivers():
"""
get the well features that are being used to represent the selwyn hill-fed streams
:return:
"""
# julian's report suggests that the hawkins is mostly a balanced flow and it was not included in
# scott and thorley 2009; a nominal hawkins flux is nevertheless added below
# values for the upper selwyn, hororata and waianiwaniwa rivers are taken from scott and thorley 2009 and are evenly
# distributed across the area as injection wells in layer 1
# values in the shapefile reference the reach numbers from scott and thorley 2009
no_flow = smt.get_no_flow(0)
no_flow[no_flow < 0] = 0
rivers = smt.shape_file_to_model_array("{}/m_ex_bd_inputs/shp/selwyn_hill_feds.shp".format(smt.sdp), 'reach', True)
rivers[~no_flow.astype(bool)] = np.nan
waian = pd.DataFrame(smt.model_where(rivers[np.newaxis, :, :] == 106), columns=['layer', 'row', 'col'])
waian['well'] = ['waian{:04d}'.format(e) for e in waian.index]
waian['flux'] = 0.142 * 86400 / len(waian) # evenly distribute flux from scott and thorley 2009
selwyn = pd.DataFrame(smt.model_where(rivers[np.newaxis, :, :] == 104), columns=['layer', 'row', 'col'])
selwyn['well'] = ['selwyn{:04d}'.format(e) for e in selwyn.index]
selwyn['flux'] = 4.152 * 86400 / len(selwyn) # evenly distribute flux from scott and thorley 2009
horo = pd.DataFrame(smt.model_where(rivers[np.newaxis, :, :] == 105), columns=['layer', 'row', 'col'])
horo['well'] = ['horo{:04d}'.format(e) for e in horo.index]
# evenly distribute flux from scott and thorley 2009, but only 7/32 of the river is included in my model
horo['flux'] = 0.554 * 7. / 32 * 86400 / len(horo)
# the hawkins 0.25 m3/s equally distributed between watsons bridge road and homebush road
hawkins = pd.DataFrame(smt.model_where(rivers[np.newaxis, :, :] == 103), columns=['layer', 'row', 'col'])
hawkins['well'] = ['hawkins{:04d}'.format(e) for e in hawkins.index]
hawkins['flux'] = 0.25 * 86400 / len(hawkins) # evenly distribute the assumed 0.25 m3/s
outdata = pd.concat((waian, selwyn, horo, hawkins))
outdata['zone'] = 's_wai'
outdata['type'] = 'river'
outdata['consent'] = None
return outdata
def get_s_wai_races():
"""
get the wells that represent the race systems
:return:
"""
no_flow = smt.get_no_flow(0)
race_array = smt.shape_file_to_model_array('{}/m_ex_bd_inputs/shp/s_wai_races.shp'.format(smt.sdp), 'race_code',
True)
race_array[np.isclose(no_flow, 0)] = np.nan
nums = [1, 2, 3]
names = ['ells_race', 'mal_race', 'papa_race']
# for now the losses below are calculated as an 89% loss over the races (proportion_lost), and the influx volume
# is assumed to be the baseflow from Selwyn District Council Water Race Management Plan 30 July 2013 table 2.1. the
# losses in the ellesmere scheme are scaled by 38/156 because only a small portion of the scheme is in the model domain.
min_flow = [1.539, 1.375, 1.231] # excludes irrigation
max_flow = [1.732, 2.210, 1.331]
losses = [(i + j) / 2 for i, j in zip(min_flow, max_flow)]
losses[0] *= 38 / 156
proportion_lost = 0.89
proportion_season = 0.75
losses = [e * 86400 * proportion_lost * proportion_season for e in losses]
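# Worked example of the scaling above (illustrative, using the numbers already given):
# the ellesmere entry becomes (1.539 + 1.732) / 2 * 38 / 156 * 86400 * 0.89 * 0.75
# ~= 2.3e4 m3/day before it is split evenly over that scheme's race cells below.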
outdata = pd.DataFrame()
for num, name, loss in zip(nums, names, losses):
idx = smt.model_where(np.isclose(race_array, num))
keys = ['{}{:04d}'.format(name, e) for e in range(len(idx))]
row = np.array(idx)[:, 0]
col = np.array(idx)[:, 1]
flux = loss / len(keys)
temp = pd.DataFrame(index=keys, data={'row': row, 'col': col, 'flux': flux})
outdata = pd.concat((outdata, temp))
outdata['layer'] = 0
outdata['zone'] = 's_wai'
outdata['type'] = 'race'
outdata['consent'] = None
outdata.index.names = ['well']
return outdata
def add_use_type(data):
"""
add the use type to a set of well data
:param data: well data (pd.DataFrame)
:return:
"""
data = deepcopy(data)
allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required))
allo = allo.set_index('wap')
data.loc[:, 'use_type'] = 'injection'
for i in data.loc[data.type == 'well'].index:
if np.in1d(np.atleast_1d(allo.loc[i, 'use_type']), ['stockwater', 'irrigation']).any():
data.loc[i, 'use_type'] = 'irrigation-sw'
else:
data.loc[i, 'use_type'] = 'other'
return data
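# Illustrative note (not in the original): after add_use_type every row carries a
# 'use_type' of 'irrigation-sw' (wells consented for irrigation or stockwater),
# 'other' (remaining wells), or 'injection' (non-well features such as rivers and races).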
if __name__ == '__main__':
#wells = get_wel_spd(3)
# wells.to_csv(r"P:\Groundwater\Waimakariri\Groundwater\Numerical GW model\modelling_reports\ashely_waimakarriri_model_build\figs_for_report\wells.csv")
# tests
from waimak_extended_boundry import get_well_budget
from waimak_extended_boundry import get_max_rate, get_full_consent
new = _get_wel_spd_v2()
print('version 2 (2014/15)')
print(get_well_budget(new) / 86400)
old = _get_wel_spd_v3()
print('version 3 (2008 - 2015)')
temp = get_well_budget(old) / 86400
print(temp)
temp.to_csv(r"P:\Groundwater\Waimakariri\Groundwater\Numerical GW model\modelling_reports\ashely_waimakarriri_model_build\figs_for_report\well_budget.csv")
print('max_rate')
max_rate = get_max_rate('opt')
print(get_well_budget(max_rate)/86400)
print('full CAV')
full_cav = get_full_consent('opt')
print(get_well_budget(full_cav)/86400)
well_data = new
well_data = well_data.loc[:, ['layer', 'row', 'col', 'flux', 'type']]
well_data.to_csv(r"C:\Users\MattH\Desktop\to_brioch_2017_10_4/well_data.csv")
new_nwai = _get_2014_2015_waimak_usage()
nwai = get_nwai_wells()
s_wells = _get_s_wai_wells()
test = get_s_wai_races()
old = _get_wel_spd_v1(recalc=False)
new = _get_wel_spd_v3(recalc=True)
n_wells_new = _check_waimak_wells()
allo = pd.read_csv("{}/inputs/wells/allo_gis.csv".format(sdp_required), index_col='crc')
chch_wells = _check_chch_wells()
n_wells = get_nwai_wells()
s_wells_all = _get_s_wai_wells()
print('done')
| [
"[email protected]"
] | |
7c97a5eae902c35c8233bee53f36ce711c8da55f | ccb4cb8358fb896a88bbf0c6771462d898d7a492 | /examples/venus_evening_chart.py | bc89920d2db7aab3a46d0afccdcec128b4e5fe3d | [
"MIT"
] | permissive | skyfielders/python-skyfield | a30d34a680dcd285bc8cd39cedc2629f792d5821 | 61fb6324e312715e20aa75ec24dc87286442be1a | refs/heads/master | 2023-08-31T13:10:32.863587 | 2023-08-10T14:25:56 | 2023-08-10T14:25:56 | 7,924,113 | 1,040 | 204 | MIT | 2023-08-28T19:44:50 | 2013-01-30T21:19:21 | Python | UTF-8 | Python | false | false | 3,584 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from skyfield import almanac
from skyfield.api import load, wgs84
from skyfield.magnitudelib import planetary_magnitude
MONTH_NAMES = '0 Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
# Figure out the times of sunset over our range of dates.
eph = load('de421.bsp')
earth, sun, venus = eph['earth'], eph['sun'], eph['venus']
observer = wgs84.latlon(+40.0, 0.0)
ts = load.timescale()
start, end = ts.utc(2021, 3, 7), ts.utc(2022, 2, 7)
f = almanac.sunrise_sunset(eph, observer)
t, y = almanac.find_discrete(start, end, f)
sunsets = (y == 0)
t = t[sunsets]
# For each moment of sunset, ask Skyfield for the month number, the day
# number, and for Venus’s altitude, azimuth, and magnitude.
year, month, day, hour, minute, second = t.utc
month = month.astype(int)
day = day.astype(int)
apparent = (earth + observer).at(t).observe(venus).apparent()
alt, az, distance = apparent.altaz()
x, y = az.degrees, alt.degrees
m = planetary_magnitude(apparent)
# Convert magnitude to marker size, remembering that smaller magnitude
# numbers mean a brighter Venus (and thus a larger marker).
maxmag = max(m)
minmag = min(m)
size = 40 - 30 * (m - minmag) / (maxmag - minmag)
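# (Illustrative note: with this linear mapping the brightest magnitude observed
# gets the full 40-point marker and the faintest gets a 10-point marker.)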
# Start with a smooth curve tracing Venus's motion.
fig, ax = plt.subplots(figsize=[9, 3])
ax.plot(x, y, c='#fff6', zorder=1)
# Next, put a circle representing Venus on the 1st of the month and on
# every fifth day after that. (Except for the 31st, which would sit too
# close to the 1st of the following month.)
fives = (day % 5 == 1) & (day < 30)
ax.scatter(x[fives], y[fives], size[fives], 'white',
edgecolor='black', linewidth=0.25, zorder=2)
# Put day and month labels off to the sides of the curve.
offset_x, offset_y = 10, 8
for i in np.flatnonzero(fives):
if i == 0:
continue # We can’t compute dx/dy with no previous point.
# Build a unit vector pointing in the direction Venus is traveling.
day_i = day[i]
xi = x[i]
yi = y[i]
dx = xi - x[i-1]
dy = yi - y[i-1]
length = np.sqrt(dx*dx + dy*dy)
dx /= length
dy /= length
# Offset the text at a right angle to the direction of travel.
side = 'right' if (year[i], month[i]) < (2021, 10) else 'left'
if side == 'left':
xytext = - offset_x*dy, offset_y*dx
else:
xytext = offset_x*dy, - offset_y*dx
# Label the dates 1, 11, and 21.
if day_i in (1, 11, 21):
ax.annotate(day_i, (xi, yi), c='white', ha='center', va='center',
textcoords='offset points', xytext=xytext, size=8)
# On the 16th of each month, put the month name.
if day_i == 16:
name = MONTH_NAMES[month[i]]
ax.annotate(name, (xi, yi), c='white', ha='center', va='center',
textcoords='offset points', xytext=2.2 * np.array(xytext))
# Finally, some decorations.
points = 'N NE E SE S SW W NW'.split()
for i, name in enumerate(points):
xy = 45 * i, 1
ax.annotate(name, xy, c='white', ha='center', size=12, weight='bold')
ax.set(
aspect=1.0,
title='Venus at sunset for 40°N latitude, April 2021 – January 2022',
xlabel='Azimuth (°)',
ylabel='Altitude (°)',
xlim=(195, 300),
ylim=(0, max(y) + 10.0),
xticks=np.arange(210, 300, 15),
)
sky = LinearSegmentedColormap.from_list('sky', ['black', 'blue'])
extent = ax.get_xlim() + ax.get_ylim()
ax.imshow([[0,0], [1,1]], cmap=sky, interpolation='bicubic', extent=extent)
fig.savefig('venus_evening_chart.png')
| [
"[email protected]"
] | |
67b744eb1a386ef9781575699805fb61d73dec1b | 6a0a634265957e9dcd26bc80e3304e107fb004d0 | /venvflask/lib/python3.7/site-packages/eth_account/signers/local.py | b3112edece1bdaa7c182b3ff9fb18a6e36a75012 | [] | no_license | ogutiann/PythonEthereumSmartContracts | 8bd81aa14eab567d41b5dad74b67aba92a405ebd | d870e9fd1c7f68b8493db4c2b2af224f966d8e51 | refs/heads/master | 2023-01-04T14:23:12.396898 | 2020-10-29T12:12:46 | 2020-10-29T12:12:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | import warnings
from eth_account.signers.base import (
BaseAccount,
)
class LocalAccount(BaseAccount):
r"""
A collection of convenience methods to sign and encrypt, with an embedded private key.
:var bytes key: the 32-byte private key data
.. code-block:: python
>>> my_local_account.address # doctest: +SKIP
"0xF0109fC8DF283027b6285cc889F5aA624EaC1F55"
>>> my_local_account.key # doctest: +SKIP
b"\x01\x23..."
You can also get the private key by casting the account to :class:`bytes`:
.. code-block:: python
>>> bytes(my_local_account) # doctest: +SKIP
b"\\x01\\x23..."
"""
def __init__(self, key, account):
"""
:param eth_keys.PrivateKey key: to prefill in private key execution
:param ~eth_account.account.Account account: the key-unaware management API
"""
self._publicapi = account
self._address = key.public_key.to_checksum_address()
key_raw = key.to_bytes()
self._private_key = key_raw
self._key_obj = key
@property
def address(self):
return self._address
@property
def privateKey(self):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.signers.local.LocalAccount.key`.
This attribute will be removed in v0.5
"""
warnings.warn(
"privateKey is deprecated in favor of key",
category=DeprecationWarning,
)
return self._private_key
@property
def key(self):
"""
Get the private key.
"""
return self._private_key
def encrypt(self, password, kdf=None, iterations=None):
"""
Generate a string with the encrypted key, as in
:meth:`~eth_account.account.Account.encrypt`, but without a private key argument.
"""
return self._publicapi.encrypt(self.key, password, kdf=kdf, iterations=iterations)
def signHash(self, message_hash):
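"""
Sign the given message hash, as in :meth:`~eth_account.account.Account.signHash`,
but without a private key argument.
"""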
return self._publicapi.signHash(
message_hash,
private_key=self.key,
)
def sign_message(self, signable_message):
"""
Sign a message, as in
:meth:`~eth_account.account.Account.sign_message`, but without a private key argument.
"""
return self._publicapi.sign_message(signable_message, private_key=self.key)
def signTransaction(self, transaction_dict):
warnings.warn(
"signTransaction is deprecated in favor of sign_transaction",
category=DeprecationWarning,
)
return self.sign_transaction(transaction_dict)
def sign_transaction(self, transaction_dict):
return self._publicapi.sign_transaction(transaction_dict, self.key)
def __bytes__(self):
return self.key
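# Illustrative usage (not part of the original module): LocalAccount instances are
# normally produced by the key-aware factory methods on eth_account.Account rather
# than constructed directly, e.g.:
#
#     >>> from eth_account import Account
#     >>> acct = Account.create()                              # doctest: +SKIP
#     >>> signed = acct.sign_transaction(transaction_dict)     # doctest: +SKIP
#     >>> Account.from_key(acct.key).address == acct.address   # doctest: +SKIP
#     True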
| [
"[email protected]"
] | |
e29dc15662e20df9a68545bb651642a23299991c | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/datapoints/multiplier_s.py | e1f6f1673af46c99867dc958f2dc2a793e6010aa | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | """multiplier_s standard datapoint type, originally defined in resource file
set standard 00:00:00:00:00:00:00:00-0. """
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.base
from pylon.resources.standard import standard
class multiplier_s(pylon.resources.base.Scaled):
"""multiplier_s standard datapoint type. Multiplier Value multiplier."""
def __init__(self):
super().__init__(
size=1,
signed=False,
scaling=(0.01, 0),
invalid=2.55,
minimum=0,
maximum=2.54,
scope=0,
key=188
)
self._original_name = 'SNVT_multiplier_s'
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = multiplier_s()
pass
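# Illustrative note (not in the original): with scaling=(0.01, 0) a raw unsigned
# byte v decodes to 0.01 * v, so raw 150 reads as a multiplier of 1.50 and the
# raw maximum 255 is reserved as the invalid value 2.55.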
| [
"[email protected]"
] | |
b1bba65c64df29b31b76339751cbbc8806397ddc | 14567e2f77d2bf697bb18c3c1e3d6744c11f41c8 | /kfpt/old/ftp.py | dacb7eea6ca2d99b1bd4ef22df1c34b411c3282e | [] | no_license | yanislong/junnan | 268e64c288e18456da621d5485e04bf8eb8f5322 | fc35f32a29a7b6da2a8ea334d0e53a21a81d97f3 | refs/heads/master | 2021-01-01T20:08:05.825407 | 2017-09-08T02:24:40 | 2017-09-08T02:24:40 | 98,772,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
from ftplib import FTP
import os
ftp = FTP()
ftp.set_debuglevel(2)
ftp.connect("123.126.34.27", 12221)
for i in range(1):
try:
ftp.login("TTuser7017","791241")
print "ok"
except:
print "no"
print ftp.getwelcome()
print "****************come in path"
ftp.cmd("/tmp/")
ftp.retrlines('LIST')
ftp.cwd("")
print "************ show file"
ftp.dir('/tmp/')
print "**********show now dir"
ftp.pwd()
print "*************show filler file"
ftp.nlst
bufsize = 1024
filename ="long1.xlsx"
file_handle = open("/root/long.xlsx","rb")
down_file = open("./down","wb").write
#ftp.storbinary('STOR %s' % os.path.basename(filename),file_handle,bufsize)
ftp.storbinary('STOR /home/main_admin/long.txt',file_handle,bufsize)
ftp.retrbinary("RETR %s" % os.path.basename(filename),down_file,bufsize)
ftp.set_debuglevel(0)
file_handle.close()
ftp.quit()
print ">>>>>..end..<<<<<<"
| [
"[email protected]"
] | |
30830b98e7dfdae390d1c5750b4945123531013a | c9b1e04ba65ba3e0af2a8ae86b88187b72bcaa0b | /.svn/pristine/30/30830b98e7dfdae390d1c5750b4945123531013a.svn-base | 434e78a9bc49b211fa0f15acea01356572d0765a | [] | no_license | feitianyiren/TaskCoach | 7762a89d5b521cfba0827323a9e8a91d1579810b | 0b7427562074845ac771e59e24a750aa5b432589 | refs/heads/master | 2020-04-08T04:56:35.491490 | 2016-01-12T13:29:03 | 2016-01-12T13:29:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | import patterns, wx
from i18n import _
import domain.date as date
import task
def newTaskMenuText():
# There is a bug in wxWidgets/wxPython on the Mac that causes the
# INSERT accelerator to be mapped to some other key sequence ('c' in
# this case) so that whenever that key sequence is typed, this command
# is invoked. Hence, we use a different accelerator on the Mac.
menuText = _('&New task...')
if '__WXMAC__' in wx.PlatformInfo:
menuText += u'\tCtrl+N'
else:
menuText += u'\tCtrl+INS'
return menuText
def newSubTaskMenuText():
# See comments in newTaskMenuText() above
menuText = _('New &subtask...')
if '__WXMAC__' in wx.PlatformInfo:
menuText += u'\tShift+Ctrl+N'
else:
menuText += u'\tShift+Ctrl+INS'
return menuText
class TaskList(patterns.CompositeSet):
# FIXME: TaskList should be called TaskCollection or TaskSet
newItemMenuText = newTaskMenuText()
newItemHelpText = _('Insert a new task')
editItemMenuText = _('&Edit task...')
editItemHelpText = _('Edit the selected task')
deleteItemMenuText = _('&Delete task\tCtrl+DEL')
deleteItemHelpText = _('Delete the selected task(s)')
newSubItemMenuText = newSubTaskMenuText()
newSubItemHelpText = _('Insert a new subtask into the selected task')
def _nrInterestingTasks(self, isInteresting):
interestingTasks = [task for task in self if isInteresting(task)]
return len(interestingTasks)
def nrCompleted(self):
return self._nrInterestingTasks(task.Task.completed)
def nrOverdue(self):
return self._nrInterestingTasks(task.Task.overdue)
def nrInactive(self):
return self._nrInterestingTasks(task.Task.inactive)
def nrDueToday(self):
return self._nrInterestingTasks(task.Task.dueToday)
def nrBeingTracked(self):
return self._nrInterestingTasks(task.Task.isBeingTracked)
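# (Illustrative note: each counter above passes an unbound task.Task predicate to
# _nrInterestingTasks, so adding another counter is a one-line method.)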
def allCompleted(self):
nrCompleted = self.nrCompleted()
return nrCompleted > 0 and nrCompleted == len(self)
def efforts(self):
result = []
for task in self:
result.extend(task.efforts())
return result
def __allDates(self):
realDates = [aDate for task in self
for aDate in (task.startDate(), task.dueDate(), task.completionDate())
if aDate != date.Date()]
if realDates:
return realDates
else:
return [date.Date()]
def minDate(self):
return min(self.__allDates())
def maxDate(self):
return max(self.__allDates())
def originalLength(self):
''' Provide a way for bypassing the __len__ method of decorators. '''
return len(self)
class SingleTaskList(TaskList):
pass | [
"[email protected]"
] | ||
1630c7c38774560507877f3e076ad65cc552781d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2539/60757/284819.py | 3db560fcc8fdfff1cf8c9c9c7072d6a8c1b3457c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | arr=eval(input())
so=sorted(arr)
if arr==so:
print(0)
else:
start=0
end=len(arr)
for i in range(len(arr)):
if arr[i]!=so[i]:
start=i
break
for i in range(len(arr)-1,-1,-1):
if arr[i]!=so[i]:
end=i
break
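# start and end now bracket the first and last indices where arr differs from its
# sorted copy, so end - start + 1 is the length of the shortest subarray to sort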
print(end-start+1) | [
"[email protected]"
] |