blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 616) | content_id (string, length 40) | detected_licenses (sequence, length 0 to 112) | license_type (string, 2 classes) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, length 3 to 10.2M) | authors (sequence, length 1) | author_id (string, length 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d0e45b717500e7096f723da45140055b013f0af | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnbedew.py | 903345cf4acfef6232547aba112b5421e03d2b8f | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 65 | py | ii = [('GodwWSL2.py', 3), ('AinsWRR3.py', 1), ('FerrSDO2.py', 1)] | [
"[email protected]"
] | |
e914f312a676793d37bbd0b6ebd106a5a1ed8467 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/sankey/node/line/_widthsrc.py | f4b333792ea5fe507c7ee8f990eed4194966516e | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 419 | py | import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="widthsrc", parent_name="sankey.node.line", **kwargs
):
super(WidthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| [
"[email protected]"
] | |
5cd9a52daf3835b8c6d129f6f036940adfa985e3 | 700bf615913fa5dd8686fac51b444f691023a035 | /igrins/recipes/recipe_wvlsol_sky2.py | 2ccbf18967d717ff08c39a6b8788abb53a01a2d4 | [] | no_license | shihyuntang/plp | e24da3d23debabb61edfca5416a72577717c6195 | 842a5db71dfe57d1b3bf8ac292dabdb69830ac7f | refs/heads/master | 2023-03-09T06:13:50.874801 | 2021-02-26T20:03:20 | 2021-02-26T20:03:20 | 285,095,289 | 0 | 0 | null | 2020-08-04T20:42:51 | 2020-08-04T20:42:50 | null | UTF-8 | Python | false | false | 6,854 | py | # This is to use new framework. Let's use this to measure flexure
# between emission spectra (e.g., sky, UNe, etc.)
#import os
import numpy as np
import pandas as pd
def save_qa(obsset):
df = obsset.load_data_frame("SKY_FITTED_PIXELS_JSON", orient="split")
msk_ = df["slit_center"] == 0.5
dfm_ = df[msk_]
msk = np.isfinite(dfm_["pixels"])
dfm = dfm_[msk]
lines_map = dict((o, (_["pixels"].values, _["wavelength"].values))
for o, _ in dfm.groupby("order"))
from matplotlib.figure import Figure
from igrins.libs.ecfit import check_fit
d = obsset.load_item("SKY_WVLSOL_JSON")
orders = d["orders"]
fit_results = obsset.load_item("SKY_WVLSOL_FIT_RESULT_JSON")
# fit_results = dict(xyz=[xl[msk], yl[msk], zl[msk]],
# fit_params=fit_params,
# fitted_model=poly_2d)
# xl, yl, zl = get_ordered_line_data(reidentified_lines_map)
xl, yl, zlo = fit_results["xyz"]
xl, yl, zlo = [np.array(_) for _ in [xl, yl, zlo]]
zl = zlo
m = np.array(fit_results["fitted_mask"])
lines_map_filtered = dict((o, (_["pixels"].values,
_["wavelength"].values))
for o, _ in dfm[m].groupby("order"))
    module_name, class_name, serialized = fit_results["fitted_model"]
    from igrins.libs.astropy_poly_helper import deserialize_poly_model
    p = deserialize_poly_model(module_name, class_name, serialized)
if 1:
fig1 = Figure(figsize=(12, 7))
check_fit(fig1, xl, yl, zl, p,
orders,
lines_map)
fig1.tight_layout()
fig2 = Figure(figsize=(12, 7))
check_fit(fig2, xl[m], yl[m], zl[m], p,
orders,
lines_map_filtered)
fig2.tight_layout()
from igrins.libs.qa_helper import figlist_to_pngs
dest_dir = obsset.query_item_path("qa_sky_fit2d_dir",
subdir="sky_fit2d")
figlist_to_pngs(dest_dir, [fig1, fig2])
# sky_basename = helper.get_basename(band, obsids[0])
# sky_figs = helper.get_section_filename_base("QA_PATH",
# "oh_fit2d",
# "oh_fit2d_"+sky_basename)
# figlist_to_pngs(sky_figs, [fig1, fig2])
def save_distortion_db(obsset):
db = obsset.load_db("distortion")
db.update(obsset.band, obsset.basename)
def save_wvlsol_db(obsset):
db = obsset.load_db("wvlsol")
db.update(obsset.band, obsset.basename)
# if 1:
# thar_db.update(band, thar_basename)
# 20151003 : Below is an attempt to modularize the recipes, which has
# not been finished. The initial solution part is done, but the distortion part
# is not.
def save_ordermap_slitposmap(obsset):
from aperture_helper import get_simple_aperture_from_obsset
wvlsol_v0 = obsset.load_resource_for("wvlsol_v0")
orders = wvlsol_v0["orders"]
ap = get_simple_aperture_from_obsset(obsset,
orders=orders)
order_map = ap.make_order_map()
slitpos_map = ap.make_slitpos_map()
order_map2 = ap.make_order_map(mask_top_bottom=True)
obsset.store_image("ordermap_fits", order_map)
obsset.store_image("slitposmap_fits", slitpos_map)
obsset.store_image("ordermap_masked_fits", order_map2)
def save_wavelength_map(obsset):
fit_results = obsset.load_item("SKY_WVLSOL_FIT_RESULT_JSON")
from igrins.libs.astropy_poly_helper import deserialize_poly_model
module_name, klass_name, serialized = fit_results["fitted_model"]
poly_2d = deserialize_poly_model(module_name, klass_name, serialized)
order_map = obsset.load_item("ordermap_fits")[0].data
# slitpos_map = caldb.load_item_from(basename, "slitposmap_fits")
offset_map = obsset.load_item("slitoffset_fits")[0].data
msk = order_map > 0
_, pixels = np.indices(msk.shape)
orders = order_map[msk]
wvl = poly_2d(pixels[msk] - offset_map[msk], orders) / orders
wvlmap = np.empty(msk.shape, dtype=float)
wvlmap.fill(np.nan)
wvlmap[msk] = wvl
obsset.store_image("WAVELENGTHMAP_FITS", wvlmap)
from igrins.libs.recipe_helper import RecipeHelper
from process_wvlsol_v0 import extract_spectra_multi
from process_wvlsol_v0 import make_combined_image
def process_band(utdate, recipe_name, band,
groupname,
obsids, frametypes,
aux_infos, config_name):
from igrins import get_caldb, get_obsset
caldb = get_caldb(config_name, utdate)
obsset = get_obsset(caldb, band, recipe_name, obsids, frametypes)
# STEP 1 :
## make combined image
make_combined_image(obsset)
# Step 2
extract_spectra_multi(obsset)
from process_identify_multiline import identify_multiline
identify_multiline(obsset)
from process_wvlsol_volume_fit import volume_fit, generate_slitoffsetmap
volume_fit(obsset)
save_distortion_db(obsset)
save_ordermap_slitposmap(obsset)
generate_slitoffsetmap(obsset)
from process_derive_wvlsol import derive_wvlsol
derive_wvlsol(obsset)
save_wvlsol_db(obsset)
save_wavelength_map(obsset)
from process_save_wat_header import save_wat_header
save_wat_header(obsset)
# save_wavelength_map(helper, band, obsids)
# #fit_wvl_sol(helper, band, obsids)
save_qa(obsset)
# some of the fugures are missing.
# save_figures()
from igrins.libs.recipe_factory import new_recipe_class, new_recipe_func
# If the recipe is != "SKY", the resulting combined image will be A-B.
_recipe_class_wvlsol_sky = new_recipe_class("RecipeWvlsolSky",
["SKY", "SKY_AB"], process_band)
wvlsol_sky = new_recipe_func("wvlsol_sky",
_recipe_class_wvlsol_sky)
sky_wvlsol = new_recipe_func("sky_wvlsol",
_recipe_class_wvlsol_sky)
__all__ = wvlsol_sky, sky_wvlsol
# if 0:
# # Step 3:
# identify_lines(helper, band, obsids)
# get_1d_wvlsol(helper, band, obsids)
# save_1d_wvlsol(extractor,
# orders_w_solutions, wvl_sol, p)
# save_qa(extractor, orders_w_solutions,
# reidentified_lines_map, p, m)
# save_figures(helper, band, obsids)
# save_db(helper, band, obsids)
if __name__ == "__main__":
utdate = "20140709"
obsids = [62, 63]
utdate = "20140525"
obsids = [29]
utdate = "20150525"
obsids = [52]
recipe_name = "SKY"
# utdate = "20150525"
# obsids = [32]
# recipe_name = "THAR"
band = "K"
#helper = RecipeHelper("../recipe.config", utdate)
config_name = "../recipe.config"
process_band(utdate, recipe_name, band, obsids, config_name)
| [
"[email protected]"
] | |
06470ed428c9e68e74635aeeb5e1e3f853111727 | 5686100c4ed0436347107f4e9faae30fca609c09 | /leetcode/1030. Matrix Cells in Distance Order/Solution.py | 926b64215552cf842d6465f0b2786a2cc0af72cb | [] | no_license | adamqddnh/algorithm-questions | 7d4f56b7e5ac2ff9460774d43ecf8cba2cd7b0cb | 93a1b082e10ade0dd464deb80b5df6c81552f534 | refs/heads/master | 2023-06-29T04:51:26.635740 | 2021-07-23T09:11:45 | 2021-07-23T09:11:45 | 252,675,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | class Solution(object):
def allCellsDistOrder(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
maxLength = R + C + 1
distance = [[] for i in range(0, maxLength)]
for i in range(0, R):
for j in range(0, C):
temp = abs(r0 - i) + abs(c0 - j)
distance[temp].append([i, j])
result = []
for i in range(0, maxLength):
for temp in distance[i]:
result.append(temp)
return result
| [
"[email protected]"
] | |
f35da223ba059f5824b45b5398b78811b900ca89 | aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4 | /Q15__/04_Count_Submatrices_With_All_Ones/Solution.py | c0f071e4eb738bc0cdf16e003c0295732600aa05 | [
"Apache-2.0"
] | permissive | hsclinical/leetcode | e9d0e522e249a24b28ab00ddf8d514ec855110d7 | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | refs/heads/main | 2023-06-14T11:28:59.458901 | 2021-07-09T18:57:44 | 2021-07-09T18:57:44 | 319,078,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | from typing import List
class Solution:
def numSubmat(self, mat: List[List[int]]) -> int:
rowLen = len(mat)
colLen = len(mat[0])
medium = [ [0] * colLen for _ in range(rowLen) ]
#stack to bottom
for j in range(colLen):
for i in range(rowLen):
if mat[i][j] == 1:
if i == 0 or mat[i-1][j] == 0:
medium[i][j] = 1
else:
medium[i][j] = medium[i-1][j] + 1
#print(medium)
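        # Second pass: for each cell taken as the bottom-right corner, walk leftwards
        # while the row stays 1, keeping the running minimum of the column heights;
        # each minimum counts the submatrices whose left edge starts at that column,
        # so their sum is the number of all-ones submatrices ending at this corner.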
result = [ [0] * colLen for _ in range(rowLen) ]
total = 0
for i in range(rowLen):
for j in range(colLen):
if mat[i][j] == 1:
cellTotal = medium[i][j]
# backward column
minStack = cellTotal
for k in range(j-1, -1, -1):
if mat[i][k] == 0:
break
else:
minStack = min(minStack, medium[i][k])
cellTotal += minStack
total += cellTotal
result[i][j] = cellTotal
#print(result)
return total | [
"[email protected]"
] | |
dc9f823aea13cfa9dee9a53a698656d62586ac18 | d50d24a111f7fc078ef98bc5059355793fe7dd37 | /tao_bao/db/dbhelper.py | 97d38ade4af4772e1f66711aeb87dcddf3af6dc3 | [] | no_license | liangxuCHEN/scrapy_taobao | f2cbd38b5d746052bac3366aa035edf988cb1115 | 595fb9a9dcd45a32b43e0478580a7a936d1b55a2 | refs/heads/master | 2021-05-08T11:50:12.854647 | 2018-04-11T03:20:17 | 2018-04-11T03:20:17 | 119,913,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | # -*- coding: utf-8 -*-
from scrapy.utils.project import get_project_settings #导入seetings配置
from sqlalchemy import create_engine, Column, String, DateTime, Integer, Float, func
from sqlalchemy.ext.declarative import declarative_base
import redis
# 初始化数据库连接:
# engine = create_engine('mysql+pymysql://root:123asd@localhost:3306/utf_sys?charset=utf8')
engine = create_engine('postgresql+psycopg2://postgres:[email protected]/execdb')
# 初始化redis数据库连接
Redis = redis.StrictRedis(host='localhost',port=6379,db=0)
Base = declarative_base()
class TaoBaoModel(Base):
__tablename__ = 'tab_taobao_item'
id = Column(Integer, primary_key=True)
page_number = Column(Integer)
job_id = Column(String(50))
item_id = Column(String(50))
name = Column(String(100))
main_pic = Column(String(200))
price = Column(Float)
pay_person = Column(Integer)
province = Column(String(20))
city = Column(String(20))
shop_name = Column(String(50))
detail_url = Column(String(200))
category_id = Column(String(50))
category = Column(String(50))
is_tmall = Column(Integer)
user_id = Column(String(50))
market = Column(String(20))
record_date = Column(DateTime)
# A table that records the search history
class TaoBaoProjectModel(Base):
"""
    Parameter description:
    _id: the ID of this search project (TODO: generate it in the database later)
    project_name: project name
    market: 1 -> Taobao, 2 -> Tmall
    keyword: the keyword typed into the search box
    pageNumber: number of pages to crawl, 100 pages at most
    min_price: optional, lower bound of the item prices returned by the search
    max_price: optional, upper bound of the item prices returned by the search
    status: 1: new task, 2: in progress, 3: finished
    created: creation time
"""
__tablename__ = 'tab_project'
id = Column(Integer, primary_key=True)
market = Column(String(10))
project_name = Column(String(50))
key_word = Column(String(200))
page_number = Column(Integer)
min_price = Column(String(20))
max_price = Column(String(20))
status = Column(String(20), server_default='new')
created_at = Column(DateTime, server_default=func.now())
updated_at = Column(DateTime, server_default=func.now(), server_onupdate=func.now())
def to_json(self):
return {
'id': self.id,
'project_name': self.project_name,
'market': self.market,
'key_word': self.key_word,
'page_number': self.page_number,
'min_price': self.min_price,
'max_price': self.max_price,
'status': self.status,
'created_at': self.created_at.strftime("%Y-%m-%d %H:%M:%S") if self.created_at is not None else "",
'updated_at': self.updated_at.strftime("%Y-%m-%d %H:%M:%S") if self.updated_at is not None else ""
}
# Create the data tables; tables that already exist are ignored!!!
Base.metadata.create_all(engine) | [
"[email protected]"
] | |
09dc5e4b00b0196437111230fc9b278edca31401 | cb6f9cf1901b68cad07def3dd0fad75ab046e330 | /constructor/migrations/0037_auto_20210824_0700.py | 28b5d8a020dee9473cd9b792f1def7f8a2828737 | [] | no_license | Junaid522/Cons | 631dd9e0e3f1249bd911196ba4a2fef8357bd8fb | cdceb1d07728209dad827917c8ba88e1319c94ad | refs/heads/master | 2023-08-10T17:23:28.942917 | 2021-09-20T07:30:02 | 2021-09-20T07:30:02 | 408,348,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # Generated by Django 3.1.2 on 2021-08-24 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('constructor', '0036_currency_description'),
]
operations = [
migrations.AddField(
model_name='course',
name='career_prospects',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='course',
name='overview',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='course',
name='structure',
field=models.TextField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
be03a987402a3e0420168cf4f91c126ffd69c9de | d7641647d67d110e08997767e85bbea081c2537b | /bitmovin_api_sdk/encoding/inputs/udp_multicast/__init__.py | 82c7c6f74bb923c3eaaf1fb60471039f9d174982 | [
"MIT"
] | permissive | aachenmax/bitmovin-api-sdk-python | d3ded77c459852cbea4927ff28c2a4ad39e6026a | 931bcd8c4695a7eb224a7f4aa5a189ba2430e639 | refs/heads/master | 2022-11-16T08:59:06.830567 | 2020-07-06T07:16:51 | 2020-07-06T07:16:51 | 267,538,689 | 0 | 1 | MIT | 2020-07-06T07:16:52 | 2020-05-28T08:44:44 | Python | UTF-8 | Python | false | false | 322 | py | from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_api import UdpMulticastApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_input_list_query_params import UdpMulticastInputListQueryParams
| [
"[email protected]"
] | |
8a51a15dd22f14caa28b6fec7f2520ee774af67f | ed06ef44c944707276a2fca16d61e7820596f51c | /Python/path-with-minimum-effort.py | 46d3f4ed6f06ca05828dba1b5a76a7f30589242a | [] | no_license | sm2774us/leetcode_interview_prep_2021 | 15842bef80637c6ff43542ed7988ec4b2d03e82c | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | refs/heads/master | 2023-05-29T14:14:49.074939 | 2021-06-12T19:52:07 | 2021-06-12T19:52:07 | 374,725,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,239 | py | # Time: O(m * n * log(m * n))
# Space: O(m * n)
import heapq
# Dijkstra algorithm solution
class Solution(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
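        # Dijkstra variant: a path's cost is the largest absolute height difference
        # along it, so edges are relaxed with max(d, |diff|) instead of d + weight.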
dst = (len(heights)-1, len(heights[0])-1)
dist = [[float("inf")]*len(heights[0]) for _ in range(len(heights))]
min_heap = [(0, 0, 0)]
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
while min_heap:
d, r, c = heapq.heappop(min_heap)
if lookup[r][c]:
continue
lookup[r][c] = True
if (r, c) == dst:
return d
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and 0 <= nc < len(heights[0]) and not lookup[nr][nc]):
continue
nd = max(d, abs(heights[nr][nc]-heights[r][c]))
if nd < dist[nr][nc]:
dist[nr][nc] = nd
heapq.heappush(min_heap, (nd, nr, nc))
return -1
# Time: O(m * n * log(m * n) + m * n * α(m * n)) = O(m * n * log(m * n))
# Space: O(m * n)
import collections
class UnionFind(object): # Time: (n * α(n)), Space: O(n)
def __init__(self, n):
self.set = range(n)
self.rank = [0]*n
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root == y_root:
return False
if self.rank[x_root] < self.rank[y_root]: # union by rank
self.set[x_root] = y_root
elif self.rank[x_root] > self.rank[y_root]:
self.set[y_root] = x_root
else:
self.set[y_root] = x_root
self.rank[x_root] += 1
return True
# union find solution
class Solution2(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
def index(n, i, j):
return i*n + j
diffs = []
for i in range(len(heights)):
for j in range(len(heights[0])):
if i > 0:
diffs.append((abs(heights[i][j]-heights[i-1][j]), index(len(heights[0]), i-1, j), index(len(heights[0]), i, j)))
if j > 0:
diffs.append((abs(heights[i][j]-heights[i][j-1]), index(len(heights[0]), i, j-1), index(len(heights[0]), i, j)))
diffs.sort()
union_find = UnionFind(len(heights)*len(heights[0]))
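        # Kruskal-style sweep: add edges in increasing order of height difference
        # until the top-left and bottom-right cells land in the same component;
        # that edge's difference is the minimum effort.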
for d, i, j in diffs:
if union_find.union_set(i, j):
if union_find.find_set(index(len(heights[0]), 0, 0)) == \
union_find.find_set(index(len(heights[0]), len(heights)-1, len(heights[0])-1)):
return d
return 0
# Time: O(m * n * logh)
# Space: O(m * n)
# bi-bfs solution
class Solution3(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x): # bi-bfs
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
left, right = {(0, 0)}, {(len(heights)-1, len(heights[0])-1)}
while left:
for r, c in left:
lookup[r][c] = True
new_left = set()
for r, c in left:
if (r, c) in right:
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
new_left.add((nr, nc))
left = new_left
if len(left) > len(right):
left, right = right, left
return False
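        # Binary search the smallest effort threshold for which a path exists.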
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
# Time: O(m * n * logh)
# Space: O(m * n)
import collections
# bfs solution
class Solution4(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x):
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
q = collections.deque([(0, 0)])
while q:
r, c = q.popleft()
if (r, c) == (len(heights)-1, len(heights[0])-1):
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
lookup[nr][nc] = True
q.append((nr, nc))
return False
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
# Time: O(m * n * logh)
# Space: O(m * n)
# dfs solution
class Solution5(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x):
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
stk = [(0, 0)]
while stk:
r, c = stk.pop()
if (r, c) == (len(heights)-1, len(heights[0])-1):
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
lookup[nr][nc] = True
stk.append((nr, nc))
return False
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
| [
"[email protected]"
] | |
5e8af7a5f3c6134790c205055461a82ddd53a5a9 | 292437b85108504a7ca91571f26a639a313501b6 | /venv/lib/python2.7/site-packages/oslo_middleware/tests/test_correlation_id.py | 6dde5d8af681454e6d686cb8ed827fed79d6f0df | [] | no_license | heekof/monitoring-agent | c86bebcf77091490df7a6b8c881b85fdb2b9e4eb | b1c079efdf2dabe854f2aa3d96f36d2ec7021070 | refs/heads/master | 2021-01-15T15:39:01.512801 | 2016-08-31T20:53:38 | 2016-08-31T20:53:38 | 58,620,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslotest import base as test_base
from oslotest import moxstubout
from oslo_middleware import correlation_id
class CorrelationIdTest(test_base.BaseTestCase):
def setUp(self):
super(CorrelationIdTest, self).setUp()
self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
def test_process_request(self):
app = mock.Mock()
req = mock.Mock()
req.headers = {}
mock_uuid4 = mock.Mock()
mock_uuid4.return_value = "fake_uuid"
self.stubs.Set(uuid, 'uuid4', mock_uuid4)
middleware = correlation_id.CorrelationId(app)
middleware(req)
self.assertEqual(req.headers.get("X_CORRELATION_ID"), "fake_uuid")
def test_process_request_should_not_regenerate_correlation_id(self):
app = mock.Mock()
req = mock.Mock()
req.headers = {"X_CORRELATION_ID": "correlation_id"}
middleware = correlation_id.CorrelationId(app)
middleware(req)
self.assertEqual(req.headers.get("X_CORRELATION_ID"), "correlation_id")
| [
"[email protected]"
] | |
ec81bc05747f8dce6b33b00f5ce00cd60311d805 | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py | 5b4de28b7ced312c7ef984524a8bae2aeeca582e | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 62,759 | py | # coding=utf-8
# Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Data2Vec Vision model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from transformers.tf_utils import shape_list, stable_softmax
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFSemanticSegmenterOutput,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import (
TFModelInputType,
TFPreTrainedModel,
TFSequenceClassificationLoss,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_data2vec_vision import Data2VecVisionConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "Data2VecVisionConfig"
_FEAT_EXTRACTOR_FOR_DOC = "BeitFeatureExtractor"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base"
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k"
_IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote"
TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/data2vec-vision-base-ft1k",
# See all Data2VecVision models at https://huggingface.co/models?filter=data2vec-vision
]
@dataclass
class TFData2VecVisionModelOutputWithPooling(TFBaseModelOutputWithPooling):
"""
Class for outputs of [`TFData2VecVisionModel`].
Args:
last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
pooler_output: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
class TFData2VecVisionDropPath(tf.keras.layers.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
References:
(1) github.com:rwightman/pytorch-image-models
"""
def __init__(self, drop_path, **kwargs):
super().__init__(**kwargs)
self.drop_path = drop_path
def call(self, x, training=None):
if training:
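            # Stochastic depth: per sample, drop the whole residual branch with
            # probability drop_path (random_tensor is a 0/1 mask broadcast over the
            # non-batch axes) and rescale survivors by 1/keep_prob to keep the
            # expected value unchanged.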
keep_prob = 1 - self.drop_path
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
random_tensor = tf.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
class TFData2VecVisionEmbeddings(tf.keras.layers.Layer):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.patch_embeddings = TFData2VecVisionPatchEmbeddings(config, name="patch_embeddings")
self.num_patches = self.patch_embeddings.num_patches
self.config = config
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
self.cls_token = self.add_weight(
shape=(1, 1, self.config.hidden_size),
initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
trainable=True,
name="cls_token",
)
if self.config.use_mask_token:
self.mask_token = self.add_weight(
shape=(1, 1, self.config.hidden_size),
initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
trainable=True,
name="mask_token",
)
else:
self.mask_token = None
if self.config.use_absolute_position_embeddings:
self.position_embeddings = self.add_weight(
shape=(1, self.num_patches + 1, self.config.hidden_size),
initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
trainable=True,
name="position_embeddings",
)
else:
self.position_embeddings = None
super().build(input_shape)
def call(self, pixel_values: tf.Tensor, bool_masked_pos: Optional[tf.Tensor] = None) -> tf.Tensor:
embeddings = self.patch_embeddings(pixel_values)
batch_size, seq_len, projection_dim = shape_list(embeddings)
cls_tokens = tf.tile(self.cls_token, (batch_size, 1, 1))
if bool_masked_pos is not None:
mask_tokens = tf.broadcast_to(self.mask_token, (batch_size, seq_len, projection_dim))
# replace the masked visual tokens by mask_tokens
w = bool_masked_pos[..., None]
w = tf.cast(w, mask_tokens.dtype)
# since TF doesn't support eager tensor assignment
embeddings = embeddings * (1 - w) + mask_tokens * w
embeddings = tf.concat([cls_tokens, embeddings], axis=1)
if self.position_embeddings is not None:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class TFData2VecVisionPatchEmbeddings(tf.keras.layers.Layer):
"""
Image to Patch Embedding.
"""
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.patch_shape = patch_shape
self.num_channels = num_channels
self.projection = tf.keras.layers.Conv2D(
filters=hidden_size,
kernel_size=patch_size,
strides=patch_size,
padding="valid",
data_format="channels_last",
kernel_initializer="glorot_uniform", # following torch.nn.Linear
bias_initializer="zeros",
name="projection",
)
def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
batch_size, num_channels, height, width = shape_list(pixel_values)
if tf.executing_eagerly():
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the"
" configuration."
)
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model"
f" ({self.image_size[0]}*{self.image_size[1]})."
)
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
projection = self.projection(pixel_values)
# Change the 2D spatial dimensions to a single temporal dimension.
# shape = (batch_size, num_patches, out_channels=embed_dim)
num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
return tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))
class TFData2VecVisionSelfAttention(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="key",
use_bias=False,
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
if window_size:
self.relative_position_bias = TFData2VecVisionRelativePositionBias(
config, window_size=window_size, name="relative_position_bias"
)
else:
self.relative_position_bias = None
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
hidden_states: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(inputs=hidden_states)
mixed_key_layer = self.key(inputs=hidden_states)
mixed_value_layer = self.value(inputs=hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = attention_scores / self.sqrt_att_head_size
# Add relative position bias if present.
if self.relative_position_bias is not None:
# Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
# might complain about `Layer.call()` not being invoked properly. In this case this input
# i.e., 0.0 is not going to be used in any calculations so we're safe.
attention_scores = attention_scores + self.relative_position_bias(0.0)[None, ...]
# Add shared relative position bias if provided.
if relative_position_bias is not None:
attention_scores = attention_scores + relative_position_bias
# Normalize the attention scores to probabilities.
attention_probs = stable_softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
attention_output = tf.matmul(attention_probs, value_layer)
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class TFData2VecVisionSelfOutput(tf.keras.layers.Layer):
"""
The residual connection is defined in TFData2VecVisionLayer instead of here (as is the case with other models), due
to the layernorm applied before each block.
"""
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, gamma=None, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
return hidden_states
class TFData2VecVisionAttention(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
super().__init__(**kwargs)
self.attention = TFData2VecVisionSelfAttention(config, window_size=window_size, name="attention")
self.dense_output = TFData2VecVisionSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self,
input_tensor: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
training: bool = False,
) -> Tuple[tf.Tensor]:
self_outputs = self.attention(
hidden_states=input_tensor,
head_mask=head_mask,
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
training=training,
)
attention_output = self.dense_output(
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->Data2VecVision
class TFData2VecVisionIntermediate(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFData2VecVisionOutput(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
return hidden_states
class TFData2VecVisionLayer(tf.keras.layers.Layer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(
self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0, **kwargs
):
super().__init__(**kwargs)
self.config = config
self.attention = TFData2VecVisionAttention(config, window_size=window_size, name="attention")
self.intermediate = TFData2VecVisionIntermediate(config, name="intermediate")
self.data2vec_output = TFData2VecVisionOutput(config, name="output")
self.layernorm_before = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="layernorm_before"
)
self.layernorm_after = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="layernorm_after"
)
# Using `layers.Activation` instead of `tf.identity` to better control `training`
# behaviour.
self.drop_path = (
TFData2VecVisionDropPath(drop_path_rate, name="drop_path")
if drop_path_rate > 0.0
else tf.keras.layers.Activation("linear", name="drop_path")
)
self.init_values = config.layer_scale_init_value
def build(self, input_shape: tf.TensorShape):
if self.init_values > 0:
self.lambda_1 = self.add_weight(
shape=(self.config.hidden_size),
initializer="ones",
trainable=True,
name="lambda_1",
)
self.lambda_2 = self.add_weight(
shape=(self.config.hidden_size),
initializer="ones",
trainable=True,
name="lambda_2",
)
self.lambda_1.assign(self.init_values * tf.ones((self.config.hidden_size)))
self.lambda_2.assign(self.init_values * tf.ones((self.config.hidden_size)))
else:
self.lambda_1, self.lambda_2 = None, None
super().build(input_shape)
def call(
self,
hidden_states: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
training: bool = False,
) -> Tuple[tf.Tensor]:
self_attention_outputs = self.attention(
# in Data2VecVision, layernorm is applied before self-attention
input_tensor=self.layernorm_before(inputs=hidden_states),
head_mask=head_mask,
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
training=training,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# apply lambda_1 if present
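        # (lambda_1 / lambda_2 implement LayerScale: learned per-channel scales on the
        # residual branches, initialised to config.layer_scale_init_value)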
if self.lambda_1 is not None:
attention_output = self.lambda_1 * attention_output
# first residual connection
hidden_states = self.drop_path(attention_output) + hidden_states
# in Data2VecVision, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.data2vec_output(layer_output)
if self.lambda_2 is not None:
layer_output = self.lambda_2 * layer_output
# second residual connection
layer_output = self.drop_path(layer_output) + hidden_states
outputs = (layer_output,) + outputs
return outputs
# Taken and modified from here:
# https://github.com/leondgarse/keras_cv_attention_models/blob/main/keras_cv_attention_models/beit/beit.py#L28
class TFData2VecVisionRelativePositionBias(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple, **kwargs) -> None:
super().__init__(**kwargs)
self.config = config
self.window_size = window_size
# +3 for cls_token_pos_len
# window_size can be something like (14, 14)
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_index = self.get_position_index()
def build(self, input_shape):
self.relative_position_bias_table = self.add_weight(
shape=(self.num_relative_distance, self.config.num_attention_heads),
initializer="zeros",
trainable=True,
name="relative_position_bias_table",
) # [2*Wh-1 * 2*Ww-1, nH]
# cls to token & token 2 cls & cls to cls
super().build(input_shape)
def get_position_index(self):
# get pair-wise relative position index for each token inside the window
xx, yy = tf.meshgrid(range(self.window_size[0]), range(self.window_size[1]))
coords = tf.stack([yy, xx], axis=0) # [2, Wh, Ww]
coords_flatten = tf.reshape(coords, [2, -1]) # [2, Wh*Ww]
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Wh*Ww, Wh*Ww]
relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0]) # [Wh*Ww, Wh*Ww, 2]
xx = (relative_coords[:, :, 0] + self.window_size[0] - 1) * (2 * self.window_size[1] - 1)
yy = relative_coords[:, :, 1] + self.window_size[1] - 1
relative_coords = tf.stack([xx, yy], axis=-1)
relative_position_index = tf.reduce_sum(relative_coords, axis=-1) # [Wh*Ww, Wh*Ww]
top = tf.ones((1, relative_position_index.shape[1]), dtype=relative_position_index.dtype) * (
self.num_relative_distance - 3
)
left = tf.ones((relative_position_index.shape[0], 1), dtype=relative_position_index.dtype) * (
self.num_relative_distance - 2
)
corner = tf.ones((1, 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 1)
left_corner = tf.concat([corner, left], axis=0)
relative_position_index = tf.concat([top, relative_position_index], axis=0)
relative_position_index = tf.concat([left_corner, relative_position_index], axis=1) # [Wh*Ww + 1, Wh*Ww + 1]
return relative_position_index
def call(self, inputs=None) -> tf.Tensor:
relative_position_bias = tf.gather(self.relative_position_bias_table, self.relative_position_index, axis=0)
return tf.transpose(relative_position_bias, [2, 0, 1])
class TFData2VecVisionEncoder(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
if config.use_shared_relative_position_bias:
self.relative_position_bias = TFData2VecVisionRelativePositionBias(
config, window_size=window_size, name="relative_position_bias"
)
else:
self.relative_position_bias = None
# stochastic depth decay rule
dpr = [x for x in tf.linspace(0.0, config.drop_path_rate, config.num_hidden_layers)]
self.layer = [
TFData2VecVisionLayer(
config,
window_size=window_size if config.use_relative_position_bias else None,
drop_path_rate=dpr[i],
name=f"layer_._{i}",
)
for i in range(config.num_hidden_layers)
]
def call(
self,
hidden_states: tf.Tensor,
head_mask: Optional[tf.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, TFBaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
# Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
# might complain about `Layer.call()` not being invoked properly. In this case this input
# i.e., 0.0 is not going to be used in any calculations so we're safe.
relative_position_bias = (
self.relative_position_bias(0.0) if self.relative_position_bias is not None else None
)
layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@keras_serializable
class TFData2VecVisionMainLayer(tf.keras.layers.Layer):
config_class = Data2VecVisionConfig
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.add_pooling_layer = add_pooling_layer
self.embeddings = TFData2VecVisionEmbeddings(config, name="embeddings")
self.encoder = TFData2VecVisionEncoder(
config, window_size=self.embeddings.patch_embeddings.patch_shape, name="encoder"
)
self.layernorm = (
tf.identity
if config.use_mean_pooling
else tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
)
# We are setting the `data_format` like so because from here on we will revert to the
# NCHW output format
self.pooler = TFData2VecVisionPooler(config, name="pooler") if add_pooling_layer else None
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
@unpack_inputs
def call(
self,
pixel_values: Optional[tf.Tensor] = None,
bool_masked_pos: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(pixel_values, bool_masked_pos, training=training)
encoder_outputs = self.encoder(
embedding_output,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
return head_outputs + encoder_outputs[1:]
return TFData2VecVisionModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFData2VecVisionPooler(tf.keras.layers.Layer):
def __init__(self, config: Data2VecVisionConfig, **kwargs):
super().__init__(**kwargs)
self.layernorm = (
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
if config.use_mean_pooling
else None
)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
if self.layernorm is not None:
# Mean pool the final hidden states of the patch tokens
patch_tokens = hidden_states[:, 1:, :]
pooled_output = self.layernorm(tf.reduce_mean(patch_tokens, axis=1))
else:
# Pool by simply taking the final hidden state of the [CLS] token
pooled_output = hidden_states[:, 0]
return pooled_output
class TFData2VecVisionPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = Data2VecVisionConfig
base_model_prefix = "data2vec_vision"
main_input_name = "pixel_values"
_keys_to_ignore_on_load_unexpected = [r"relative_position_index"]
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network. Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
VISION_DUMMY_INPUTS = tf.random.uniform(
shape=(3, self.config.num_channels, self.config.image_size, self.config.image_size),
dtype=tf.float32,
)
return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}
@tf.function(
input_signature=[
{
"pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
}
]
)
def serving(self, inputs):
"""
Method used for serving the model.
Args:
inputs (`Dict[str, tf.Tensor]`):
The input of the saved model as a dictionary of tensors.
"""
output = self.call(inputs)
return self.serving_output(output)
DATA2VEC_VISION_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.).
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Args:
config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
DATA2VEC_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`BeitFeatureExtractor`]. See
[`BeitFeatureExtractor.__call__`] for details.
head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionModel(TFData2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.config = config
self.data2vec_vision = TFData2VecVisionMainLayer(
config, add_pooling_layer=add_pooling_layer, name="data2vec_vision"
)
def get_input_embeddings(self):
return self.data2vec_vision.get_input_embeddings()
@unpack_inputs
@add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFData2VecVisionModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def call(
self,
pixel_values: Optional[TFModelInputType] = None,
bool_masked_pos: Optional[tf.Tensor] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
outputs = self.data2vec_vision(
pixel_values=pixel_values,
bool_masked_pos=bool_masked_pos,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
def serving_output(self, output: TFData2VecVisionModelOutputWithPooling) -> TFData2VecVisionModelOutputWithPooling:
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFData2VecVisionModelOutputWithPooling(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=hidden_states,
attentions=attentions,
)
@add_start_docstrings(
"""
Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
the final hidden states of the patch tokens) e.g. for ImageNet.
""",
DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionForImageClassification(TFData2VecVisionPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=True, name="data2vec_vision")
# Classifier head
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
@unpack_inputs
@add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def call(
self,
pixel_values: Optional[TFModelInputType] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
) -> Union[TFSequenceClassifierOutput, tuple]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.data2vec_vision(
pixel_values=pixel_values,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions)
class TFData2VecVisionConvModule(tf.keras.layers.Layer):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(
self,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
padding: str = "valid",
bias: bool = False,
dilation: Union[int, Tuple[int, int]] = 1,
**kwargs
) -> None:
super().__init__(**kwargs)
self.conv = tf.keras.layers.Conv2D(
filters=out_channels,
kernel_size=kernel_size,
padding=padding,
use_bias=bias,
dilation_rate=dilation,
name="conv",
)
self.bn = tf.keras.layers.BatchNormalization(name="bn", momentum=0.9, epsilon=1e-5)
self.activation = tf.nn.relu
def call(self, input: tf.Tensor) -> tf.Tensor:
output = self.conv(input)
output = self.bn(output)
output = self.activation(output)
return output
# Copied from:
# https://gist.github.com/Rocketknight1/43abbe6e73f1008e6e459486e01e0ceb
class TFAdaptiveAvgPool1D(tf.keras.layers.Layer):
def __init__(self, output_dim, mode="dense", **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.mode = mode
self.map = None
def build(self, input_shape):
super().build(input_shape)
"""We pre-compute the sparse matrix for the build() step once. The below code comes
from https://stackoverflow.com/questions/53841509/how-does-adaptive-pooling-in-pytorch-work/63603993#63603993."""
def get_kernels(ind, outd) -> List:
"""Returns a List [(kernel_offset_start,kernel_length)] defining all the pooling kernels for a 1-D adaptive
pooling layer that takes an input of dimension `ind` and yields an output of dimension `outd`"""
def start_index(a, b, c):
return math.floor((float(a) * float(c)) / b)
def end_index(a, b, c):
return math.ceil((float(a + 1) * float(c)) / b)
results = []
for ow in range(outd):
start = start_index(ow, outd, ind)
end = end_index(ow, outd, ind)
sz = end - start
results.append((start, sz))
return results
in_dim = int(input_shape[-1])
kernels = get_kernels(in_dim, self.output_dim)
sparse_map = np.zeros((in_dim, self.output_dim), dtype=np.float32)
for i, kernel in enumerate(kernels):
sparse_map[kernel[0] : kernel[0] + kernel[1], i] = 1 / kernel[1]
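        # Column i of this (in_dim, output_dim) map holds 1/kernel_length over the input positions
        # covered by output bin i, so the single matmul in call() performs adaptive average pooling.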
if self.mode == "dense":
self.map = tf.constant(sparse_map)
else:
self.map = tf.sparse.from_dense(sparse_map)
def call(self, inputs):
if self.mode == "dense":
return inputs @ self.map
else:
input_dims = inputs.shape
input_matrix = tf.reshape(inputs, (-1, input_dims[-1]))
out = tf.sparse.sparse_dense_matmul(input_matrix, self.map)
return tf.reshape(out, input_dims[:-1].as_list() + [-1])
def get_config(self):
config = super().get_config()
config.update({"output_dim": self.output_dim, "mode": self.mode})
return config
class TFAdaptiveAvgPool2D(tf.keras.layers.Layer):
def __init__(self, output_shape, mode="dense", **kwargs):
super().__init__(**kwargs)
self.mode = mode
self.h_pool = TFAdaptiveAvgPool1D(output_shape[0], mode=mode, name="h_pool")
self.w_pool = TFAdaptiveAvgPool1D(output_shape[1], mode=mode, name="w_pool")
def call(self, inputs):
# Rearrange from NHWC -> NCHW
inputs = tf.transpose(inputs, perm=[0, 3, 1, 2])
# Perform W-pooling
inputs = self.w_pool(inputs)
# Rearrange NCHW -> NCWH
inputs = tf.transpose(inputs, perm=[0, 1, 3, 2])
# Perform H-pooling
inputs = self.h_pool(inputs)
# Rearrange from NCWH -> NHWC
inputs = tf.transpose(inputs, perm=[0, 3, 2, 1])
return inputs
def get_config(self):
config = super().get_config()
config.update({"mode": self.mode})
return config
class TFData2VecVisionPyramidPoolingModule(tf.keras.layers.Layer):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
channels (int): Channels after modules, before conv_seg.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, pool_scales: Tuple[int, ...], channels: int, **kwargs) -> None:
super().__init__(**kwargs)
self.pool_scales = pool_scales
self.channels = channels
self.layer_list = []
for idx, pool_scale in enumerate(pool_scales):
pool_scale = pool_scale if isinstance(pool_scale, collections.abc.Iterable) else (pool_scale, pool_scale)
self.layer_list.append(
[
TFAdaptiveAvgPool2D(output_shape=pool_scale),
TFData2VecVisionConvModule(out_channels=self.channels, kernel_size=1, name=f"{idx}.1"),
]
)
def call(self, x: tf.Tensor) -> List[tf.Tensor]:
ppm_outs = []
inputs = x
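        # Each (adaptive-pool, 1x1-conv) branch reduces its input to pool_scale x pool_scale,
        # projects the channels, and bilinearly upsamples the result back to the spatial size
        # of the original input before it is collected in ppm_outs.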
for ppm in self.layer_list:
for layer_module in ppm:
ppm_out = layer_module(x)
x = ppm_out
upsampled_ppm_out = tf.image.resize(ppm_out, size=shape_list(inputs)[1:-1], method="bilinear")
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
class TFData2VecVisionUperHead(tf.keras.layers.Layer):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://arxiv.org/abs/1807.10221).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, config: Data2VecVisionConfig, **kwargs) -> None:
super().__init__(**kwargs)
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
self.channels = config.hidden_size
self.classifier = tf.keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")
# PSP Module
self.psp_modules = TFData2VecVisionPyramidPoolingModule(self.pool_scales, self.channels, name="psp_modules")
self.bottleneck = TFData2VecVisionConvModule(self.channels, kernel_size=3, padding="same", name="bottleneck")
# FPN Module
self.lateral_convs = []
self.fpn_convs = []
for idx, _ in enumerate(self.in_channels[:-1]): # skip the top layer
l_conv = TFData2VecVisionConvModule(out_channels=self.channels, kernel_size=1, name=f"lateral_convs.{idx}")
fpn_conv = TFData2VecVisionConvModule(
out_channels=self.channels, kernel_size=3, padding="same", name=f"fpn_convs.{idx}"
)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = TFData2VecVisionConvModule(
out_channels=self.channels, kernel_size=3, padding="same", name="fpn_bottleneck"
)
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = tf.concat(psp_outs, axis=-1)
output = self.bottleneck(psp_outs)
return output
def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = shape_list(laterals[i - 1])[1:-1]
laterals[i - 1] = laterals[i - 1] + tf.image.resize(laterals[i], size=prev_shape, method="bilinear")
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = tf.image.resize(fpn_outs[i], size=shape_list(fpn_outs[0])[1:-1], method="bilinear")
fpn_outs = tf.concat(fpn_outs, axis=-1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
class TFData2VecVisionFCNHead(tf.keras.layers.Layer):
"""
Fully Convolution Networks for Semantic Segmentation. This head is implemented from
[FCNNet](https://arxiv.org/abs/1411.4038).
Args:
config (Data2VecVisionConfig): Configuration.
kernel_size (int): The kernel size for convs in the head. Default: 3.
dilation (int): The dilation rate for convs in the head. Default: 1.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(
self,
config: Data2VecVisionConfig,
in_index: int = 2,
kernel_size: int = 3,
dilation: Union[int, Tuple[int, int]] = 1,
**kwargs
) -> None:
super().__init__(**kwargs)
self.in_channels = config.hidden_size
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
convs = []
convs.append(
TFData2VecVisionConvModule(
out_channels=self.channels,
kernel_size=kernel_size,
padding="same",
dilation=dilation,
name="convs.0",
)
)
for i in range(self.num_convs - 1):
convs.append(
TFData2VecVisionConvModule(
out_channels=self.channels,
kernel_size=kernel_size,
padding="same",
dilation=dilation,
name=f"conv_module_{i+2}",
)
)
if self.num_convs == 0:
self.convs = [tf.identity]
else:
self.convs = convs
if self.concat_input:
self.conv_cat = TFData2VecVisionConvModule(
out_channels=self.channels, kernel_size=kernel_size, padding="same", name="conv_cat"
)
self.classifier = tf.keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")
def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
# just take the relevant feature maps
hidden_states = encoder_hidden_states[self.in_index]
output = hidden_states
for layer_module in self.convs:
output = layer_module(output)
if self.concat_input:
output = self.conv_cat(tf.concat([hidden_states, output], axis=-1))
output = self.classifier(output)
return output
@add_start_docstrings(
"""
Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
""",
DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionForSemanticSegmentation(TFData2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None:
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=False, name="data2vec_vision")
# FPNs
self.fpn1 = [
tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.0"),
tf.keras.layers.BatchNormalization(name="fpn1.1", momentum=0.9, epsilon=1e-5),
tf.keras.layers.Activation("gelu"),
tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.3"),
]
self.fpn2 = [tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn2.0")]
self.fpn3 = tf.identity
self.fpn4 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
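        # The four FPN branches turn the single-scale ViT features into a small feature pyramid:
        # fpn1 upsamples 4x (two stride-2 transposed convs), fpn2 upsamples 2x, fpn3 keeps the
        # resolution unchanged, and fpn4 downsamples 2x via max pooling.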
# Semantic segmentation head(s)
self.decode_head = TFData2VecVisionUperHead(config, name="decode_head")
self.auxiliary_head = (
TFData2VecVisionFCNHead(config, name="auxiliary_head") if config.use_auxiliary_head else None
)
def compute_loss(self, logits, auxiliary_logits, labels):
# upsample logits to the images' original size
if len(shape_list(labels)) > 3:
label_interp_shape = shape_list(labels)[1:-1]
else:
label_interp_shape = shape_list(labels)[-2:]
upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear")
if auxiliary_logits is not None:
upsampled_auxiliary_logits = tf.image.resize(auxiliary_logits, size=label_interp_shape, method="bilinear")
# compute weighted loss
loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
# Copied from https://www.tensorflow.org/text/tutorials/transformer#loss_and_metrics.
# Utility to mask the index to ignore during computing the loss.
def masked_loss(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, self.config.semantic_loss_ignore_index))
loss_ = loss_fct(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask)
return tf.reshape(reduced_masked_loss, (1,))
main_loss = masked_loss(labels, upsampled_logits)
auxiliary_loss = masked_loss(labels, upsampled_auxiliary_logits)
loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
return loss
@unpack_inputs
@add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
pixel_values: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
labels: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TFSemanticSegmenterOutput]:
r"""
labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Returns:
Examples:
```python
>>> from transformers import AutoFeatureExtractor, TFData2VecVisionForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
>>> model = TFData2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
        >>> inputs = feature_extractor(images=image, return_tensors="tf")
>>> outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.data2vec_vision(
pixel_values,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=True, # we need the intermediate hidden states
return_dict=return_dict,
)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
# only keep certain features, and reshape
# note that we do +1 as the encoder_hidden_states also includes the initial embeddings
features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
batch_size = shape_list(pixel_values)[0]
patch_resolution = self.config.image_size // self.config.patch_size
def reshape_features(x):
x = tf.reshape(x, (batch_size, patch_resolution, patch_resolution, -1))
return x
features = [reshape_features(x[:, 1:, :]) for x in features]
# apply FPNs
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for module in ops[0]:
features[0] = module(features[0])
features[1] = ops[1][0](features[1])
for i in range(len(features[2:])):
features[i + 2] = ops[i + 2](features[i + 2])
logits = self.decode_head(features)
        # Transpose the logits to maintain consistency in the output formats.
transposed_logits = tf.transpose(logits, perm=[0, 3, 1, 2])
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
loss = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
else:
loss = self.compute_loss(logits, auxiliary_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSemanticSegmenterOutput(
loss=loss,
logits=transposed_logits,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
def serving_output(self, output: TFSemanticSegmenterOutput) -> TFSemanticSegmenterOutput:
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSemanticSegmenterOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions)
| [
"[email protected]"
] | |
6497240502fa621dc2ea8c4dcbce6f85011972b3 | e8b6a669bdec937a4226e749a98c7e3905e327db | /rainbow/settings.py | 445b701eab004e8de35ade8739bda4dbea1d130e | [] | no_license | danielmoniz/Rainbow | bef52a7bd18f225d48822aa563af03bbba862b9e | a9085476dc83a582b87927251cc269f228ecf557 | refs/heads/master | 2016-09-11T03:58:41.528607 | 2012-06-13T19:39:58 | 2012-06-13T19:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,675 | py | # Django settings for rainbow project.
from private_settings import get_database_settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# This data comes from private_settings.py
DATABASES = get_database_settings()
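# get_database_settings() is expected to return a standard Django DATABASES dict.
# A hypothetical example (engine and names are placeholders, not the real credentials):
# {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'rainbow.db',
#     }
# }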
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/daniel/python_practice/rainbow/sitestatic/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# The URL all users will be redirected to after login.
# @TODO Make this dynamic! Users should be redirected to their last location.
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/home/daniel/projects/rainbow/static",
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$w&7dt1vzfgex6d0(_jzrf&&k^7j8gm&18r9kawiufns*59(e3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'rainbow.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'rainbow.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/home/daniel/projects/rainbow/django_templates",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.webdesign',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# @TODO Surely I need to put the installed apps here? Eg. build_world and users
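# For example, the project's own apps would normally be appended to INSTALLED_APPS above
# (labels taken from the TODO note; adjust to the actual app modules):
#     INSTALLED_APPS += (
#         'build_world',
#         'users',
#     )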
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
] | |
b65f1abafd197004b408adad6774a73815be6aa0 | 6b9adefb8c3730e1b9edab5605e86ee4f1cfe53c | /treedb/__init__.py | 610918df1417c888efe026a930ef6e857065fd8c | [
"MIT"
] | permissive | glottolog/treedb | 8ac4b5dd6923a196ceb02f191200cd8053a2cd88 | 81e6a855e5d69bebc86e1fca05c938621c87ba7c | refs/heads/master | 2023-07-21T04:04:27.709761 | 2023-07-17T20:10:20 | 2023-07-17T20:10:20 | 194,383,732 | 5 | 2 | MIT | 2022-05-24T17:48:32 | 2019-06-29T08:41:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,066 | py | """Load Glottolog lanuoid tree ``md.ini`` files into SQLite3 database."""
from ._globals import SESSION as Session # noqa: N811
from ._tools import sha256sum
from .backend import (print_versions,
set_engine,
connect,
scalar,
iterrows)
from .backend.export import (print_dataset,
print_schema,
print_query_sql,
backup,
dump_sql,
csv_zipfile,
print_rows,
write_csv,
hash_csv)
from .backend.load import main as load
from .backend.models import Dataset, Producer, Config
from .backend.pandas import pd_read_sql, pd_read_json_lines
from .backend.sqlite_master import print_table_sql, select_tables_nrows
from .backend.views import TABLES as views # noqa: N811
from .languoids import set_root, iterfiles
from .checks import check, compare_languoids
from .export import (print_languoid_stats,
iterlanguoids,
checksum,
write_json_lines as write_languoids,
pd_read_languoids,
write_files)
from .glottolog import glottolog_version, checkout_or_clone
from .logging_ import configure_logging
from .models import LEVEL, Languoid
from .queries import (get_example_query,
get_json_query as get_languoids_query,
iterdescendants)
from .settings import configure, get_default_root
__all__ = ['Session',
'sha256sum',
'print_versions',
'set_engine', 'connect', 'scalar', 'iterrows',
'print_dataset',
'print_schema', 'print_query_sql',
'backup', 'dump_sql', 'csv_zipfile',
'print_rows', 'write_csv', 'hash_csv',
'load',
'Dataset', 'Producer', 'Config',
'pd_read_sql', 'pd_read_json_lines',
'print_table_sql', 'select_tables_nrows',
'views',
'set_root', 'iterfiles',
'check', 'compare_languoids',
'print_languoid_stats',
'iterlanguoids',
'checksum',
'write_languoids',
'pd_read_languoids',
'write_files',
'glottolog_version', 'checkout_or_clone',
'configure_logging',
'LEVEL', 'Languoid',
'get_example_query',
'get_languoids_query',
'iterdescendants',
'configure',
'engine', 'root']
__title__ = 'treedb'
__version__ = '2.6.3.dev0'
__author__ = 'Sebastian Bank <[email protected]>'
__license__ = 'MIT, see LICENSE.txt'
__copyright__ = 'Copyright (c) 2017-2023 Sebastian Bank'
# default engine: in-memory database
engine = set_engine(None)
# default root: GLOTTOLOG_REPO_ROOT, or treedb.ini glottolog:repo_root, or ./glottolog
root = set_root(get_default_root(env_var='GLOTTOLOG_REPO_ROOT'))
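# A typical interactive session might look like this (database path is illustrative):
#     import treedb
#     treedb.set_engine('treedb.sqlite3')   # persistent file instead of the in-memory default
#     treedb.load()                         # read the languoid md.ini files into SQLite
#     treedb.check()                        # run consistency checks
#     treedb.write_csv()                    # export the main query as CSV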
| [
"[email protected]"
] | |
a0c813f549809f79f8ddebdafa8f599069dbc6db | 79d512d58ae10492c3e82a475d30f3026561676b | /env/bin/pip3.8 | 6761b455abe33e3eb91291e20bd91dd5fde0c6bf | [] | no_license | puskarkarki/BlogApplication | 6b63c72e1583ccb7a9b7e962e4d8e4c32b11fef0 | 9cc67e79d75cc6e6f83bd3b81ff02142e0c37efa | refs/heads/master | 2023-07-22T15:20:40.465751 | 2021-09-03T10:19:05 | 2021-09-03T10:19:05 | 381,980,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | 8 | #!/home/linux/PycharmProjects/blog/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
188529fb0dbd729ac43830eb4e0ca46e6b0fad6a | 88be4d5657d19462eb1d74d2d4d98180b423a889 | /scripts/plot_experiment.py | 6579ab2bdda9aaa2117fdcbaab143a8dce51aafd | [
"BSD-3-Clause"
] | permissive | domingoesteban/robolearn | bc58278fe38894f4ca9ec9e657ee13a479a368b7 | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | refs/heads/master | 2020-04-15T22:38:25.343229 | 2019-01-29T17:01:42 | 2019-01-29T17:01:42 | 165,080,647 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,524 | py | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from robolearn.utils.plots import plot_process_iu_returns
from robolearn.utils.plots import plot_process_iu_avg_rewards
from robolearn.utils.plots import plot_process_iu_policies
from robolearn.utils.plots import plot_process_iu_values_errors
from robolearn.utils.plots import plot_process_iu_alphas
from robolearn.utils.plots import plot_process_general_data
from robolearn.utils.plots.learning_process_plots import plot_process_haarnoja
import json
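# Example invocation (the progress.csv path is illustrative):
#     python plot_experiment.py ./save/progress.csv --un 2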
def main(args):
    # Load the experiment variant to get the algorithm name
dirname = os.path.dirname(args.file)
with open(os.path.join(dirname, 'variant.json')) as json_data:
algo_name = json.load(json_data)['algo_name']
# Plot according to RL algorithm
if algo_name in ['HIUSAC', 'HIUSACNEW', 'SAC', 'HIUSACEpisodic']:
plot_process_iu_values_errors(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,
block=False, plot_intentional=args.no_in,
deterministic=False)
plot_process_iu_alphas(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_avg_rewards(csv_file=args.file,
n_unintentional=args.un,
block=False)
elif algo_name in ['HIUDDPG']:
plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,
block=False, plot_intentional=args.no_in,
deterministic=True)
plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,
block=False)
else:
plot_process_general_data(csv_file=args.file, block=False)
# plot_process_haarnoja(csv_file=args.file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, default='./progress.csv',
help='path to the progress.csv file')
parser.add_argument('--un', type=int, default=-1,
help='Unintentional id')
parser.add_argument('--no_in', action='store_false')
args = parser.parse_args()
main(args)
input('Press a key to close script')
| [
"[email protected]"
] | |
9b5418cd23ca662fe1c45fcca5e76495bc07df0a | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/obs_content_req.py | 22822ea39ffd91a7bf569bf3a12ad4b92bc758b9 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,178 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ObsContentReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'bucket_name': 'str',
'ak': 'str',
'sk': 'str'
}
attribute_map = {
'bucket_name': 'bucket_name',
'ak': 'ak',
'sk': 'sk'
}
def __init__(self, bucket_name=None, ak=None, sk=None):
"""ObsContentReq
The model defined in huaweicloud sdk
        :param bucket_name: Bucket name
:type bucket_name: str
        :param ak: The tenant's AK (access key)
:type ak: str
        :param sk: The tenant's SK (secret key)
:type sk: str
"""
self._bucket_name = None
self._ak = None
self._sk = None
self.discriminator = None
self.bucket_name = bucket_name
self.ak = ak
self.sk = sk
@property
def bucket_name(self):
"""Gets the bucket_name of this ObsContentReq.
        Bucket name
:return: The bucket_name of this ObsContentReq.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""Sets the bucket_name of this ObsContentReq.
        Bucket name
:param bucket_name: The bucket_name of this ObsContentReq.
:type bucket_name: str
"""
self._bucket_name = bucket_name
@property
def ak(self):
"""Gets the ak of this ObsContentReq.
        The tenant's AK (access key)
:return: The ak of this ObsContentReq.
:rtype: str
"""
return self._ak
@ak.setter
def ak(self, ak):
"""Sets the ak of this ObsContentReq.
        The tenant's AK (access key)
:param ak: The ak of this ObsContentReq.
:type ak: str
"""
self._ak = ak
@property
def sk(self):
"""Gets the sk of this ObsContentReq.
        The tenant's SK (secret key)
:return: The sk of this ObsContentReq.
:rtype: str
"""
return self._sk
@sk.setter
def sk(self, sk):
"""Sets the sk of this ObsContentReq.
        The tenant's SK (secret key)
:param sk: The sk of this ObsContentReq.
:type sk: str
"""
self._sk = sk
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObsContentReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
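# Example construction (bucket name and credential values are placeholders):
#     req = ObsContentReq(bucket_name='my-bucket', ak='<access-key>', sk='<secret-key>')
#     print(req.to_dict())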
| [
"[email protected]"
] | |
65ae635d43801f0ac9401fab6afbe228040b58f9 | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-Cocoa/PyObjCTest/test_cfuuid.py | 12c65667350ddf8c8be73389156dd615e6a3126b | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,982 | py | import re
import CoreFoundation
from PyObjCTools.TestSupport import TestCase
class TestCFUUIDAPI(TestCase):
def testTypes(self):
self.assertIsCFType(CoreFoundation.CFUUIDRef)
def testTypeID(self):
v = CoreFoundation.CFUUIDGetTypeID()
self.assertIsInstance(v, int)
def testCreate(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreate)
uuid = CoreFoundation.CFUUIDCreate(None)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertIsInstance(text, str)
m = re.match("^[0-9A-Z]{8}(-[0-9A-Z]{4}){3}-[0-9A-Z]{12}$", text)
self.assertIsNot(m, None)
def testCreateWithBytes(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateWithBytes)
uuid = CoreFoundation.CFUUIDCreateWithBytes(
None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateString)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
self.assertRaises(
ValueError,
CoreFoundation.CFUUIDCreateWithBytes,
None,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
300,
)
self.assertRaises(
ValueError,
CoreFoundation.CFUUIDCreateWithBytes,
None,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
300,
16,
)
def testCreateFromString(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateFromString)
uuid1 = CoreFoundation.CFUUIDCreateFromString(
None, "01020304-0506-0708-090A-0B0C0D0E0F10"
)
self.assertIsNot(uuid1, None)
self.assertIsInstance(uuid1, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid1)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
uuid2 = CoreFoundation.CFUUIDCreateFromString(
None, "01020304-0506-0708-090A-0B0C0D0E0F10"
)
text = CoreFoundation.CFUUIDCreateString(None, uuid2)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
# CoreFoundation.CFUUID interns values
self.assertIs(uuid1, uuid2)
def testGetBytes(self):
uuid = CoreFoundation.CFUUIDCreateWithBytes(
None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
bytes_value = CoreFoundation.CFUUIDGetUUIDBytes(uuid)
self.assertIsInstance(bytes_value, CoreFoundation.CFUUIDBytes)
self.assertEqual(bytes_value.byte0, 1)
self.assertEqual(bytes_value.byte1, 2)
self.assertEqual(bytes_value.byte2, 3)
self.assertEqual(bytes_value.byte3, 4)
self.assertEqual(bytes_value.byte4, 5)
self.assertEqual(bytes_value.byte5, 6)
self.assertEqual(bytes_value.byte6, 7)
self.assertEqual(bytes_value.byte7, 8)
self.assertEqual(bytes_value.byte8, 9)
self.assertEqual(bytes_value.byte9, 10)
self.assertEqual(bytes_value.byte10, 11)
self.assertEqual(bytes_value.byte11, 12)
self.assertEqual(bytes_value.byte12, 13)
self.assertEqual(bytes_value.byte13, 14)
self.assertEqual(bytes_value.byte14, 15)
self.assertEqual(bytes_value.byte15, 16)
def testConstant(self):
# This is an interesting one, the result of
# CoreFoundation.CFUUIDGetConstantUUIDWithBytes should not be released.
uuid = CoreFoundation.CFUUIDGetConstantUUIDWithBytes(None, *range(16))
CoreFoundation.CFRetain(
CoreFoundation.CFUUIDGetConstantUUIDWithBytes
) # Ensure the value won't be released.
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
s = CoreFoundation.CFUUIDCreateString(None, uuid)
uuid = None
del uuid
uuid = CoreFoundation.CFUUIDGetConstantUUIDWithBytes(None, *range(16))
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
t = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(s, t)
def testCreateFromUUIDBytes(self):
bytes_value = CoreFoundation.CFUUIDBytes(*range(16, 32))
uuid = CoreFoundation.CFUUIDCreateFromUUIDBytes(None, bytes_value)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(text, "10111213-1415-1617-1819-1A1B1C1D1E1F")
def testStructs(self):
o = CoreFoundation.CFUUIDBytes()
self.assertHasAttr(o, "byte0")
self.assertHasAttr(o, "byte1")
self.assertHasAttr(o, "byte2")
self.assertHasAttr(o, "byte3")
self.assertHasAttr(o, "byte4")
self.assertHasAttr(o, "byte5")
self.assertHasAttr(o, "byte6")
self.assertHasAttr(o, "byte7")
self.assertHasAttr(o, "byte8")
self.assertHasAttr(o, "byte9")
self.assertHasAttr(o, "byte10")
self.assertHasAttr(o, "byte11")
self.assertHasAttr(o, "byte12")
self.assertHasAttr(o, "byte13")
self.assertHasAttr(o, "byte14")
self.assertHasAttr(o, "byte15")
| [
"[email protected]"
] | |
d3778584e95ef333ce94c9c0141d55f17ae297a7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2214/60586/309439.py | 43887ed3a7a647d20cd7bf664444a594497e9b76 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | a=input()
b=input()
if a=='1+1i' and 'b==1+1i':
print("0+2i")
elif a=='0+1i' and 'b==0+1i':
print("-1+0i")
else:
print("0+-2i")
| [
"[email protected]"
] | |
9f35131f1f28ffc0f2fd5ab8325e04833ad5fd83 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/semantic_segmentation/BiseNetV1_for_PyTorch/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py | a53fcfa8fe36083934a423e9b34464549cde83be | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 976 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
_base_ = './pspnet_r50-d8_4x4_512x512_80k_vaihingen.py'
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(
in_channels=512,
channels=128,
),
auxiliary_head=dict(in_channels=256, channels=64))
| [
"[email protected]"
] | |
30bc551d847eeb3753771764c28f595558bbc9a0 | e3b5e20bcb560a3c37c09f728b9340b1715c1818 | /venv/lib/python3.7/site-packages/plotly/validators/scattercarpet/_hovertextsrc.py | 748253ef5dd9d965185ead830412b2629267562e | [
"MIT"
] | permissive | 180Studios/LoginApp | 63bc50b1f91e7221c7581627ab166eeb01758f5c | 66ff684a81b23d8f45eef2c56be19a2afd95ab29 | refs/heads/master | 2022-12-24T00:33:08.481826 | 2020-02-03T05:14:41 | 2020-02-03T05:14:41 | 144,414,562 | 0 | 1 | MIT | 2022-12-08T01:38:26 | 2018-08-11T19:57:44 | Python | UTF-8 | Python | false | false | 498 | py | import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='hovertextsrc',
parent_name='scattercarpet',
**kwargs
):
super(HovertextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| [
"[email protected]"
] | |
35d3332b9b2acae00406b27ed618a1477d42c45d | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/apps/scratch/faezeh/working_area/inputexport.py | 1edffac01025a9a4ef04fe864a2eaeb85fcefe02 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 4,034 | py | #!/usr/bin/env python
"""
This demonstrates how to create a plot offscreen and save it to an image
file on disk.
"""
# Standard library imports
import os, sys
# Major library imports
from numpy import fabs, linspace, pi, sin
from scipy.special import jn
# Enthought library imports
from enthought.traits.api import false
# Chaco imports
from enthought.chaco.api import ArrayPlotData, Plot, PlotGraphicsContext
from enthought.chaco.example_support import COLOR_PALETTE
from enthought.traits.api import HasTraits
from enthought.traits.api import Float, HasTraits, Button
from enthought.traits.ui.menu import OKButton, CancelButton
from enthought.traits.ui.api import View, Item, InstanceEditor
import os
DPI = 72.0
# This is a bit of a hack, to work around the fact that line widths don't scale
# with the GraphicsContext's CTM.
dpi_scale = DPI / 72.0
def create_plot():
numpoints = 100
low = -5
high = 15.0
x = linspace(low, high, numpoints)
pd = ArrayPlotData(index=x)
p = Plot(pd, bgcolor="lightgray", padding=50, border_visible=True)
for i in range(10):
pd.set_data("y" + str(i), jn(i,x))
p.plot(("index", "y" + str(i)), color=tuple(COLOR_PALETTE[i]),
width = 2.0 * dpi_scale)
p.x_grid.visible = True
# p.x_grid.line_width *= dpi_scale
p.x_grid.line_width = InputParameter().width
p.y_grid.visible = True
# p.y_grid.line_width *= dpi_scale
p.y_grid.line_width = InputParameter().height
p.legend.visible = True
return p
def draw_plot(filename, size=(800,600)):
container = create_plot()
container.outer_bounds = list(size)
container.do_layout(force=True)
gc = PlotGraphicsContext(size, dpi=DPI)
gc.render_component(container)
gc.save(filename)
return
def draw_pdf(filename, size=(800,600), width=0.0, height=0.0):
from enthought.chaco.pdf_graphics_context import PdfPlotGraphicsContext
container = create_plot()
container.bounds = list(size)
container.do_layout(force=True)
width = InputParameter().width
height = InputParameter().height
gc = PdfPlotGraphicsContext(filename=filename, dest_box = (0.5, 0.5, width, height))
gc.render_component(container)
gc.save()
def get_directory(filename):
print 'Please enter a path in which to place generated plots.'
print 'Press <ENTER> to generate in the current directory.'
path = raw_input('Path: ').strip()
# /home/faeze/ as path
if len(path) > 0 and not os.path.exists(path):
print 'The given path does not exist.'
sys.exit()
if not os.path.isabs(path):
print 'Creating image: ' + os.path.join(os.getcwd(), path, filename)
else:
print 'Creating image: ' + os.path.join(path, filename)
return os.path.join(path, filename)
class InputParameter(HasTraits):
width = 3.0
height = 4.0
class InputParam(HasTraits):
height = Float()
width = Float()
export_param = Button()
def _export_param_fired(self):
test_file = os.path.join('', 'Export_file')
output_file = open(test_file + '.rtf','w')
        output_file.write(self.height.__str__() + '\n' + self.width.__str__())
        output_file.close()
# print 'width', self.width
# print 'height', self.height
view_traits = View( Item("height"),
Item("width"),
Item('export_param', label = 'Export to file', style='simple', show_label = False),
resizable = True,
buttons = [ OKButton, CancelButton ],
height = 0.2,
width = 0.2 )
if __name__ == "__main__":
draw_plot(get_directory('noninteractive.png'), size=(800, 600))
# If you have ReportLab installed, you can uncomment the following:
draw_pdf(get_directory('noninteractive.pdf'), size=(400,300))
ip = InputParam()
ip.configure_traits()
# EOF
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
10fbec86fd6c3e609e74bfe53d632b78788da28c | 08c8e80dc009166a8d678fd36b34dc6ddbbeecc7 | /TTRPlayer.py | 05d919340b3b6ecf7d02b12c5726ee121dbdafb9 | [] | no_license | wqa/TicketToRide | 12f6361f2b1a0461c645817c505d0ebf7a3b9ea8 | dbf9ea161c5bbc456b3980a019b93dc1499ba83d | refs/heads/master | 2020-03-07T21:06:42.089183 | 2018-04-02T07:52:57 | 2018-04-02T07:52:57 | 127,717,750 | 0 | 0 | null | 2018-04-02T07:00:15 | 2018-04-02T07:00:15 | null | UTF-8 | Python | false | false | 2,626 | py | import collections
class Player(object):
def __init__(self,
startingHand,
startingTickets,
playerBoard,
playerPosition,
numTrains
):
"""orderNumber: int
startingHand: list
startingTickets: list
playerBoard: PlayerBoard object from the TTRBoard module
playerPosition: int
"""
self.name = '' #ask for them to enter it on first turn
        #implemented as a collection to avoid O(n) hand.remove(x)
self.hand = collections.Counter(startingHand)
self.tickets = {x:False for x in startingTickets}
self.numTrains = numTrains
self.points = 0
self.playerPosition = playerPosition
#custom board to represent
self.playerBoard = playerBoard
def removeCardsFromHand(self, color, numColor):
"""removes one ore more cards from hand
assumes all cards are in hand, error if not
cards: list
"""
assert self.hand[color] >= numColor
self.hand[color] -= numColor
#add card to hand
def addCardToHand(self, card):
"""adds a single card to hand
assumes card is a valid choice
card: String
"""
if card != None:
self.hand[card] += 1
#add ticket to hand
def addTicket(self, ticket):
"""adds a single ticket to tickets
ticket: tuple(city1, city2, value)
"""
self.tickets[ticket] = False
def completeTicket(self, ticket):
"""updates the value in the tickets dict to True for key: ticket
ticket: tuple(city1, city2, value)
"""
assert ticket in self.tickets
        self.tickets[ticket] = True
def getHand(self):
return self.hand
def addPoints(self, numPoints):
self.points += numPoints
def subtractPoints(self, numPoints):
self.points -= numPoints
def getPoints(self):
return self.points
def getTickets(self):
return self.tickets
def getNumTrains(self):
return self.numTrains
def playNumTrains(self, numTrains):
assert numTrains <= self.numTrains
self.numTrains -= numTrains
def setPlayerName(self, name):
"""sets playerName to name
name: string
"""
self.name = name
def getName(self):
return self.name
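# Rough usage sketch (the PlayerBoard construction and card/ticket values are illustrative;
# see the TTRBoard module for the real board objects):
#     p = Player(startingHand=['red', 'blue', 'blue'],
#                startingTickets=[('Denver', 'El Paso', 4)],
#                playerBoard=player_board,   # a TTRBoard.PlayerBoard instance
#                playerPosition=0,
#                numTrains=45)
#     p.addCardToHand('green')
#     p.playNumTrains(3)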
| [
"[email protected]"
] | |
4c5d1f202e7cb831b781f2fdf9e12481448d3c4d | 7482a2601861b61f61ad082dbca521a94c36dc92 | /image_captioning/config/defaults.py | 3c6c9351b41efc3e54c2292e17e424a4652735c1 | [] | no_license | congve1/ImageCaptioning | 0da1945ac27e406c388824f47e7e4545691ef0a1 | ae2d4ec2dc45bc00ff12cde4f55197654100c309 | refs/heads/master | 2020-05-04T17:17:42.993462 | 2019-07-07T08:58:28 | 2019-07-07T08:58:28 | 179,305,660 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,738 | py | import os
from yacs.config import CfgNode as CN
# ------------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# ------------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# ------------------------------------------------------------------------------
# Config definition
# ------------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
# if the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# ------------------------------------------------------------------------------
# INPUT
# ------------------------------------------------------------------------------
_C.INPUT = CN()
_C.INPUT.SIZE = 256
# ------------------------------------------------------------------------------
# VOCAB
# ------------------------------------------------------------------------------
_C.VOCAB = CN()
_C.VOCAB.WORD_COUNT_THRESHOLD = 5
# ------------------------------------------------------------------------------
# Dataset
# ------------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.SEQ_MAX_LEN = 20 # 50 in all coco captions
_C.DATASET.SEQ_PER_IMG = 5
_C.DATASET.TRAIN = ''
_C.DATASET.VAL = ''
_C.DATASET.TEST = ''
_C.DATASET.VOCAB_PATH = ''
# ------------------------------------------------------------------------------
# DataLoader
# ------------------------------------------------------------------------------
_C.DATALOADER = CN()
_C.DATALOADER.NUM_WORKERS = 0
# ------------------------------------------------------------------------------
# Encoder options
# ------------------------------------------------------------------------------
_C.MODEL.ENCODER = CN()
# The encoder conv body to use
# The string must match a function that is imported in modeling.model_builder
_C.MODEL.ENCODER.CONV_BODY = "R-101-C5"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.ENCODER.FREEZE_CONV_BODY_AT = 0
_C.MODEL.ENCODER.ATT_SIZE = 14
# 2048 for C5; 1024 for C4 ### must be consistent with CONV_BODY
_C.MODEL.ENCODER.FEATURE_SIZE = 2048
# ------------------------------------------------------------------------------
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# ------------------------------------------------------------------------------
_C.MODEL.RESNETS = CN()
# Number of groups to use: 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; Use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
# Residual Transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
# -------------------------------------------------------------------------------
# Group Norm
# -------------------------------------------------------------------------------
_C.MODEL.GROUP_NORM = CN()
# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)
_C.MODEL.GROUP_NORM.DIM_PER_GP = -1
# Number of groups in GroupNorm(-1 if using DIM_PER_GP)
_C.MODEL.GROUP_NORM.NUM_GROUPS = 32
# GroupNorm's small constant in the denominator
_C.MODEL.GROUP_NORM.EPS = 1e-5
# ------------------------------------------------------------------------------
# Decoder options
# ------------------------------------------------------------------------------
_C.MODEL.DECODER = CN()
_C.MODEL.DECODER.ARCH = "TopDown"
# word embedding size
_C.MODEL.DECODER.EMBEDDING_SIZE = 512
# num of hidden units of the rnn
_C.MODEL.DECODER.HIDDEN_SIZE = 512
# strength of dropout in the language model rnn.
_C.MODEL.DECODER.DROPOUT_PROB = 0.5
# the hidden size of the attention in MLP, only useful in show_attend_tell;
# 0 if not using hidden layer
_C.MODEL.DECODER.ATT_HIDDEN_SIZE = 512
_C.MODEL.DECODER.BEAM_SIZE = 3
# ------------------------------------------------------------------------------
# Solver
# ------------------------------------------------------------------------------
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.OPTIMIZER = "SGD"
_C.SOLVER.SCHEDULER = "WarmupMultiStepLR"
_C.SOLVER.BASE_LR = 0.1
_C.SOLVER.BIAS_LR_FACTOR = 2
# after how many iterations to start self-critical training
# -1 for disable, 0 from the beginning
_C.SOLVER.SCST_AFTER = -1
# clip gradients at this norm
_C.SOLVER.GRAD_CLIP = 10.0
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
# Adam beta
_C.SOLVER.BETAS = (0.9, 0.999)
# SGDR settings
_C.SOLVER.T_MAX = 5000
_C.SOLVER.T_MULTI = 2
_C.SOLVER.ETA_MIN = 0.00001
# WarmupMultiStep Scheduler settings
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000, )
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
# Step Scheduler settings
_C.SOLVER.STEP_SIZE = 1200
_C.SOLVER.CHECKPOINT_PERIOD = 2500
_C.SOLVER.LOG_PERIOD = 100
_C.SOLVER.VAL_PERIOD = 1000
# Number of images per batch
# This is global
_C.SOLVER.IMS_PER_BATCH = 16
_C.SOLVER.METRIC_LOGGER_NAME = 'model'
# ------------------------------------------------------------------------------
# Specific test options
# ------------------------------------------------------------------------------
_C.TEST = CN()
# Number of images per batch
# This is global
_C.TEST.IMS_PER_BATCH = 8
_C.TEST.BEAM_SIZE = 3
# ------------------------------------------------------------------------------
# Misc options
# ------------------------------------------------------------------------------
_C.OUTPUT_DIR = "save"
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog_lmdb.py")
/obsolete/py_deprecated/LookupTables.py | faithcomesbyhearing/dbp-etl | MIT | Python
# LookupTables
class LookupTables:
def __init__(self):
# these are not being used yet?
self.otBooks=[ "GEN", "EXO", "LEV", "NUM", "DEU", "JOS", "JDG", "RUT",
"1SA", "2SA", "1KI", "2KI", "1CH", "2CH", "EZR", "NEH",
"EST", "JOB", "PSA", "PRO", "ECC", "SNG", "ISA", "JER",
"LAM", "EZK", "DAN", "HOS", "JOL", "AMO", "OBA", "JON",
"MIC", "NAM", "HAB", "ZEP", "HAG", "ZEC", "MAL"]
self.ntBooks=[ "MAT", "MRK", "LUK", "JHN", "ACT", "ROM", "1CO", "2CO",
"GAL", "EPH", "PHP", "COL", "1TH", "2TH", "1TI", "2TI",
"TIT", "PHM", "HEB", "JAS", "1PE", "2PE", "1JN", "2JN",
"3JN", "JUD", "REV"]
self.apBooks=[ "1ES", "1MA", "1MQ", "2BA", "2ES", "2MA", "2MQ", "3MA",
"3MQ", "4BA", "4MA", "5EZ", "6EZ", "BAR", "BEL", "DAG",
"ENO", "ESG", "EZA", "JDT", "JUB", "LAO", "LBA", "LJE",
"MAN", "ODA", "PS2", "PS3", "PSS", "REP", "S3Y", "SIR",
"SUS", "TOB", "WIS"]
def bookIdBySequence(self, seqCode):
seqDict = {
"B01": "MAT",
"B02": "MRK",
"B03": "LUK",
"B04": "JHN",
"B05": "ACT",
"B06": "ROM",
"B07": "1CO",
"B08": "2CO",
"B09": "GAL",
"B10": "EPH",
"B11": "PHP",
"B12": "COL",
"B13": "1TH",
"B14": "2TH",
"B15": "1TI",
"B16": "2TI",
"B17": "TIT",
"B18": "PHM",
"B19": "HEB",
"B20": "JAS",
"B21": "1PE",
"B22": "2PE",
"B23": "1JN",
"B24": "2JN",
"B25": "3JN",
"B26": "JUD",
"B27": "REV",
"A01": "GEN",
"A02": "EXO",
"A03": "LEV",
"A04": "NUM",
"A05": "DEU",
"A06": "JOS",
"A07": "JDG",
"A08": "RUT",
"A09": "1SA",
"A10": "2SA",
"A11": "1KI",
"A12": "2KI",
"A13": "1CH",
"A14": "2CH",
"A15": "EZR",
"A16": "NEH",
"A17": "EST",
"A18": "JOB",
"A19": "PSA",
"A20": "PRO",
"A21": "ECC",
"A22": "SNG",
"A23": "ISA",
"A24": "JER",
"A25": "LAM",
"A26": "EZK",
"A27": "DAN",
"A28": "HOS",
"A29": "JOL",
"A30": "AMO",
"A31": "OBA",
"A32": "JON",
"A33": "MIC",
"A34": "NAM",
"A35": "HAB",
"A36": "ZEP",
"A37": "HAG",
"A38": "ZEC",
"A39": "MAL"}
return seqDict.get(seqCode)
# This should be replaced with a query to the books table after more is added
def bookIdBy2Char(self, twoCharCode):
twoCharDict = {
# New Testament
"MT": "MAT",
"MK": "MRK",
"LK": "LUK",
"JN": "JHN",
"AC": "ACT",
"RM": "ROM",
"C1": "1CO",
"C2": "2CO",
"GL": "GAL",
"EP": "EPH",
"PP": "PHP",
"CL": "COL",
"H1": "1TH",
"H2": "2TH",
"T1": "1TI",
"T2": "2TI",
"TT": "TIT",
"PM": "PHM",
"HB": "HEB",
"JM": "JAS",
"P1": "1PE",
"P2": "2PE",
"J1": "1JN",
"J2": "2JN",
"J3": "3JN",
"JD": "JUD",
"RV": "REV",
# Old Testament
"GN": "GEN",
"EX": "EXO",
"LV": "LEV",
"NU": "NUM",
"DT": "DEU",
"JS": "JOS",
"JG": "JDG",
"RT": "RUT",
"S1": "1SA",
"S2": "2SA",
"K1": "1KI",
"K2": "2KI",
"R1": "1CH",
"R2": "2CH",
"ER": "EZR",
"NH": "NEH",
"ET": "EST",
"JB": "JOB",
"PS": "PSA",
"PR": "PRO",
"EC": "ECC",
"SS": "SNG",
"IS": "ISA",
"JR": "JER",
"LM": "LAM",
"EK": "EZK",
"DN": "DAN",
"HS": "HOS",
"JL": "JOL",
"AM": "AMO",
"OB": "OBA",
"JH": "JON",
"MC": "MIC",
"NM": "NAM",
"HK": "HAB",
"ZP": "ZEP",
"HG": "HAG",
"ZC": "ZEC",
"ML": "MAL",
# Apocrypha
"E1": "1ES", # 3 Esdras
"E2": "2ES", # 4 Esdras
"M1": "1MA", # 1 Maccabees
"M2": "2MA", # 2 Maccabees
"M3": "3MA", # 3 Maccabees
"M4": "4MA", # 4 Maccabees
"BR": "BAR", # First book of Baruch
"BL": "BEL", # Rest of Daniel
"DG": "DAG", # Daniel 14
"EG": "ESG", # Greek Esther
"JT": "JDT", # Judith
#"LJ": None, # Apocryphal something
#"PA": None, # Apocryphal something
#"PN": None, # Apocryphal something
"PX": "PS2", # Psalms 152
"SR": "SIR", # Sirach
"SN": "SUS", # Greek Daniel
"TB": "TOB", # Tobit
"WS": "WIS", # Wisdom of Solomon
# USFM Peripheral Book Codes
"FR": "FRT", # Front Matter
"IN": "INT", # Introduction
"BK": "BAK", # Back Matter
"CN": "CNC", # Concordance
"GS": "GLO", # Glossary
"TX": "TDX", # Topical Index
"OH": "OTH", # Other
"XA": "XXA", #
"XB": "XXB", #
"XC": "XXC", #
"XD": "XXD", #
"XE": "XXE", #
"XF": "XXF", #
"XG": "XXG" #
}
return twoCharDict.get(twoCharCode)
def scriptCode(self, script):
scriptDic = {
"Amharic":"Ethi",
"Arabic":"Arab",
"Armenian":"Armn",
"Bengali":"Beng",
"Bengali Script":"Beng",
"Berber":"Tfng",
"Burmese":"Mymr",
"Canadian Aboriginal Syllabic":"Cans",
"Canadian Aboriginal Syllabics":"Cans",
"Cherokee Sylabary":"Cher",
"Cyrillic":"Cyrl",
"Devanagari":"Deva",
"Devangari":"Deva",
"Ethiopic":"Ethi",
"Ethoiopic":"Ethi",
"Ethopic":"Ethi",
"Ge'ez":"Ethi",
"Greek":"Grek",
"Gujarati":"Gujr",
"Gurmukhi":"Guru",
"Han":"Hani",
"Hangul (Korean)":"Kore",
"Hebrew":"Hebr",
"Japanese":"Jpan",
"Kannada":"Knda",
"Khmer":"Khmr",
"Khmer Script":"Khmr",
"Lao":"Laoo",
"Latin":"Latn",
"Latin (Africa)":"Latn",
"Latin (African)":"Latn",
"Latin (Latin America)":"Latn",
"Latin (Latin American)":"Latn",
"Latin (PNG)":"Latn",
"Latin (SE Asia)":"Latn",
"Malayalam":"Mlym",
"NA":"Zyyy",
"Oriya":"Orya",
"Tamil":"Taml",
"Telugu":"Telu",
"Thai":"Thai",
"Tibetan":"Tibt"}
return scriptDic[script]  # intentionally raises KeyError: a script that is not found should be a fatal error
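# Hedged usage sketch (not part of the original module); the expected values
# are read directly off the dictionaries above.
def _example_lookups():
    lookup = LookupTables()
    return (
        lookup.bookIdBySequence("B01"),  # "MAT"
        lookup.bookIdBy2Char("GN"),      # "GEN"
        lookup.scriptCode("Cyrillic"),   # "Cyrl"
    )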
/locators/class_parse_locators.py | osakoh/Scrapping-example | no_license | Python
class ParsedItemLocators:
"""
Locates an item in the HTML page.
Allows us to see what our code will be looking at as well as change it quickly if the locator changes.
"""
NAME_LOCATOR = 'article.product_pod h3 a'
LINK_LOCATOR = 'article.product_pod h3 a'
PRICE_LOCATOR = 'article.product_pod div.product_price p.price_color'
RATING_LOCATOR = 'article.product_pod p.star-rating'
/docstrings/bitmap.py | mdboom/freetypy | BSD-2-Clause-Views | Python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
Bitmap__init__ = """
A structure used to describe a bitmap or pixmap to the raster.
`Bitmap` supports the Python buffer interface, so it is easy to
convert it to a Numpy array. For example::
>>> import numpy as np
>>> a = np.asarray(bitmap)
"""
Bitmap_buffer = """
Get the bitmap's contents as a buffer.
In most cases, the preferred method to get the data is to cast the
`Bitmap` object to a memoryview, since that will also have size and
type information.
"""
Bitmap_convert = """
Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth
1bpp, 2bpp, 4bpp, or 8bpp converts it to one with depth 8bpp, making
the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of
`alignment`.
Parameters
----------
alignment : int, optional
The pitch of the bitmap is a multiple of this parameter. Common
values are 1, 2, or 4.
Returns
-------
target : Bitmap
The bitmap, converted to 8bpp.
"""
Bitmap_num_grays = """
The number of gray levels used in the bitmap. This field is only used
with `PIXEL_MODE.GRAY`.
"""
Bitmap_pitch = """
The number of bytes taken by one bitmap row.
Includes padding.
The pitch is positive when the bitmap has a ‘down’ flow, and negative
when it has an ‘up’ flow. In all cases, the pitch is an offset to add
to a bitmap pointer in order to go down one row.
Note that ‘padding’ means the alignment of a bitmap to a byte border,
and FreeType functions normally align to the smallest possible integer
value.
For the B/W rasterizer, `pitch` is always an even number.
To change the pitch of a bitmap (say, to make it a multiple of 4), use
`Bitmap.convert`. Alternatively, you might use callback functions to
directly render to the application's surface.
"""
Bitmap_pixel_mode = """
The `PIXEL_MODE`, i.e., how pixel bits are stored.
"""
Bitmap_rows = """
The number of bitmap rows.
"""
Bitmap_to_list = """
|freetypy| Convert the bitmap to a nested list.
"""
Bitmap_width = """
The number of pixels in bitmap row.
"""
PIXEL_MODE = """
Constants related to the pixel mode of bitmaps.
- `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels
are stored in most-significant order (MSB), which means that the
left-most pixel in a byte has value 128.
- `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased
glyph images. Each pixel is stored in one byte. Note that the number
of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap
structure (it generally is 256).
- `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `GRAY4`: A 4-bit per pixel bitmap, representing embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on LCD displays; the bitmap is three times
wider than the original glyph image. See also `RENDER_MODE.LCD`. On
many freetype builds, this functionality will be disabled due to
patent restrictions, in which case the resulting bitmap will be
grayscale.
- `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on rotated LCD displays; the bitmap is three
times taller than the original glyph image. See also
`RENDER_MODE.LCD_V`. On many freetype builds, this functionality
will be disabled due to patent restrictions, in which case the
resulting bitmap will be grayscale.
"""
/tools/trains/t010_xgb.py | guchio3/kaggle-santander-2019 | no_license | Python
import datetime
import os
import pickle
import warnings
from itertools import tee
import lightgbm
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from tools.utils.features import (get_all_features, load_features,
select_features)
from tools.utils.logs import dec_timer, log_evaluation, sel_log
from tools.utils.samplings import value_resampling
from tools.utils.visualizations import save_importance
warnings.simplefilter(action='ignore', category=FutureWarning)
NES_DIR = './mnt/inputs/nes_info/'
FEATURE_DIR = './mnt/inputs/features/'
@dec_timer
def t010_xgb_train(args, script_name, configs, logger):
'''
policy
------------
* use original functions only if there's no pre-coded functions
in useful libraries such as sklearn.
todos
------------
* load features
* train the model
* save the followings
* logs
* oofs
* importances
* trained models
* submissions (if test mode)
'''
# -- Prepare for training
exp_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
# -- Load train data
sel_log('loading training data ...', None)
trn_ids = pd.read_pickle(
NES_DIR + 'trn_ID_code.pkl.gz', compression='gzip')
tst_ids = pd.read_pickle(
NES_DIR + 'tst_ID_code.pkl.gz', compression='gzip')
target = pd.read_pickle(
NES_DIR + 'target.pkl.gz', compression='gzip')
if args.debug:
sample_idxes = trn_ids.reset_index(
drop=True).sample(
random_state=71,
frac=0.05).index
target = target.iloc[sample_idxes].reset_index(drop=True)
trn_ids = trn_ids.iloc[sample_idxes].reset_index(drop=True)
# load features
if configs['train']['all_features']:
_features = get_all_features(FEATURE_DIR)
else:
_features = configs['features']
trn_tst_df = load_features(_features, FEATURE_DIR, logger=logger)\
.set_index('ID_code')
# feature selection if needed
if configs['train']['feature_selection']:
trn_tst_df = select_features(trn_tst_df,
configs['train']['feature_select_path'],
configs['train']['metric'],
configs['train']['feature_topk'])
features = trn_tst_df.columns
# split train and test
sel_log(f'now splitting the df to train and test ones ...', None)
features_df = trn_tst_df.loc[trn_ids].reset_index(drop=True)
test_features_df = trn_tst_df.loc[tst_ids].reset_index(drop=True)
    # -- Split using stratified k-fold w/ shuffling
    # NOTE: group k-fold is not implemented yet; I want to add it in the future
if configs['train']['fold_type'] == 'skf':
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=4221)
folds = skf.split(features_df, target)
configs['train']['single_model'] = False
else:
print(f"ERROR: wrong fold_type, {configs['train']['fold_type']}")
folds, pred_folds = tee(folds)
# -- Make training dataset
# print shape
sel_log(f'used features are {features_df.columns.tolist()}', logger)
sel_log(f'the shape features_df is {features_df.shape}', logger)
# -- CV
# Set params
PARAMS = configs['xgb_params']
if 'nthread' not in PARAMS:
PARAMS['nthread'] = os.cpu_count()
PARAMS['interaction_constraints'] = [[v, 200 + v, 400 + v, 600 + v]
for v in range(200)]
PARAMS['eval_metric'] = "logloss"
PARAMS['objective'] = "binary:logistic"
# PARAMS['booster'] = 'gblinear'
sel_log('start training ...', None)
oofs = []
y_trues = []
val_idxes = []
scores = []
best_iterations = []
cv_model = []
for i, idxes in list(enumerate(folds)):
trn_idx, val_idx = idxes
# -- Data resampling
# Stock original data for validation
fold_features_df, fold_target = value_resampling(
features_df.iloc[trn_idx],
target[trn_idx],
configs['train']['sampling_type'],
configs['train']['sampling_random_state'],
configs['train']['os_lim'],
configs['train']['pos_t'],
configs['train']['neg_t'],
logger=logger)
# make xgb dataset
train_set = xgb.DMatrix(fold_features_df.values,
label=fold_target.values)
valid_set = xgb.DMatrix(features_df.values[val_idx],
label=target.values[val_idx])
pred_set = xgb.DMatrix(features_df.values[val_idx])
# train
booster = xgb.train(
params=PARAMS.copy(),
dtrain=train_set,
num_boost_round=1000000,
evals=[
(valid_set, 'valid'),
],
verbose_eval=10,
early_stopping_rounds=30,
)
# predict using trained model
y_pred = booster.predict(pred_set)# [:, 1]
print(y_pred)
y_true = target.values[val_idx]
oofs.append(y_pred)
y_trues.append(y_true)
val_idxes.append(val_idx)
# Calc AUC
auc = roc_auc_score(y_true, y_pred)
sel_log(f'fold AUC: {auc}', logger=logger)
scores.append(auc)
best_iterations.append(booster.best_iteration)
# save model
cv_model.append(booster)
auc_mean, auc_std = np.mean(scores), np.std(scores)
auc_oof = roc_auc_score(np.concatenate(y_trues), np.concatenate(oofs))
best_iteration_mean = np.mean(best_iterations)
sel_log(
f'AUC_mean: {auc_mean:.5f}, AUC_std: {auc_std:.5f}',
logger)
sel_log(
f'AUC OOF: {auc_oof}',
logger)
sel_log(
f'BEST ITER MEAN: {best_iteration_mean}',
logger)
# -- Post processings
filename_base = f'{script_name}_{exp_time}_{auc_mean:.5}'
# Save oofs
oof_df = pd.DataFrame()
oof_df['ID_code'] = trn_ids.iloc[np.concatenate(val_idxes)]
oof_df['y_val'] = np.concatenate(y_trues)
oof_df['oof_proba'] = np.concatenate(oofs)
oof_df = oof_df.set_index('ID_code').loc[trn_ids]
oof_df.to_csv(
'./mnt/oofs/' +
filename_base +
'_oofs.csv',
index=True)
with open('./mnt/oofs/' + filename_base + '_oofs.pkl', 'wb') as fout:
pickle.dump([val_idxes, oofs], fout)
# Save trained models
with open('./mnt/trained_models/'
+ filename_base
+ '_models.pkl', 'wb') as fout:
pickle.dump(cv_model, fout)
# --- Make submission file
if args.test:
# -- Prediction
sel_log('predicting for test ...', None)
preds = []
preds_no_rank = []
reals = np.load('./mnt/inputs/nes_info/real_samples_indexes.npz.npy')
# for booster in tqdm(cv_model.boosters):
for booster in tqdm(cv_model):
test_set = xgb.DMatrix(test_features_df.values)
pred = booster.predict(test_set)
pred = pd.Series(pred)
# rank avg only using real part
preds_no_rank.append(pred.copy())
pred.iloc[reals] = pred.iloc[reals].rank() / reals.shape
preds.append(pred)
if len(cv_model) > 1:
target_values = np.mean(preds, axis=0)
target_values_no_rank = np.mean(preds_no_rank, axis=0)
else:
target_values = preds[0]
target_values_no_rank = preds[0]
# blend single model
# -- Make submission file
sel_log(f'loading sample submission file ...', None)
sub_df = pd.read_csv(
'./mnt/inputs/origin/sample_submission.csv.zip',
compression='zip')
sub_df.target = target_values
sub_df_no_rank = pd.read_csv(
'./mnt/inputs/origin/sample_submission.csv.zip',
compression='zip')
sub_df_no_rank.target = target_values_no_rank
# print stats
submission_filename = f'./mnt/submissions/{filename_base}_sub.csv.gz'
submission_filename_no_rank = f'./mnt/submissions/{filename_base}_sub_no_rank.csv'
sel_log(f'saving submission file to {submission_filename}', logger)
sub_df.to_csv(submission_filename, compression='gzip', index=False)
sub_df_no_rank.to_csv(
submission_filename_no_rank,
index=False)
if args.submit:
os.system(
f'kaggle competitions submit '
f'santander-customer-transaction-prediction '
f'-f {submission_filename} -m "{args.message}"')
return auc_mean, auc_std, auc_oof
| [
"[email protected]"
] | |
ed3469e684afa8c86b519a431e625fbbd69fd869 | 7030c780db36c7d8efedb1152cf945a3cc248fdb | /python/cuml/thirdparty_adapters/sparsefuncs_fast.py | 21e730ef726063949023d765ade9a3962fedd7a1 | [
"Apache-2.0"
] | permissive | rapidsai/cuml | 546af8151fd2ee0f737cc4e62386d4b0ede74f3d | 7d86042b8de06bc8acce632230fe5821bd36c17d | refs/heads/branch-23.10 | 2023-08-30T19:17:41.816373 | 2023-08-28T13:23:15 | 2023-08-28T13:23:15 | 152,616,802 | 3,615 | 569 | Apache-2.0 | 2023-09-14T00:21:52 | 2018-10-11T15:45:35 | C++ | UTF-8 | Python | false | false | 10,106 | py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import ceil
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cpx = gpu_only_import("cupyx")
cuda = gpu_only_import_from("numba", "cuda")
def csr_mean_variance_axis0(X):
"""Compute mean and variance on the axis 0 of a CSR matrix
Parameters
----------
X : sparse CSR matrix
Input array
Returns
-------
mean and variance
"""
X = X.tocsc()
means, variances, _ = _csc_mean_variance_axis0(X)
return means, variances
def csc_mean_variance_axis0(X):
"""Compute mean and variance on the axis 0 of a CSC matrix
Parameters
----------
X : sparse CSC matrix
Input array
Returns
-------
mean and variance
"""
means, variances, _ = _csc_mean_variance_axis0(X)
return means, variances
def _csc_mean_variance_axis0(X):
"""Compute mean, variance and nans count on the axis 0 of a CSC matrix
Parameters
----------
X : sparse CSC matrix
Input array
Returns
-------
mean, variance, nans count
"""
n_samples, n_features = X.shape
means = cp.empty(n_features)
variances = cp.empty(n_features)
counts_nan = cp.empty(n_features)
start = X.indptr[0]
for i, end in enumerate(X.indptr[1:]):
col = X.data[start:end]
_count_zeros = n_samples - col.size
_count_nans = (col != col).sum()
_mean = cp.nansum(col) / (n_samples - _count_nans)
_variance = cp.nansum((col - _mean) ** 2)
_variance += _count_zeros * (_mean**2)
_variance /= n_samples - _count_nans
means[i] = _mean
variances[i] = _variance
counts_nan[i] = _count_nans
start = end
return means, variances, counts_nan
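# Hedged cross-check (not part of the cuML module): the same column statistics
# computed densely with CuPy. Implicit zeros of the sparse matrix count as
# observations and NaNs are excluded, so nanmean/nanvar should agree with the
# sparse routine above.
def _example_dense_mean_variance_axis0(X_dense):
    counts_nan = cp.isnan(X_dense).sum(axis=0)
    means = cp.nanmean(X_dense, axis=0)
    variances = cp.nanvar(X_dense, axis=0)
    return means, variances, counts_nan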
@cuda.jit
def norm_step2_k(indptr, data, norm):
"""Apply normalization
Parameters
----------
indptr : array
indptr of sparse matrix
data : array
data of sparse matrix
norm: array
norm by which to divide columns
"""
row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
if row_i >= indptr.shape[0] - 1:
return
start = indptr[row_i]
end = indptr[row_i + 1]
if inrow_idx >= (end - start):
return
data[start + inrow_idx] /= norm[row_i]
@cuda.jit
def l1_step1_k(indptr, data, norm):
"""Compute norm for L1 normalization"""
row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
if row_i >= indptr.shape[0] - 1:
return
start = indptr[row_i]
end = indptr[row_i + 1]
if inrow_idx >= (end - start):
return
val = abs(data[start + inrow_idx])
cuda.atomic.add(norm, row_i, val)
def inplace_csr_row_normalize_l1(X):
"""Normalize CSR matrix inplace with L1 norm
Parameters
----------
X : sparse CSR matrix
Input array
Returns
-------
Normalized matrix
"""
n_rows = X.indptr.shape[0]
max_nnz = cp.diff(X.indptr).max()
tpb = (32, 32)
bpg_x = ceil(n_rows / tpb[0])
bpg_y = ceil(max_nnz / tpb[1])
bpg = (bpg_x, bpg_y)
norm = cp.zeros(n_rows - 1, dtype=X.dtype)
l1_step1_k[bpg, tpb](X.indptr, X.data, norm)
norm_step2_k[bpg, tpb](X.indptr, X.data, norm)
@cuda.jit
def l2_step1_k(indptr, data, norm):
"""Compute norm for L2 normalization"""
row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
if row_i >= indptr.shape[0] - 1:
return
start = indptr[row_i]
end = indptr[row_i + 1]
if inrow_idx >= (end - start):
return
val = data[start + inrow_idx]
val *= val
cuda.atomic.add(norm, row_i, val)
def inplace_csr_row_normalize_l2(X):
"""Normalize CSR matrix inplace with L2 norm
Parameters
----------
X : sparse CSR matrix
Input array
Returns
-------
Normalized matrix
"""
n_rows = X.indptr.shape[0]
max_nnz = cp.diff(X.indptr).max()
tpb = (32, 32)
bpg_x = ceil(n_rows / tpb[0])
bpg_y = ceil(max_nnz / tpb[1])
bpg = (bpg_x, bpg_y)
norm = cp.zeros(n_rows - 1, dtype=X.dtype)
l2_step1_k[bpg, tpb](X.indptr, X.data, norm)
norm = cp.sqrt(norm)
norm_step2_k[bpg, tpb](X.indptr, X.data, norm)
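# Hedged usage sketch (requires a GPU; not part of the original module):
# normalizing the rows of a small CSR matrix in place with the kernels above.
def _example_l2_normalize_rows():
    X = cpx.scipy.sparse.csr_matrix(cp.asarray([[3.0, 4.0], [0.0, 2.0]]))
    inplace_csr_row_normalize_l2(X)
    return X.toarray()  # expected: [[0.6, 0.8], [0.0, 1.0]]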
@cuda.jit(device=True, inline=True)
def _deg2_column(d, i, j, interaction_only):
"""Compute the index of the column for a degree 2 expansion
d is the dimensionality of the input data, i and j are the indices
for the columns involved in the expansion.
"""
if interaction_only:
return int(d * i - (i**2 + 3 * i) / 2 - 1 + j)
else:
return int(d * i - (i**2 + i) / 2 + j)
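# Host-side mirror of the device helper above, for illustration only (not part
# of the original module). For d=3 and interaction_only=0 it maps the pairs
# (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5, i.e. the columns
# x0*x0, x0*x1, x0*x2, x1*x1, x1*x2, x2*x2 of the degree-2 expansion.
def _deg2_column_host(d, i, j, interaction_only=0):
    if interaction_only:
        return int(d * i - (i**2 + 3 * i) / 2 - 1 + j)
    return int(d * i - (i**2 + i) / 2 + j)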
@cuda.jit(device=True, inline=True)
def _deg3_column(d, i, j, k, interaction_only):
"""Compute the index of the column for a degree 3 expansion
d is the dimensionality of the input data, i, j and k are the indices
for the columns involved in the expansion.
"""
if interaction_only:
return int(
(
3 * d**2 * i
- 3 * d * i**2
+ i**3
+ 11 * i
- 3 * j**2
- 9 * j
)
/ 6
+ i**2
- 2 * d * i
+ d * j
- d
+ k
)
else:
return int(
(3 * d**2 * i - 3 * d * i**2 + i**3 - i - 3 * j**2 - 3 * j)
/ 6
+ d * j
+ k
)
@cuda.jit
def perform_expansion(
indptr,
indices,
data,
expanded_data,
expanded_indices,
d,
interaction_only,
degree,
expanded_indptr,
):
"""Kernel applying polynomial expansion on CSR matrix"""
row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
if row_i >= indptr.shape[0] - 1:
return
expanded_index = expanded_indptr[row_i] + inrow_idx
if expanded_index >= expanded_indptr[row_i + 1]:
return
row_starts = indptr[row_i]
row_ends = indptr[row_i + 1]
i_ptr = row_starts
j_ptr = -1
k_ptr = inrow_idx
if degree == 2:
j_ptr = inrow_idx
for i in range(row_starts, row_ends):
diff = row_ends - i - interaction_only
if j_ptr >= diff:
j_ptr -= diff
else:
i_ptr = i
break
j_ptr += i_ptr + interaction_only
else:
# degree == 3
diff = 0
for i in range(row_starts, row_ends):
for j in range(i + interaction_only, row_ends):
diff = row_ends - j - interaction_only
if k_ptr >= diff:
k_ptr -= diff
else:
j_ptr = j
i_ptr = i
break
if j_ptr != -1:
break
k_ptr += j_ptr + interaction_only
i = indices[i_ptr]
j = indices[j_ptr]
if degree == 2:
col = _deg2_column(d, i, j, interaction_only)
expanded_indices[expanded_index] = col
expanded_data[expanded_index] = data[i_ptr] * data[j_ptr]
else:
# degree == 3
k = indices[k_ptr]
col = _deg3_column(d, i, j, k, interaction_only)
expanded_indices[expanded_index] = col
expanded_data[expanded_index] = data[i_ptr] * data[j_ptr] * data[k_ptr]
def csr_polynomial_expansion(X, interaction_only, degree):
"""Apply polynomial expansion on CSR matrix
Parameters
----------
X : sparse CSR matrix
Input array
Returns
-------
New expansed matrix
"""
assert degree in (2, 3)
interaction_only = 1 if interaction_only else 0
d = X.shape[1]
if degree == 2:
expanded_dimensionality = int((d**2 + d) / 2 - interaction_only * d)
else:
expanded_dimensionality = int(
(d**3 + 3 * d**2 + 2 * d) / 6 - interaction_only * d**2
)
if expanded_dimensionality == 0:
return None
assert expanded_dimensionality > 0
nnz = cp.diff(X.indptr)
if degree == 2:
total_nnz = (nnz**2 + nnz) / 2 - interaction_only * nnz
else:
total_nnz = (
nnz**3 + 3 * nnz**2 + 2 * nnz
) / 6 - interaction_only * nnz**2
del nnz
nnz_cumsum = total_nnz.cumsum(dtype=cp.int64)
total_nnz_max = int(total_nnz.max())
total_nnz = int(total_nnz.sum())
num_rows = X.indptr.shape[0] - 1
expanded_data = cp.empty(shape=total_nnz, dtype=X.data.dtype)
expanded_indices = cp.empty(shape=total_nnz, dtype=X.indices.dtype)
expanded_indptr = cp.empty(shape=num_rows + 1, dtype=X.indptr.dtype)
expanded_indptr[0] = X.indptr[0]
expanded_indptr[1:] = nnz_cumsum
tpb = (32, 32)
bpg_x = ceil(X.indptr.shape[0] / tpb[0])
bpg_y = ceil(total_nnz_max / tpb[1])
bpg = (bpg_x, bpg_y)
perform_expansion[bpg, tpb](
X.indptr,
X.indices,
X.data,
expanded_data,
expanded_indices,
d,
interaction_only,
degree,
expanded_indptr,
)
return cpx.scipy.sparse.csr_matrix(
(expanded_data, expanded_indices, expanded_indptr),
shape=(num_rows, expanded_dimensionality),
)
| [
"[email protected]"
] | |
accdc603e7b050c80d24dd697beff1e270f215ca | 111e6c08c9bfc7a3aeb22ddd69765cfdda2fd38d | /3/day03/day03/urls.py | c42a7fd2660709f6e56eb457c373b4037eedeea3 | [] | no_license | huaxiawudi/yz1806 | 1f9e16292a44f68f7a126fc5e4655f3663ef27b1 | 0bc9ef8f4bd8b8e0ed9c6416caa0e083ec61d3ea | refs/heads/master | 2020-04-07T08:54:53.319535 | 2018-11-20T04:15:45 | 2018-11-20T04:15:45 | 158,233,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | """day03 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
# url:http://www.baidu.com:80/index.html?kw=tom#1
from django.conf.urls import url
from django.contrib import admin
from app import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^adduser/$',views.adduser),
url(r'^updateuser/$', views.updateuser),
url(r'^deleteuser/$',views.deleteuser),
url(r'^getqueryset/$',views.getqueryset)
]
| [
"[email protected]"
] | |
740369dd7596e7744e209ee19a1d27384bffd5de | 5923d7526282c63fbf2d56df706c20e4ace3fd3e | /backend/manage.py | 0ba611cbf2733d3b04d711054dce31d06abd9cf8 | [] | no_license | crowdbotics-apps/pushya-4586 | 936b4c5df863a3d1728acf5c5b8456bf60e3ce6e | e01acb275fa18542007ca2932a20896f048ed549 | refs/heads/master | 2023-01-09T15:14:04.158342 | 2019-06-12T22:49:56 | 2019-06-12T22:49:56 | 191,654,775 | 0 | 0 | null | 2023-01-04T00:06:07 | 2019-06-12T22:48:42 | Python | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pushya_4586.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
/pandas/tests/indexes/numeric/test_indexing.py | pandas-dev/pandas | BSD-3-Clause | Python
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
NA,
Index,
RangeIndex,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import (
ArrowExtensionArray,
FloatingArray,
)
@pytest.fixture
def index_large():
# large values used in Index[uint64] tests where no compat needed with Int64/Float64
large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
return Index(large, dtype=np.uint64)
class TestGetLoc:
def test_get_loc(self):
index = Index([0, 1, 2])
assert index.get_loc(1) == 1
def test_get_loc_raises_bad_label(self):
index = Index([0, 1, 2])
with pytest.raises(InvalidIndexError, match=r"\[1, 2\]"):
index.get_loc([1, 2])
def test_get_loc_float64(self):
idx = Index([0.0, 1.0, 2.0], dtype=np.float64)
with pytest.raises(KeyError, match="^'foo'$"):
idx.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
with pytest.raises(KeyError, match="^True$"):
idx.get_loc(True)
with pytest.raises(KeyError, match="^False$"):
idx.get_loc(False)
def test_get_loc_na(self):
idx = Index([np.nan, 1, 2], dtype=np.float64)
assert idx.get_loc(1) == 1
assert idx.get_loc(np.nan) == 0
idx = Index([np.nan, 1, np.nan], dtype=np.float64)
assert idx.get_loc(1) == 1
# representable by slice [0:2:2]
msg = "'Cannot get left slice bound for non-unique label: nan'"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
# not representable by slice
idx = Index([np.nan, 1, np.nan, np.nan], dtype=np.float64)
assert idx.get_loc(1) == 1
msg = "'Cannot get left slice bound for non-unique label: nan"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
def test_get_loc_missing_nan(self):
# GH#8569
idx = Index([1, 2], dtype=np.float64)
assert idx.get_loc(1) == 0
with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
with pytest.raises(InvalidIndexError, match=r"\[nan\]"):
# listlike/non-hashable raises TypeError
idx.get_loc([np.nan])
@pytest.mark.parametrize("vals", [[1], [1.0], [Timestamp("2019-12-31")], ["test"]])
def test_get_loc_float_index_nan_with_method(self, vals):
# GH#39382
idx = Index(vals)
with pytest.raises(KeyError, match="nan"):
idx.get_loc(np.nan)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_get_loc_numericindex_none_raises(self, dtype):
# case that goes through searchsorted and key is non-comparable to values
arr = np.arange(10**7, dtype=dtype)
idx = Index(arr)
with pytest.raises(KeyError, match="None"):
idx.get_loc(None)
def test_get_loc_overflows(self):
# unique but non-monotonic goes through IndexEngine.mapping.get_item
idx = Index([0, 2, 1])
val = np.iinfo(np.int64).max + 1
with pytest.raises(KeyError, match=str(val)):
idx.get_loc(val)
with pytest.raises(KeyError, match=str(val)):
idx._engine.get_loc(val)
class TestGetIndexer:
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
tm.assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize(
"expected,method",
[
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"),
],
)
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
tm.assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match="tolerance argument"):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
"method, tolerance, indexer, expected",
[
("pad", None, [0, 5, 9], [0, 5, 9]),
("backfill", None, [0, 5, 9], [0, 5, 9]),
("nearest", None, [0, 5, 9], [0, 5, 9]),
("pad", 0, [0, 5, 9], [0, 5, 9]),
("backfill", 0, [0, 5, 9], [0, 5, 9]),
("nearest", 0, [0, 5, 9], [0, 5, 9]),
("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]),
],
)
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("listtype", [list, tuple, Series, np.array])
@pytest.mark.parametrize(
"tolerance, expected",
list(
zip(
[[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1], [-1, 2, 9]],
)
),
)
def test_get_indexer_nearest_listlike_tolerance(
self, tolerance, expected, listtype
):
index = Index(np.arange(10))
actual = index.get_indexer(
[0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance)
)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], method="nearest", limit=1)
with pytest.raises(ValueError, match="tolerance size must match"):
index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3])
@pytest.mark.parametrize(
"method,expected",
[("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])],
)
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("idx_dtype", ["int64", "float64", "uint64", "range"])
@pytest.mark.parametrize("method", ["get_indexer", "get_indexer_non_unique"])
def test_get_indexer_numeric_index_boolean_target(self, method, idx_dtype):
# GH 16877
if idx_dtype == "range":
numeric_index = RangeIndex(4)
else:
numeric_index = Index(np.arange(4, dtype=idx_dtype))
other = Index([True, False, True])
result = getattr(numeric_index, method)(other)
expected = np.array([-1, -1, -1], dtype=np.intp)
if method == "get_indexer":
tm.assert_numpy_array_equal(result, expected)
else:
missing = np.arange(3, dtype=np.intp)
tm.assert_numpy_array_equal(result[0], expected)
tm.assert_numpy_array_equal(result[1], missing)
@pytest.mark.parametrize("method", ["pad", "backfill", "nearest"])
def test_get_indexer_with_method_numeric_vs_bool(self, method):
left = Index([1, 2, 3])
right = Index([True, False])
with pytest.raises(TypeError, match="Cannot compare"):
left.get_indexer(right, method=method)
with pytest.raises(TypeError, match="Cannot compare"):
right.get_indexer(left, method=method)
def test_get_indexer_numeric_vs_bool(self):
left = Index([1, 2, 3])
right = Index([True, False])
res = left.get_indexer(right)
expected = -1 * np.ones(len(right), dtype=np.intp)
tm.assert_numpy_array_equal(res, expected)
res = right.get_indexer(left)
expected = -1 * np.ones(len(left), dtype=np.intp)
tm.assert_numpy_array_equal(res, expected)
res = left.get_indexer_non_unique(right)[0]
expected = -1 * np.ones(len(right), dtype=np.intp)
tm.assert_numpy_array_equal(res, expected)
res = right.get_indexer_non_unique(left)[0]
expected = -1 * np.ones(len(left), dtype=np.intp)
tm.assert_numpy_array_equal(res, expected)
def test_get_indexer_float64(self):
idx = Index([0.0, 1.0, 2.0], dtype=np.float64)
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = [-0.1, 0.5, 1.1]
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
def test_get_indexer_nan(self):
# GH#7820
result = Index([1, 2, np.nan], dtype=np.float64).get_indexer([np.nan])
expected = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_int64(self):
index = Index(range(0, 20, 2), dtype=np.int64)
target = Index(np.arange(10), dtype=np.int64)
indexer = index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Index(np.arange(10), dtype=np.int64)
indexer = index.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Index(np.arange(10), dtype=np.int64)
indexer = index.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_uint64(self, index_large):
target = Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target)
expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
@pytest.mark.parametrize("val, val2", [(4, 5), (4, 4), (4, NA), (NA, NA)])
def test_get_loc_masked(self, val, val2, any_numeric_ea_and_arrow_dtype):
# GH#39133
idx = Index([1, 2, 3, val, val2], dtype=any_numeric_ea_and_arrow_dtype)
result = idx.get_loc(2)
assert result == 1
with pytest.raises(KeyError, match="9"):
idx.get_loc(9)
def test_get_loc_masked_na(self, any_numeric_ea_and_arrow_dtype):
# GH#39133
idx = Index([1, 2, NA], dtype=any_numeric_ea_and_arrow_dtype)
result = idx.get_loc(NA)
assert result == 2
idx = Index([1, 2, NA, NA], dtype=any_numeric_ea_and_arrow_dtype)
result = idx.get_loc(NA)
tm.assert_numpy_array_equal(result, np.array([False, False, True, True]))
idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype)
with pytest.raises(KeyError, match="NA"):
idx.get_loc(NA)
def test_get_loc_masked_na_and_nan(self):
# GH#39133
idx = Index(
FloatingArray(
np.array([1, 2, 1, np.nan]), mask=np.array([False, False, True, False])
)
)
result = idx.get_loc(NA)
assert result == 2
result = idx.get_loc(np.nan)
assert result == 3
idx = Index(
FloatingArray(np.array([1, 2, 1.0]), mask=np.array([False, False, True]))
)
result = idx.get_loc(NA)
assert result == 2
with pytest.raises(KeyError, match="nan"):
idx.get_loc(np.nan)
idx = Index(
FloatingArray(
np.array([1, 2, np.nan]), mask=np.array([False, False, False])
)
)
result = idx.get_loc(np.nan)
assert result == 2
with pytest.raises(KeyError, match="NA"):
idx.get_loc(NA)
@pytest.mark.parametrize("val", [4, 2])
def test_get_indexer_masked_na(self, any_numeric_ea_and_arrow_dtype, val):
# GH#39133
idx = Index([1, 2, NA, 3, val], dtype=any_numeric_ea_and_arrow_dtype)
result = idx.get_indexer_for([1, NA, 5])
expected = np.array([0, 2, -1])
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("dtype", ["boolean", "bool[pyarrow]"])
def test_get_indexer_masked_na_boolean(self, dtype):
# GH#39133
if dtype == "bool[pyarrow]":
pytest.importorskip("pyarrow")
idx = Index([True, False, NA], dtype=dtype)
result = idx.get_loc(False)
assert result == 1
result = idx.get_loc(NA)
assert result == 2
def test_get_indexer_arrow_dictionary_target(self):
pa = pytest.importorskip("pyarrow")
target = Index(
ArrowExtensionArray(
pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8()))
)
)
idx = Index([1])
result = idx.get_indexer(target)
expected = np.array([0, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result_1, result_2 = idx.get_indexer_non_unique(target)
expected_1, expected_2 = np.array([0, -1], dtype=np.int64), np.array(
[1], dtype=np.int64
)
tm.assert_numpy_array_equal(result_1, expected_1)
tm.assert_numpy_array_equal(result_2, expected_2)
class TestWhere:
@pytest.mark.parametrize(
"index",
[
Index(np.arange(5, dtype="float64")),
Index(range(0, 20, 2), dtype=np.int64),
Index(np.arange(5, dtype="uint64")),
],
)
def test_where(self, listlike_box, index):
cond = [True] * len(index)
expected = index
result = index.where(listlike_box(cond))
cond = [False] + [True] * (len(index) - 1)
expected = Index([index._na_value] + index[1:].tolist(), dtype=np.float64)
result = index.where(listlike_box(cond))
tm.assert_index_equal(result, expected)
def test_where_uint64(self):
idx = Index([0, 6, 2], dtype=np.uint64)
mask = np.array([False, True, False])
other = np.array([1], dtype=np.int64)
expected = Index([1, 6, 1], dtype=np.uint64)
result = idx.where(mask, other)
tm.assert_index_equal(result, expected)
result = idx.putmask(~mask, other)
tm.assert_index_equal(result, expected)
def test_where_infers_type_instead_of_trying_to_convert_string_to_float(self):
# GH 32413
index = Index([1, np.nan])
cond = index.notna()
other = Index(["a", "b"], dtype="string")
expected = Index([1.0, "b"])
result = index.where(cond, other)
tm.assert_index_equal(result, expected)
class TestTake:
@pytest.mark.parametrize("idx_dtype", [np.float64, np.int64, np.uint64])
def test_take_preserve_name(self, idx_dtype):
index = Index([1, 2, 3, 4], dtype=idx_dtype, name="foo")
taken = index.take([3, 0, 1])
assert index.name == taken.name
def test_take_fill_value_float64(self):
# GH 12631
idx = Index([1.0, 2.0, 3.0], name="xxx", dtype=np.float64)
result = idx.take(np.array([1, 0, -1]))
expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = Index([2.0, 1.0, np.nan], dtype=np.float64, name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
def test_take_fill_value_ints(self, dtype):
# see gh-12631
idx = Index([1, 2, 3], dtype=dtype, name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = Index([2, 1, 3], dtype=dtype, name="xxx")
tm.assert_index_equal(result, expected)
name = type(idx).__name__
msg = f"Unable to fill values because {name} cannot contain NA"
# fill_value=True
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -1]), fill_value=True)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = Index([2, 1, 3], dtype=dtype, name="xxx")
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestContains:
@pytest.mark.parametrize("dtype", [np.float64, np.int64, np.uint64])
def test_contains_none(self, dtype):
# GH#35788 should return False, not raise TypeError
index = Index([0, 1, 2, 3, 4], dtype=dtype)
assert None not in index
def test_contains_float64_nans(self):
index = Index([1.0, 2.0, np.nan], dtype=np.float64)
assert np.nan in index
def test_contains_float64_not_nans(self):
index = Index([1.0, 2.0, np.nan], dtype=np.float64)
assert 1.0 in index
class TestSliceLocs:
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_float_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=""):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=""):
index.slice_locs(end=1.5)
class TestGetSliceBounds:
@pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])
def test_get_slice_bounds_within(self, side, expected):
index = Index(range(6))
result = index.get_slice_bound(4, side=side)
assert result == expected
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)])
def test_get_slice_bounds_outside(self, side, expected, bound):
index = Index(range(6))
result = index.get_slice_bound(bound, side=side)
assert result == expected
/xai/brain/wordbase/nouns/_narrators.py | cash2one/xai | MIT | Python
from xai.brain.wordbase.nouns._narrator import _NARRATOR
# class header
class _NARRATORS(_NARRATOR, ):
def __init__(self,):
_NARRATOR.__init__(self)
self.name = "NARRATORS"
self.specie = 'nouns'
self.basic = "narrator"
self.jsondata = {}
/Python/03 String/04 Mutations.py | almamuncsit/HackerRank | no_license | Python
def mutate_string(string, position, character):
str_list = list(string)
str_list[position] = character
return ''.join(str_list)
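def _example_mutation():
    # Hedged illustration with sample values (not part of the original solution):
    # replacing index 5 of "abracadabra" with "k" yields "abrackdabra".
    return mutate_string("abracadabra", 5, "k")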
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
/curso_py/ex014.py | felipeonf/Exercises_Python | no_license | Python
import random
lista = []
i = 0
# Ask for the names of four students; the prompt is Portuguese for
# "Enter the name of the Nth student".
while i < 4:
    lista.append(input(f'Digite o nome do {1+i}° aluno: '))
    i += 1
# Shuffle the names and print the presentation order ("A ordem de
# apresentação escolhida foi" = "The chosen presentation order was").
random.shuffle(lista)
print(f'A ordem de apresentação escolhida foi {lista}')
/online_inference/requests/make_request.py | made-ml-in-prod-2021/bulaevvi | no_license | Python
import pandas as pd
import requests
import time
ENDPOINT = "http://127.0.0.1:8000/predict"
REQUEST_FILE = "requests.csv"
NUM_REQUESTS = 100
if __name__ == "__main__":
data = pd.read_csv(REQUEST_FILE)
for i in range(NUM_REQUESTS):
request_data = data.iloc[i].to_dict()
request_data["id"] = i
response = requests.get(
ENDPOINT,
json=[request_data],
)
print(f'Request: {request_data}')
print(f'Response CODE: {response.status_code}')
print(f'Response BODY: {response.json()}')
/matscholar_web/tests/test_util.py | materialsintelligence/matscholar-web | MIT | Python
import json
import os
import unittest
import matscholar_web
from matscholar_web.util import load_static_data_file
"""
Tests for core utilities.
"""
class TestCoreUtils(unittest.TestCase):
def setUp(self) -> None:
rootdir = os.path.dirname(os.path.abspath(matscholar_web.__file__))
data_dir = os.path.join(rootdir, "assets/data/")
self.test_fname = "test_file.json"
self.test_fpath = os.path.join(data_dir, self.test_fname)
self.true_data = {"a": [1, 2, 3], "b": "something"}
def test_load_static_file(self):
with open(self.test_fpath, "w") as f:
json.dump(self.true_data, f)
loaded_data = load_static_data_file(self.test_fname)
self.assertEqual(self.true_data, loaded_data)
def tearDown(self) -> None:
os.remove(self.test_fpath)
/plugins/module_utils/network/ftd/operation.py | ansible-collection-migration/misc.not_a_real_collection | no_license | Python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.ftd.configuration import ParamName, PATH_PARAMS_FOR_DEFAULT_OBJ
class FtdOperations:
"""
Utility class for common operation names
"""
GET_SYSTEM_INFO = 'getSystemInformation'
GET_MANAGEMENT_IP_LIST = 'getManagementIPList'
GET_DNS_SETTING_LIST = 'getDeviceDNSSettingsList'
GET_DNS_SERVER_GROUP = 'getDNSServerGroup'
def get_system_info(resource):
"""
Executes `getSystemInformation` operation and returns information about the system.
:param resource: a BaseConfigurationResource object to connect to the device
:return: a dictionary with system information about the device and its software
"""
path_params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
system_info = resource.execute_operation(FtdOperations.GET_SYSTEM_INFO, path_params)
return system_info
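# A minimal usage sketch of the helper above (illustrative only; the
# BaseConfigurationResource constructor arguments and the exact keys of the
# returned dictionary are assumptions, not taken from this module):
#
#     resource = BaseConfigurationResource(connection)
#     system_info = get_system_info(resource)
#     version = system_info.get('softwareVersion')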
| [
"[email protected]"
] | |
91322d262a1ae0f8f067202a178f058c74d25da4 | 13800b7827598e76428a335559b7bf11867ec2f0 | /python/ccxt/async_support/ace.py | f298281814bfa4c1defe1543dc960ee5fdbfaf5b | [
"MIT"
] | permissive | ccxt/ccxt | b40a0466f5c430a3c0c6026552ae697aa80ba6c6 | e4065f6a490e6fc4dd7a72b375428b2faa570668 | refs/heads/master | 2023-09-04T03:41:29.787733 | 2023-09-03T19:25:57 | 2023-09-03T19:25:57 | 91,253,698 | 30,798 | 8,190 | MIT | 2023-09-14T21:59:09 | 2017-05-14T15:41:56 | Python | UTF-8 | Python | false | false | 41,723 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.abstract.ace import ImplicitAPI
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ace(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(ace, self).describe(), {
'id': 'ace',
'name': 'ACE',
'countries': ['TW'], # Taiwan
'version': 'v2',
'rateLimit': 100,
'pro': False,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'createOrder': True,
'editOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchMarginMode': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchPositionMode': False,
'fetchPositions': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': False,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactionFees': False,
'fetchTransactions': False,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawal': False,
'fetchWithdrawals': False,
'setLeverage': False,
'setMarginMode': False,
'transfer': False,
'withdraw': False,
'ws': False,
},
'timeframes': {
'1m': 1,
'5m': 5,
'10m': 10,
'30m': 10,
'1h': 60,
'2h': 120,
'4h': 240,
'8h': 480,
'12h': 720,
'1d': 24,
'1w': 70,
'1M': 31,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/216908003-fb314cf6-e66e-471c-b91d-1d86e4baaa90.jpg',
'api': {
'public': 'https://ace.io/polarisex',
'private': 'https://ace.io/polarisex/open',
},
'www': 'https://ace.io/',
'doc': [
'https://github.com/ace-exchange/ace-offical-api-docs',
],
'fees': 'https://helpcenter.ace.io/hc/zh-tw/articles/360018609132-%E8%B2%BB%E7%8E%87%E8%AA%AA%E6%98%8E',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'oapi/v2/list/tradePrice',
'oapi/v2/list/marketPair',
'open/v2/public/getOrderBook',
],
},
'private': {
'post': [
'v2/coin/customerAccount',
'v2/kline/getKline',
'v2/order/order',
'v2/order/cancel',
'v2/order/getOrderList',
'v2/order/showOrderStatus',
'v2/order/showOrderHistory',
'v2/order/getTradeList',
],
},
},
'fees': {
'trading': {
'percentage': True,
'maker': self.parse_number('0.0005'),
'taker': self.parse_number('0.001'),
},
},
'options': {
'brokerId': 'ccxt',
},
'precisionMode': TICK_SIZE,
'exceptions': {
'exact': {
'2003': InvalidOrder,
'2004': InvalidOrder,
'2005': InvalidOrder,
'2021': InsufficientFunds,
'2036': InvalidOrder,
'2039': InvalidOrder,
'2053': InvalidOrder,
'2061': BadRequest,
'2063': InvalidOrder,
'9996': BadRequest,
'10012': AuthenticationError,
'20182': AuthenticationError,
'20183': InvalidOrder,
},
'broad': {
},
},
'commonCurrencies': {
},
})
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for ace
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#oapi-api---market-pair
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
response = await self.publicGetOapiV2ListMarketPair()
#
# [
# {
# "symbol":"BTC/USDT",
# "base":"btc",
# "baseCurrencyId": "122"
# "quote":"usdt",
# "basePrecision":"8",
# "quotePrecision":"5",
# "minLimitBaseAmount":"0.1",
# "maxLimitBaseAmount":"480286"
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
base = self.safe_string(market, 'base')
baseCode = self.safe_currency_code(base)
quote = self.safe_string(market, 'quote')
quoteCode = self.safe_currency_code(quote)
symbol = base + '/' + quote
result.append({
'id': self.safe_string(market, 'symbol'),
'uppercaseId': None,
'symbol': symbol,
'base': baseCode,
'baseId': self.safe_integer(market, 'baseCurrencyId'),
'quote': quoteCode,
'quoteId': self.safe_integer(market, 'quoteCurrencyId'),
'settle': None,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'derivative': False,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'limits': {
'amount': {
'min': self.safe_number(market, 'minLimitBaseAmount'),
'max': self.safe_number(market, 'maxLimitBaseAmount'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'min': None,
'max': None,
},
},
'precision': {
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'quotePrecision'))),
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'basePrecision'))),
},
'active': None,
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "base_volume":229196.34035399999,
# "last_price":11881.06,
# "quote_volume":19.2909
# }
#
marketId = self.safe_string(ticker, 'id')
symbol = self.safe_symbol(marketId, market)
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': None,
'low': None,
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': self.safe_string(ticker, 'last_price'),
'last': self.safe_string(ticker, 'last_price'),
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'base_volume'),
'quoteVolume': self.safe_string(ticker, 'quote_volume'),
'info': ticker,
}, market)
async def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#oapi-api---trade-data
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetOapiV2ListTradePrice(params)
marketId = market['id']
ticker = self.safe_value(response, marketId, {})
#
# {
# "BTC/USDT":{
# "base_volume":229196.34035399999,
# "last_price":11881.06,
# "quote_volume":19.2909
# }
# }
#
return self.parse_ticker(ticker, market)
async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#oapi-api---trade-data
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetOapiV2ListTradePrice()
#
# {
# "BTC/USDT":{
# "base_volume":229196.34035399999,
# "last_price":11881.06,
# "quote_volume":19.2909
# }
# }
#
tickers = []
pairs = list(response.keys())
for i in range(0, len(pairs)):
marketId = pairs[i]
market = self.safe_market(marketId)
rawTicker = self.safe_value(response, marketId)
ticker = self.parse_ticker(rawTicker, market)
tickers.append(ticker)
return self.filter_by_array(tickers, 'symbol', symbols)
async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---order-books
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
market = self.market(symbol)
request = {
'quoteCurrencyId': market['quoteId'],
'baseCurrencyId': market['baseId'],
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetOpenV2PublicGetOrderBook(self.extend(request, params))
#
# {
# "attachment": {
# "baseCurrencyId": "2",
# "quoteCurrencyId": "14",
# "baseCurrencyName": "BTC",
# "quoteCurrencyName": "USDT",
# "bids": [
# [
# "0.0009",
# "19993.53"
# ],
# [
# "0.001",
# "19675.33"
# ],
# [
# "0.001",
# "19357.13"
# ]
# ],
# "asks": [
# [
# "0.001",
# "20629.92"
# ],
# [
# "0.001",
# "20948.12"
# ]
# ]
# },
# "message": null,
# "parameters": null,
# "status": 200
# }
#
orderBook = self.safe_value(response, 'attachment')
return self.parse_order_book(orderBook, market['symbol'], None, 'bids', 'asks')
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "changeRate": 0,
# "volume": 0,
# "closePrice": 101000.0,
# "lowPrice": 101000.0,
# "highPrice": 101000.0,
# "highPrice": 1573195740000L,
# "openPrice": 101000.0,
# "current": 101000.0,
# "currentTime": "2019-11-08 14:49:00",
# "createTime": "2019-11-08 14:49:00"
# }
#
dateTime = self.safe_string(ohlcv, 'createTime')
timestamp = self.parse8601(dateTime)
if timestamp is not None:
timestamp = timestamp - 28800000 # 8 hours
return [
timestamp,
self.safe_number(ohlcv, 'openPrice'),
self.safe_number(ohlcv, 'highPrice'),
self.safe_number(ohlcv, 'lowPrice'),
self.safe_number(ohlcv, 'closePrice'),
self.safe_number(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---klinecandlestick-data
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the ace api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'duration': self.timeframes[timeframe],
'quoteCurrencyId': market['quoteId'],
'baseCurrencyId': market['baseId'],
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['startTime'] = since
response = await self.privatePostV2KlineGetKline(self.extend(request, params))
data = self.safe_value(response, 'attachment', [])
#
# {
# "attachment":[
# {
# "changeRate": 0,
# "closePrice": 101000.0,
# "volume": 0,
# "lowPrice": 101000.0,
# "highPrice": 101000.0,
# "highPrice": 1573195740000L,
# "openPrice": 101000.0,
# "current": 101000.0,
# "currentTime": "2019-11-08 14:49:00",
# "createTime": "2019-11-08 14:49:00"
# }
# ]
# }
#
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open',
'2': 'closed',
'4': 'canceled',
'5': 'canceled',
}
return self.safe_string(statuses, status, None)
def parse_order(self, order, market=None):
#
# createOrder
# "15697850529570392100421100482693"
#
# fetchOpenOrders
# {
# "uid": 0,
# "orderNo": "16113081376560890227301101413941",
# "orderTime": "2021-01-22 17:35:37",
# "orderTimeStamp": 1611308137656,
# "baseCurrencyId": 1,
# "baseCurrencyName": "TWD",
# "quoteCurrencyId": 14,
# "quoteCurrencyName": "USDT",
# "buyOrSell": "1",
# "num": "6.0000000000000000",
# "price": "32.5880000000000000",
# "remainNum": "2.0000000000000000",
# "tradeNum": "4.0000000000000000",
# "tradePrice": "31.19800000000000000000",
# "tradeAmount": "124.7920000000000000",
# "tradeRate": "0.66666666666666666667",
# "status": 1,
# "type": 1
# }
#
id = None
timestamp = None
symbol = None
price = None
amount = None
side = None
type = None
status = None
filled = None
remaining = None
average = None
if isinstance(order, str):
id = order
else:
id = self.safe_string(order, 'orderNo')
timestamp = self.safe_integer(order, 'orderTimeStamp')
if timestamp is None:
dateTime = self.safe_string(order, 'orderTime')
if dateTime is not None:
timestamp = self.parse8601(dateTime)
timestamp = timestamp - 28800000 # 8 hours
orderSide = self.safe_number(order, 'buyOrSell')
if orderSide is not None:
side = 'buy' if (orderSide == 1) else 'sell'
amount = self.safe_string(order, 'num')
price = self.safe_string(order, 'price')
quoteId = self.safe_string(order, 'quoteCurrencyName')
baseId = self.safe_string(order, 'baseCurrencyName')
if quoteId is not None and baseId is not None:
symbol = baseId + '/' + quoteId
orderType = self.safe_number(order, 'type')
if orderType is not None:
type = 'limit' if (orderType == 1) else 'market'
filled = self.safe_string(order, 'tradeNum')
remaining = self.safe_string(order, 'remainNum')
status = self.parse_order_status(self.safe_string(order, 'status'))
average = self.safe_string(order, 'averagePrice')
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
'info': order,
}, market)
async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---new-order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
orderType = type.upper()
orderSide = side.upper()
request = {
'baseCurrencyId': market['baseId'],
'quoteCurrencyId': market['quoteId'],
'type': 1 if (orderType == 'LIMIT') else 2,
'buyOrSell': 1 if (orderSide == 'BUY') else 2,
'num': self.amount_to_precision(symbol, amount),
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostV2OrderOrder(self.extend(request, params))
#
# {
# "attachment": "15697850529570392100421100482693",
# "message": null,
# "parameters": null,
# "status": 200
# }
#
data = self.safe_value(response, 'attachment')
return self.parse_order(data, market)
async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---cancel-order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {
'orderNo': id,
}
response = await self.privatePostV2OrderCancel(self.extend(request, params))
#
# {
# "attachment": 200,
# "message": null,
# "parameters": null,
# "status": 200
# }
#
return response
async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---order-status
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {
'orderNo': id,
}
response = await self.privatePostV2OrderShowOrderStatus(self.extend(request, params))
#
# {
# "attachment": {
# "buyOrSell": 1,
# "averagePrice": "490849.75000000",
# "num": "0.00000000",
# "orderTime": "2022-11-29 18:03:06.318",
# "price": "490849.75000000",
# "status": 4,
# "tradeNum": "0.02697000",
# "remainNum": "0.97303000",
# "baseCurrencyId": 2,
# "baseCurrencyName": "BTC",
# "quoteCurrencyId": 1,
# "quoteCurrencyName": "TWD",
# "orderNo": "16697161898600391472461100244406"
# },
# "message": null,
# "parameters": null,
# "status": 200
# }
#
data = self.safe_value(response, 'attachment')
return self.parse_order(data, None)
async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---order-list
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
        :param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the ace api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires the symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'quoteCurrencyId': market['quoteId'],
'baseCurrencyId': market['baseId'],
# 'start': 0,
}
if limit is not None:
request['size'] = limit
response = await self.privatePostV2OrderGetOrderList(self.extend(request, params))
orders = self.safe_value(response, 'attachment')
#
# {
# "attachment": [
# {
# "uid": 0,
# "orderNo": "16113081376560890227301101413941",
# "orderTime": "2021-01-22 17:35:37",
# "orderTimeStamp": 1611308137656,
# "baseCurrencyId": 1,
# "baseCurrencyName": "TWD",
# "quoteCurrencyId": 14,
# "quoteCurrencyName": "USDT",
# "buyOrSell": "1",
# "num": "6.0000000000000000",
# "price": "32.5880000000000000",
# "remainNum": "2.0000000000000000",
# "tradeNum": "4.0000000000000000",
# "tradePrice": "31.19800000000000000000",
# "tradeAmount": "124.7920000000000000",
# "tradeRate": "0.66666666666666666667",
# "status": 1,
# "type": 1
# }
# ],
# "message": null,
# "parameters": null,
# "status": 200
# }
#
return self.parse_orders(orders, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchOrderTrades
# {
# "amount": 0.0030965,
# "tradeNo": "15681920522485652100751000417788",
# "price": "0.03096500",
# "num": "0.10000000",
# "bi": 1,
# "time": "2019-09-11 16:54:12.248"
# }
#
# fetchMyTrades
# {
# "buyOrSell": 1,
# "orderNo": "16708156853695560053601100247906",
# "num": "1",
# "price": "16895",
# "orderAmount": "16895",
# "tradeNum": "0.1",
# "tradePrice": "16895",
# "tradeAmount": "1689.5",
# "fee": "0",
# "feeSave": "0",
# "status": 1,
# "isSelf": False,
# "tradeNo": "16708186395087940051961000274150",
# "tradeTime": "2022-12-12 12:17:19",
# "tradeTimestamp": 1670818639508,
# "quoteCurrencyId": 14,
# "quoteCurrencyName": "USDT",
# "baseCurrencyId": 2,
# "baseCurrencyName": "BTC"
# }
id = self.safe_string(trade, 'tradeNo')
price = self.safe_string(trade, 'price')
amount = self.safe_string(trade, 'num')
timestamp = self.safe_integer(trade, 'tradeTimestamp')
if timestamp is None:
datetime = self.safe_string_2(trade, 'time', 'tradeTime')
timestamp = self.parse8601(datetime)
timestamp = timestamp - 28800000 # 8 hours normalize timestamp
symbol = market['symbol']
quoteId = self.safe_string(trade, 'quoteCurrencyName')
baseId = self.safe_string(trade, 'baseCurrencyName')
if quoteId is not None and baseId is not None:
symbol = baseId + '/' + quoteId
side = None
tradeSide = self.safe_number(trade, 'buyOrSell')
if tradeSide is not None:
side = 'buy' if (tradeSide == 1) else 'sell'
feeString = self.safe_string(trade, 'fee')
fee = None
if feeString is not None:
feeSaveString = self.safe_string(trade, 'feeSave')
fee = {
'cost': Precise.string_sub(feeString, feeSaveString),
'currency': quoteId,
}
return self.safe_trade({
'info': trade,
'id': id,
'order': self.safe_string(trade, 'orderNo'),
'symbol': symbol,
'side': side,
'type': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': None,
'fee': fee,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}, market)
async def fetch_order_trades(self, id: str, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all the trades made from a single order
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---order-history
:param str id: order id
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades to retrieve
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
await self.load_markets()
market = self.safe_market(symbol)
request = {
'orderNo': id,
}
response = await self.privatePostV2OrderShowOrderHistory(self.extend(request, params))
#
# {
# "attachment": {
# "order": {
# "buyOrSell": 1,
# "averagePrice": "491343.74000000",
# "num": "1.00000000",
# "orderTime": "2022-11-29 18:32:22.232",
# "price": "491343.74000000",
# "status": 1,
# "tradeNum": "0.01622200",
# "remainNum": "0.98377800",
# "baseCurrencyId": 2,
# "baseCurrencyName": "BTC",
# "quoteCurrencyId": 1,
# "quoteCurrencyName": "TWD",
# "orderNo": "16697179457740441472471100214402"
# },
# "trades": [
# {
# "price": "491343.74000000",
# "num": "0.01622200",
# "time": "2022-11-29 18:32:25.789",
# "tradeNo": "16697179457897791471461000223437",
# "amount": "7970.57815028"
# }
# ]
# },
# "message": null,
# "parameters": null,
# "status": 200
# }
#
data = self.safe_value(response, 'attachment')
trades = self.safe_value(data, 'trades')
if trades is None:
return trades
return self.parse_trades(trades, market, since, limit)
async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---trade-list
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the ace api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
await self.load_markets()
market = self.safe_market(symbol)
request = {
# 'buyOrSell': 1,
# 'start': 0,
}
if market['id'] is not None:
request['quoteCurrencyId'] = market['quoteId']
request['baseCurrencyId'] = market['baseId']
if limit is not None:
request['size'] = limit # default 10, max 500
response = await self.privatePostV2OrderGetTradeList(self.extend(request, params))
#
# {
# "attachment": [
# {
# "buyOrSell": 1,
# "orderNo": "16708156853695560053601100247906",
# "num": "1",
# "price": "16895",
# "orderAmount": "16895",
# "tradeNum": "0.1",
# "tradePrice": "16895",
# "tradeAmount": "1689.5",
# "fee": "0",
# "feeSave": "0",
# "status": 1,
# "isSelf": False,
# "tradeNo": "16708186395087940051961000274150",
# "tradeTime": "2022-12-12 12:17:19",
# "tradeTimestamp": 1670818639508,
# "quoteCurrencyId": 14,
# "quoteCurrencyName": "USDT",
# "baseCurrencyId": 2,
# "baseCurrencyName": "BTC"
# }
# ],
# "message": null,
# "parameters": null,
# "status": 200
# }
#
trades = self.safe_value(response, 'attachment', [])
return self.parse_trades(trades, market, since, limit)
def parse_balance(self, response):
#
# [
# {
# "currencyId": 4,
# "amount": 6.896,
# "cashAmount": 6.3855,
# "uid": 123,
# "currencyName": "BTC"
# }
# ]
#
result = {
'info': response,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currencyName')
code = self.safe_currency_code(currencyId)
amount = self.safe_string(balance, 'amount')
available = self.safe_string(balance, 'cashAmount')
account = {
'free': available,
'total': amount,
}
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
see https://github.com/ace-exchange/ace-official-api-docs/blob/master/api_v2.md#open-api---account-balance
:param dict [params]: extra parameters specific to the ace api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
await self.load_markets()
response = await self.privatePostV2CoinCustomerAccount(params)
balances = self.safe_value(response, 'attachment', [])
#
# {
# "attachment":[
# {
# "currencyId": 4,
# "amount": 6.896,
# "cashAmount": 6.3855,
# "uid": 123,
# "currencyName": "BTC"
# }
# ],
# message: null,
# parameters: null,
# status: '200'
# }
#
return self.parse_balance(balances)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if headers is None:
headers = {}
if api == 'private':
self.check_required_credentials()
nonce = self.milliseconds()
auth = 'ACE_SIGN' + self.secret
data = self.extend({
'apiKey': self.apiKey,
'timeStamp': nonce,
}, params)
dataKeys = list(data.keys())
sortedDataKeys = self.sort_by(dataKeys, 0)
for i in range(0, len(sortedDataKeys)):
key = sortedDataKeys[i]
auth += self.safe_string(data, key)
signature = self.hash(self.encode(auth), 'sha256', 'hex')
data['signKey'] = signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
if method == 'POST':
brokerId = self.safe_string(self.options, 'brokerId')
if brokerId is not None:
headers['Referer'] = brokerId
body = self.urlencode(data)
elif api == 'public' and method == 'GET':
if query:
url += '?' + self.urlencode(query)
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None # fallback to the default error handler
feedback = self.id + ' ' + body
status = self.safe_number(response, 'status', 200)
if status > 200:
self.throw_exactly_matched_exception(self.exceptions['exact'], status, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], status, feedback)
return None
| [
"[email protected]"
] | |
66c716da60208194ea298347d9a3cd37cde9cdbe | 51e6015db62fd30ff9eaa724926e8373aedf796e | /custom_components/zhibot/chatbot.py | f071c55cac18fde4f0e9057457baed88c9e1df32 | [] | no_license | xxx2016/HAExtra | ed0cb3c0a5876c11894bde8a96fde31ca8a9e3a5 | e71dc33f51455e2d91ab2d7eec39a931d06847d9 | refs/heads/master | 2023-02-24T04:41:38.779325 | 2021-01-25T13:04:12 | 2021-01-25T13:04:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,692 | py |
from homeassistant.components.http import HomeAssistantView
from homeassistant.util.json import load_json, save_json
# from homeassistant.components.http import KEY_HASS
# Logging
import logging
_LOGGER = logging.getLogger(__name__)
class chatbotView(HomeAssistantView):
"""View to handle Configuration requests."""
def __init__(self, hass, conf):
self.name = self.__class__.__name__.rstrip('View').lower()
self.url = '/' + self.name
self.requires_auth = False
self.hass = hass
self.password = conf.get('password')
if self.password is None: # Auth: config UI confirmation, intead of pre shared password
self._configuring = None
self.conf = load_json(hass.config.path('.' + self.name))
if not self.conf:
self.conf = []
async def post(self, request):
try:
# request[KEY_REAL_IP]
# request.app[KEY_HASS]
data = await request.json()
_LOGGER.debug("REQUEST: %s", data)
            answer = await self.handle(data) if self.check(request, data) else "没有访问授权!"  # "No access authorization!"
except:
import traceback
_LOGGER.error(traceback.format_exc())
            answer = "程序出错啦!"  # "The program ran into an error!"
_LOGGER.debug("RESPONSE: %s", answer)
return self.json(self.response(answer))
def response(self, answer):
return None
async def handle(self, data):
        return "未能处理"  # "Unable to handle the request"
def check(self, request, data):
if self.password is not None:
return self.password == request.query.get('password') or self.password == ''
return self.config(data)
def config(self, data):
configurator = self.hass.components.configurator
if self._configuring:
configurator.async_request_done(self._configuring)
def config_callback(fields):
configurator.request_done(self._configuring)
self._configuring = None
_LOGGER.debug(fields)
if fields.get('agree') == 'ok':
self.config_done(data)
save_json(self.hass.config.path('.' + self.name), self.conf)
self._configuring = configurator.async_request_config(
'智加加', config_callback,
description=self.config_desc(data),
            submit_caption='完成',  # "Done"
            fields=[{'id': 'agree', 'name': '如果允许访问,请输入"ok"'}],  # "If access is allowed, enter 'ok'"
)
return False
def config_done(self, data):
pass
def config_desc(self, data):
        return "授权访问"  # "Authorize access"
| [
"[email protected]"
] | |
46dbd48b6d5e6c8ffb047612a78d868f11973154 | 17ba39d104403a36ecdfe83da0d5424feb3fdf24 | /accounts/serializers.py | 5758bb9f9ed301c9eb631dfdfec009cdfca7f20d | [] | no_license | bellomusodiq/obs | 46bc3bfc316d224c732a8747649016ca2fdf9493 | a207bf51c2e21c10996e53f01e56368b648c7e6e | refs/heads/master | 2020-04-02T21:35:46.813250 | 2018-10-30T11:11:38 | 2018-10-30T11:11:38 | 151,956,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,275 | py | from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from .models import User, CuponCode
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
import string, random
class UserSerializer(serializers.ModelSerializer):
def gen_token(self):
choices = string.ascii_letters + string.digits + string.hexdigits
gen = ''
for i in range(10):
gen += random.choice(choices)
return gen
class Meta:
model = User
fields = (
'id', 'email', 'username', 'firstname', 'lastname',
'referral_allowance', 'read_allowance', 'comment_allowance', 'referral_code',
'is_admin', 'is_active', 'password', 'cupon_code'
)
read_only_fields = ('last_login','activation_token', 'is_active')
extra_kwargs = {
'password': {'write_only': True},
'referral_allowance': {'read_only': True},
'read_allowance': {'read_only': True},
'referral_code': {'read_only': True},
}
def validate_cupon_code(self, value):
if value in [code.code for code in CuponCode.objects.all()] or value == '':
return value
if(value in [user.referral_code for user in User.objects.filter(is_admin=False)]):
return value
raise serializers.ValidationError('Incorrect cupon code, input a correct cupon code or leave blank')
def create(self, validated_data, *args, **kwargs):
user = User(
username = validated_data['username'],
email = validated_data['email'],
firstname = validated_data['firstname'],
lastname = validated_data['lastname'],
cupon_code = validated_data['cupon_code'],
referral_code = self.gen_token()
)
user.set_password(validated_data['password'])
user.save()
if not user.is_admin:
cupon_code = validated_data['cupon_code']
if(cupon_code in [user.referral_code for user in User.objects.filter(is_admin=False).exclude(pk=user.pk)]):
user_obj = User.objects.get(referral_code=cupon_code)
user_obj.referral_allowance = float(user_obj.referral_allowance) + 200
user_obj.save()
if(cupon_code in [cupon_code.code for cupon_code in CuponCode.objects.all()]):
user.referral_allowance = float(user.referral_allowance) + 500
CuponCode.objects.get(code=cupon_code).delete()
user.save()
return user
class UserLoginSerializer(serializers.ModelSerializer):
token = serializers.CharField(allow_blank=True, read_only=True)
username = serializers.CharField()
class Meta:
fields = [
'username',
'password',
'email',
'token',
'id',
'is_active',
'is_admin',
]
model = User
extra_kwargs = {
"password": {"write_only": True},
"email": {"read_only": True},
"is_active": {"read_only": True},
"is_admin": {"read_only": True},
"id": {"read_only": True},
}
def validate(self, data):
username = data.get('username', None)
password = data['password']
if not username:
raise serializers.ValidationError('A username is required to login')
        user = User.objects.filter(username=username).first()
        if not user:
            raise serializers.ValidationError('the username is not valid')
        if not user.check_password(password):
            raise serializers.ValidationError('Incorrect credentials, please try again')
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
data['token'] = token
data['id'] = user.id
return data
class CuponCodeSerializer(serializers.ModelSerializer):
class Meta:
model = CuponCode
fields = ['id', 'code']
extra_kwargs = {
"code": {"read_only": True},
        }
 | [
"[email protected]"
] | |
067961039a165f93e34347f56a947b446b17133d | 1117ae9a0bc4bbbe0e505e573af70a9629ec8c45 | /App/models.py | 865bdb8ca8a7b9edfe0f5c7e75a61cbabda2da10 | [] | no_license | Chukslord1/E-LIBRARY | 003eadf124be91e40586f2f6661b5895a93d6a60 | c16f6d7ab2efb2be136251298f28119c2023b19f | refs/heads/master | 2023-01-10T17:43:28.293541 | 2020-11-17T20:27:52 | 2020-11-17T20:27:52 | 267,709,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Book(models.Model):
title=models.TextField()
isbn = models.IntegerField()
summary= models.TextField()
author = models.TextField()
position=models.CharField(max_length=100)
genre=models.CharField(max_length=100)
language=models.TextField()
total_copies=models.IntegerField()
available_copies=models.IntegerField()
pic=models.ImageField(blank=True, null=True)
review=models.IntegerField()
paginate_by = 2
def __str__(self):
return self.title
class Language(models.Model):
name = models.CharField(max_length=200,
help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")
code=models.CharField(max_length=200,
help_text="Enter the book's natural language code")
def __str__(self):
return self.name
class Genre(models.Model):
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
return self.name
class Series(models.Model):
name = models.CharField(max_length=200, help_text="Enter a book that is a Series")
def __str__(self):
return self.name
class Book_Allotment(models.Model):
book_title=models.TextField()
book_number=models.IntegerField()
member=models.CharField(max_length=100)
email=models.CharField(max_length=100)
issue_date=models.TextField(null=True,blank=True)
return_date=models.TextField(null=True,blank=True)
book_status=models.TextField(null=True,blank=True)
def __str__(self):
return self.book_title
class Member(models.Model):
full_name=models.TextField()
address=models.TextField()
email=models.CharField(max_length=100)
phone_number=models.IntegerField()
def __str__(self):
return self.full_name
class Publisher(models.Model):
full_name=models.TextField()
email=models.CharField(max_length=100)
def __str__(self):
return self.full_name
class Author(models.Model):
full_name=models.TextField()
email=models.CharField(max_length=100)
def __str__(self):
return self.full_name
class Liberian(models.Model):
user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
username=models.CharField(max_length=100, null=True,blank=True)
name= models.CharField(max_length=100)
address= models.TextField()
phone_number=models.IntegerField()
class Settings(models.Model):
image=models.ImageField(null=True,blank=True)
name=models.TextField(null=True,blank=True)
| [
"[email protected]"
] | |
74e1f700ad462338166fff4c0f99dcfb6d303a54 | 0a5db329e6ca4690f6f5f84d34ed51c0f54273b4 | /6 Extra 3/Example 1.py | 42e838ef0565d84e028cb1912447b5a8a3e24c4a | [] | no_license | wiput1999/Python101 | b7778e8feacdf95039260ba4e7d149a1fea30304 | 5d0eac78417cc1139f884652c5a6c6995dfb9e22 | refs/heads/master | 2021-01-20T15:31:19.170451 | 2017-04-08T16:03:38 | 2017-04-08T16:03:38 | 90,780,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | num = []
# Read ten numbers from the user
for i in range(10):
    num.append(int(input("Num : ")))
# Bubble sort: each pass bubbles the largest remaining value to the end
for j in range(0, len(num)):
    for i in range(0, len(num) - 1 - j):
        if num[i] > num[i+1]:
            # Swap adjacent elements that are out of order
            t = num[i]
            num[i] = num[i+1]
            num[i+1] = t
print(num)
 | [
"[email protected]"
] | |
c96f702c3c4d089f96e3879dc22c4ec60f1ad720 | 29c58b3bec6ac0fcdb3070efc118600ee92004da | /test/test_connector_sync_event_dto.py | 488ce7ea7b82b940e80e1efabe7d0b5b32f66c67 | [
"MIT"
] | permissive | mailslurp/mailslurp-client-python | a2b5a0545206714bd4462ae517f242852b52aaf9 | 5c9a7cfdd5ea8bf671928023e7263847353d92c4 | refs/heads/master | 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.connector_sync_event_dto import ConnectorSyncEventDto # noqa: E501
from mailslurp_client.rest import ApiException
class TestConnectorSyncEventDto(unittest.TestCase):
"""ConnectorSyncEventDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ConnectorSyncEventDto
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = mailslurp_client.models.connector_sync_event_dto.ConnectorSyncEventDto() # noqa: E501
if include_optional :
return ConnectorSyncEventDto(
id = '0',
connector_id = '0',
sync_status = 'SUCCESS',
sync_count = 56,
message = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
)
else :
return ConnectorSyncEventDto(
id = '0',
connector_id = '0',
sync_status = 'SUCCESS',
sync_count = 56,
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
)
def testConnectorSyncEventDto(self):
"""Test ConnectorSyncEventDto"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b9c545c2775953db809aa333c5571186f061f2f1 | 6080bfbc95ef2e4103fbd9c75c6b30402fe08aa5 | /helpers/ccsm/ccsm2icar.py | 1db50ddfa270a90c8105cc3a3cc03832c34efb97 | [
"MIT"
] | permissive | d-reynolds/HICAR | 0628f2a65922b61e7c68749ccc5b4328fe7c5dec | 0ae97ec4556624bd5fe288420f0dde2f737bf1f8 | refs/heads/master | 2023-05-27T09:55:13.262316 | 2023-03-31T12:43:55 | 2023-03-31T12:43:55 | 284,660,559 | 1 | 1 | MIT | 2020-09-29T14:12:28 | 2020-08-03T09:40:03 | Fortran | UTF-8 | Python | false | false | 1,103 | py | #!/usr/bin/env python
import os,traceback,sys
import config
import io_routines
import output
import convert
def main(info):
for k in info.keys():
if k!="times" and k!="lat_data" and k!="lon_data":
print(k,info[k])
print(info.times[0],info.times[-1])
curtime=info.times[0]
curpos=0
while curtime<=info.end_date:
raw_data=io_routines.load_data(curtime,info)
processed_data=convert.ccsm2icar(raw_data)
output.write_file(curtime,info,processed_data)
curpos+=raw_data.atm.ntimes
curtime=info.times[curpos]
if __name__ == '__main__':
try:
info=config.parse()
config.update_info(info)
exit_code = main(info)
if exit_code is None:
exit_code = 0
sys.exit(exit_code)
except KeyboardInterrupt as e: # Ctrl-C
raise e
except SystemExit as e: # sys.exit()
raise e
except Exception as e:
print('ERROR, UNEXPECTED EXCEPTION')
print(str(e))
traceback.print_exc()
os._exit(1)
| [
"[email protected]"
] | |
21ffe4553a4099601f96bfe38dde4dcef4cce140 | 7a3114bedb5e866fc85fecca44432d1ce60e4262 | /where/postprocessors/__init__.py | e701fda648f7bd0a1601571994da4ea81345f780 | [
"MIT"
] | permissive | kartverket/where | 99f26e5d5f2f23a79921bad0fb60cb8a99d05e7f | 0c8c5c68adca08f97e22cab1bce10e382a7fbf77 | refs/heads/master | 2023-08-31T03:26:23.222100 | 2023-08-30T08:27:07 | 2023-08-30T08:27:07 | 111,802,841 | 21 | 15 | MIT | 2019-02-01T15:42:36 | 2017-11-23T11:44:29 | Python | UTF-8 | Python | false | false | 1,007 | py | """Framework for post-processing data
Description:
------------
Each postprocessor should be defined in a separate .py-file. The function inside the .py-file that should be called
needs to be decorated with the :func:`~midgard.dev.plugins.register` decorator as follows::
from midgard.dev import plugins
@plugins.register
def gnss_linear_combination(dset):
...
"""
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where.lib import log
def apply_postprocessors(config_key, dset):
"""Apply postprocessors for a given session
Args:
config_key (String): The configuration key listing which postprocessors to apply.
dset (Dataset): Dataset containing analysis data.
"""
prefix = dset.vars["pipeline"]
postprocessors = config.tech[config_key].list
log.info(f"Applying postprocessors")
return plugins.call_all(package_name=__name__, plugins=postprocessors, prefix=prefix, dset=dset)
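# Illustrative sketch of how a postprocessor plugs into the framework above.
# The module name, the "postprocessors" configuration key and the dataset
# attribute used here are assumptions for illustration, not taken from Where:
#
#     # where/postprocessors/example_counter.py
#     from midgard.dev import plugins
#     from where.lib import log
#
#     @plugins.register
#     def example_counter(dset):
#         log.info(f"Postprocessing dataset with {dset.num_obs} observations")
#
#     # With a configuration entry such as `postprocessors = example_counter`,
#     # the pipeline applies it via:
#     #     apply_postprocessors("postprocessors", dset)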
| [
"[email protected]"
] | |
9e616c90dd516ff508549568cd468a52e1e61faf | 731ebf286a169b5f4dae914bcb0970c2388ba875 | /tigereye/helpers/tetime.py | 1def5631fe0d37df838160507a265594d8ef3325 | [] | no_license | ljxproject/tigereye | f8e86287b03102b713b4179a9fa023f03cfd36ea | 406024d88450b6dcbec7a337a79339ff8c97a3e3 | refs/heads/master | 2020-03-26T08:48:48.595467 | 2018-08-14T13:05:26 | 2018-08-14T13:05:26 | 144,721,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from datetime import datetime
DEFAULT_DATETIME_FORMAT = '%Y%m%d%H%M%S'
SIMPLE_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def now():
return datetime.now().strftime(DEFAULT_DATETIME_FORMAT) | [
"[email protected]"
] | |
d18cab078be2ac934e019489d1f9e4c5ac53d096 | d1ef145c7b51b694e59ed26894ef12e1026e9e78 | /data_samples/aio-libs/aiohttp/aiohttp/client_ws.py | 209935da8ab681ee47d6e8d8d77923b20921555d | [] | no_license | rkdls/tensorflow_new_seq2seq | 2afc12b9e28626d351e4849c556a9f2320588a26 | 82ab66a79cc3f6631970ba2b2b349792b1aac7e4 | refs/heads/master | 2021-01-01T15:43:48.902079 | 2017-07-20T17:10:02 | 2017-07-20T17:10:02 | 97,688,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,678 | py | 'WebSocket client for asyncio.'
import asyncio
import json
from .client_exceptions import ClientError
from .helpers import PY_35, PY_352, Timeout, call_later, create_future
from .http import WS_CLOSED_MESSAGE, WS_CLOSING_MESSAGE, WebSocketError, WSMessage, WSMsgType
class Class321:
def __init__(self, arg312, arg501, arg693, arg1016, arg1505, arg210, arg101, arg2033, *, receive_timeout=None, heartbeat=None):
self.attribute1244 = arg1016
self.attribute257 = arg1016.connection
self.attribute1822 = arg501
self.attribute459 = arg312
self.attribute668 = arg693
self.attribute10 = False
self.attribute71 = False
self.attribute1314 = None
self.attribute758 = arg1505
self.attribute1580 = receive_timeout
self.attribute1153 = arg210
self.attribute1413 = arg101
self.attribute1388 = heartbeat
self.attribute351 = None
if (heartbeat is not None):
self.attribute1004 = (heartbeat / 2.0)
self.attribute1125 = None
self.attribute650 = arg2033
self.attribute98 = None
self.attribute1713 = None
self.function2113()
def function1265(self):
if (self.attribute1125 is not None):
self.attribute1125.cancel()
self.attribute1125 = None
if (self.attribute351 is not None):
self.attribute351.cancel()
self.attribute351 = None
def function2113(self):
self.function1265()
if (self.attribute1388 is not None):
self.attribute351 = call_later(self.function1157, self.attribute1388, self.attribute650)
def function1157(self):
if ((self.attribute1388 is not None) and (not self.attribute10)):
self.function1918()
if (self.attribute1125 is not None):
self.attribute1125.cancel()
self.attribute1125 = call_later(self.function471, self.attribute1004, self.attribute650)
def function471(self):
if (not self.attribute10):
self.attribute10 = True
self.attribute1314 = 1006
self.attribute1713 = asyncio.TimeoutError()
self.attribute1244.close()
@property
def function2018(self):
return self.attribute10
@property
def function2260(self):
return self.attribute1314
@property
def function1869(self):
return self.attribute668
def function1796(self, arg677, arg1808=None):
'extra info from connection transport'
try:
return self.attribute1244.connection.transport.function1796(arg677, arg1808)
except:
return arg1808
def function1349(self):
return self.attribute1713
def function1918(self, arg1442='b'):
self.attribute1822.function1918(arg1442)
def function2181(self, arg1232='b'):
self.attribute1822.function2181(arg1232)
def function644(self, arg162):
if (not isinstance(arg162, str)):
raise TypeError(('data argument must be str (%r)' % type(arg162)))
return self.attribute1822.send(arg162, binary=False)
def function819(self, arg470):
if (not isinstance(arg470, (bytes, bytearray, memoryview))):
raise TypeError(('data argument must be byte-ish (%r)' % type(arg470)))
return self.attribute1822.send(arg470, binary=True)
def function2205(self, arg1185, *, dumps=json.dumps):
return self.function644(dumps(arg1185))
@asyncio.coroutine
def function2368(self, *, code=1000, message=b''):
if ((self.attribute98 is not None) and (not self.attribute10)):
self.attribute459.feed_data(WS_CLOSING_MESSAGE, 0)
yield from self.attribute98
if (not self.attribute10):
self.function1265()
self.attribute10 = True
try:
self.attribute1822.function2368(code, message)
except asyncio.CancelledError:
self.attribute1314 = 1006
self.attribute1244.function2368()
raise
except Exception as var525:
self.attribute1314 = 1006
self.attribute1713 = var525
self.attribute1244.function2368()
return True
if self.attribute71:
self.attribute1244.function2368()
return True
while True:
try:
with Timeout(self.attribute758, loop=self.attribute650):
var52 = yield from self.attribute459.read()
except asyncio.CancelledError:
self.attribute1314 = 1006
self.attribute1244.function2368()
raise
except Exception as var525:
self.attribute1314 = 1006
self.attribute1713 = var525
self.attribute1244.function2368()
return True
if (var52.type == WSMsgType.CLOSE):
self.attribute1314 = var52.data
self.attribute1244.function2368()
return True
else:
return False
@asyncio.coroutine
def function1959(self, arg1248=None):
while True:
if (self.attribute98 is not None):
raise RuntimeError('Concurrent call to receive() is not allowed')
if self.attribute10:
return WS_CLOSED_MESSAGE
elif self.attribute71:
yield from self.function2368()
return WS_CLOSED_MESSAGE
try:
self.attribute98 = create_future(self.attribute650)
try:
                    with Timeout((arg1248 or self.attribute1580), loop=self.attribute650):
var52 = yield from self.attribute459.read()
self.function2113()
finally:
var2137 = self.attribute98
self.attribute98 = None
var2137.set_result(True)
except (asyncio.CancelledError, asyncio.TimeoutError):
self.attribute1314 = 1006
raise
except ClientError:
self.attribute10 = True
self.attribute1314 = 1006
return WS_CLOSED_MESSAGE
except WebSocketError as var525:
self.attribute1314 = var525.code
yield from self.function2368(code=var525.code)
return WSMessage(WSMsgType.ERROR, var525, None)
except Exception as var525:
self.attribute1713 = var525
self.attribute71 = True
self.attribute1314 = 1006
yield from self.function2368()
return WSMessage(WSMsgType.ERROR, var525, None)
if (var52.type == WSMsgType.CLOSE):
self.attribute71 = True
self.attribute1314 = var52.data
if ((not self.attribute10) and self.attribute1153):
yield from self.function2368()
elif (var52.type == WSMsgType.CLOSING):
self.attribute71 = True
elif ((var52.type == WSMsgType.PING) and self.attribute1413):
self.function2181(var52.data)
continue
elif ((var52.type == WSMsgType.PONG) and self.attribute1413):
continue
return var52
@asyncio.coroutine
def function858(self, *, timeout=None):
        var52 = yield from self.function1959(timeout)
if (var52.type != WSMsgType.TEXT):
raise TypeError('Received message {}:{!r} is not str'.format(var52.type, var52.data))
return var52.data
@asyncio.coroutine
def function2267(self, *, timeout=None):
        var52 = yield from self.function1959(timeout)
if (var52.type != WSMsgType.BINARY):
raise TypeError('Received message {}:{!r} is not bytes'.format(var52.type, var52.data))
return var52.data
@asyncio.coroutine
def function1176(self, *, loads=json.loads, timeout=None):
        var2727 = yield from self.function858(timeout=timeout)
return loads(var2727)
if PY_35:
def __aiter__(self):
return self
if (not PY_352):
            __aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
var52 = yield from self.function1959()
if (var52.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED)):
raise StopAsyncIteration
        return var52
 | [
"[email protected]"
] | |
37ce0dc192e7a51cb2d91c3c8bbc9b94d5546a56 | 16ad791ae0fcf9b00fb3f3938e3e69fd86d91715 | /solved/probs050-099/euler055.py | fd7e12947d84d15f28aec0927a223aa37fab9937 | [] | no_license | chrisgilmerproj/project_euler | 9a6cf051ddc1882d803531cb02cc356a94d9bdf4 | 5a2c72ae40cfff32b79b35bb93db2b93a84afc25 | refs/heads/master | 2020-05-07T16:44:22.052645 | 2011-05-03T15:35:34 | 2011-05-03T15:35:34 | 1,447,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | # This is a problem from the Project Euler Website
# http://projecteuler.net/
#
# Euler Problem #055
#
# Problem: How many Lychrel numbers are there below ten-thousand?
#
# Hint: If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
#
# Not all numbers produce palindromes so quickly. For example,
#
# 349 + 943 = 1292
# 1292 + 2921 = 4213
# 4213 + 3124 = 7337
#
# That is, 349 took three iterations to arrive at a palindrome.
#
# Although no one has proved it yet, it is thought that some numbers,
# like 196, never produce a palindrome. A number that never forms a
# palindrome through the reverse and add process is called a Lychrel
# number. Due to the theoretical nature of these numbers, and for the
# purpose of this problem, we shall assume that a number is Lychrel
# until proven otherwise. In addition you are given that for every
# number below ten-thousand, it will either (i) become a palindrome
# in less than fifty iterations, or, (ii) no one, with all the
# computing power that exists, has managed so far to map it to a
# palindrome. In fact, 10677 is the first number to be shown to
# require over fifty iterations before producing a palindrome:
# 4668731596684224866951378664 (53 iterations, 28-digits).
#
# Surprisingly, there are palindromic numbers that are themselves
# Lychrel numbers; the first example is 4994.
#
# NOTE: Wording was modified slightly on 24 April 2007 to emphasise
# the theoretical nature of Lychrel numbers.
#
# Written by Chris Gilmer
# Solved: 12/08/2008
# Answer: 249
#
# Notes:
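#
# The reverse-and-add step described above can also be written as two tiny
# helpers (an illustrative sketch only; the solution below keeps its original
# inline form):
#
#     def reverse_and_add(n):
#         return n + int(str(n)[::-1])
#
#     def is_palindrome(n):
#         return str(n) == str(n)[::-1]
#
# For example, reverse_and_add(349) -> 1292, reverse_and_add(1292) -> 4213,
# reverse_and_add(4213) -> 7337, and is_palindrome(7337) is True.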
if __name__ == '__main__':
i = 1
limit = 10000
lychrel = []
while i < limit:
palindrome = False
count = 1
total = i
print i
while palindrome == False and count < 50:
new_i = total
reverse_i = list(str(new_i))
reverse_i.reverse()
reverse_i = int(str(''.join(reverse_i)))
total = new_i + reverse_i
print "\t%s + %s = %s" % (new_i, reverse_i, total)
str_total = list(str(total))
reverse_total = list(str(total))
reverse_total.reverse()
if str_total == reverse_total:
palindrome = True
count += 1
        if not palindrome:
lychrel.append(i)
print "\n\tLychrel number:", i
i += 1
print '\nThere are %s Lychrel numbers below %s' % (len(lychrel),limit)
print 'These numbers are:', lychrel
| [
"[email protected]"
] | |
501225b7b62991c2bb7a453bcb123b336846959d | 8b9a418950a8c3ee42e4a4692a0f690c033ba401 | /emulators/csp_vis_sender_02/app/__main__.py | e9dfdda4a5725883e6a15b577f3cddae1c04009f | [
"BSD-3-Clause"
] | permissive | jan2nov/integration-prototype | da5b0f8b168365856dabb644bd1d2440ebced9e8 | 5b4db822b0d49ab45d10365d5c7aaa86954dc2e0 | refs/heads/master | 2020-03-20T14:54:23.937780 | 2018-06-04T12:09:21 | 2018-06-04T12:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | # -*- coding: utf-8 -*-
"""Module main to stream SPEAD visibility data."""
import sys
import argparse
import logging
import json
from .simulator import SimpleSimulator
def _init_log(level=logging.DEBUG):
"""Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
"""
log = logging.getLogger(__file__)
log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
formatter = logging.Formatter('%(asctime)s: %(message)s',
'%Y/%m/%d-%H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
return log
def _parse_command_line():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
prog='csp_visibility_sender',
description='Send fake visibility data using the SPEAD protocol.')
parser.add_argument('config_file', type=argparse.FileType('r'),
help='JSON configuration file.')
parser.add_argument('-v', '--verbose', help='Enable verbose messages.',
action='store_true')
parser.add_argument('-p', '--print_settings', help='Print settings file.',
action='store_true')
return parser.parse_args()
def main(config, log):
"""Main script function"""
# Create simulation object, and start streaming SPEAD heaps
sim = SimpleSimulator(config, log)
sim.simulate_heaps()
if __name__ == '__main__':
# Parse command line arguments
args = _parse_command_line()
# Initialise logging.
_log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO)
# Load configuration.
_log.info('Loading config: {}'.format(args.config_file.name))
_config = json.load(args.config_file)
if args.print_settings:
_log.debug('Settings:\n {}'.format(json.dumps(_config, indent=4,
sort_keys=True)))
main(_config, _log)
| [
"[email protected]"
] | |
bf6dd2083475268a966f43c0bed20eeff4672592 | 944401a6292baa2d23b9738898e0b0cb199d0795 | /lib/python2.7/site-packages/anaconda_navigator/widgets/lists/apps.py | 00f63fc959dcd66ca504c9d506d11bbb66603fd5 | [
"Python-2.0"
] | permissive | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 22,134 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016-2017 Anaconda, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""
Widgets to list applications available to launch from the Home tab.
This widget does not perform the actual conda actions or command launch, but it
emits signals that should be connected to the parents and final controller on
the main window.
"""
# yapf: disable
from __future__ import absolute_import, division, print_function
# Standard library imports
import sys
# Third party imports
from qtpy.QtCore import QPoint, QSize, Qt, QTimer, Signal
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import QHBoxLayout, QListWidget, QMenu, QVBoxLayout
# Local imports
from anaconda_navigator.api.anaconda_api import AnacondaAPI
from anaconda_navigator.config import GLOBAL_VSCODE_APP
from anaconda_navigator.static.images import ANACONDA_ICON_256_PATH
from anaconda_navigator.utils import constants as C
from anaconda_navigator.utils.logs import logger
from anaconda_navigator.utils.py3compat import PY3, to_text_string
from anaconda_navigator.utils.qthelpers import (add_actions, create_action,
update_pointer)
from anaconda_navigator.utils.styles import SASS_VARIABLES, load_style_sheet
from anaconda_navigator.widgets import (ButtonLabel, ButtonLink, ButtonNormal,
FrameBase, LabelBase)
from anaconda_navigator.widgets.lists import ListWidgetBase, ListWidgetItemBase
from anaconda_navigator.widgets.spinner import NavigatorSpinner
# yapf: enable
# --- Widgets used in CSS styling
# -----------------------------------------------------------------------------
class ButtonApplicationInstall(ButtonNormal):
"""Button used in CSS styling."""
class ButtonApplicationLaunch(ButtonNormal):
"""Button used in CSS styling."""
class ButtonApplicationOptions(ButtonNormal):
"""Button used in CSS styling."""
class ButtonApplicationUpdate(ButtonNormal):
"""Button used in CSS styling."""
class ButtonApplicationLicense(ButtonLink):
"""Button used in CSS styling."""
class LabelApplicationLicense(ButtonLabel):
"""Button used in CSS styling."""
class LabelApplicationIcon(LabelBase):
"""Label used in CSS styling."""
class LabelApplicationName(LabelBase):
"""Label used in CSS styling."""
class LabelApplicationVersion(LabelBase):
"""Label used in CSS styling."""
class LabelApplicationDescription(LabelBase):
"""Label used in CSS styling."""
class FrameApplicationSpinner(FrameBase):
"""Label used in CSS styling."""
class ButtonApplicationVersion(ButtonLabel):
"""Button used in CSS styling."""
class WidgetApplication(FrameBase):
"""Widget used in CSS styling."""
# application_name, command, leave_path_alone, prefix, sender, non_conda
sig_launch_action_requested = Signal(
object, object, bool, object, object, object
)
# action, application_name, version, sender, non_conda
sig_conda_action_requested = Signal(object, object, object, object, object)
sig_url_clicked = Signal(object)
# --- Main Widgets
# -----------------------------------------------------------------------------
class ListWidgetApplication(ListWidgetBase):
"""Widget that holds the whole list of applications to launch."""
# application_name, command, leave_path_alone, prefix, sender, non_conda
sig_launch_action_requested = Signal(
object, object, bool, object, object, object
)
# action, application_name, version, sender, non_conda
sig_conda_action_requested = Signal(object, object, object, object, object)
sig_url_clicked = Signal(object)
def __init__(self, *args, **kwargs):
"""Widget that holds the whole list of applications to launch."""
super(ListWidgetApplication, self).__init__(*args, **kwargs)
self.setGridSize(ListItemApplication.widget_size())
self.setWrapping(True)
self.setViewMode(QListWidget.IconMode)
self.setLayoutMode(ListWidgetApplication.Batched)
self.setFocusPolicy(Qt.NoFocus)
def ordered_widgets(self):
"""Return a list of the ordered widgets."""
ordered_widgets = []
for item in self.items():
ordered_widgets += item.ordered_widgets()
return ordered_widgets
def setup_item(self, item):
"""Override base method."""
item.widget.sig_conda_action_requested.connect(
self.sig_conda_action_requested
)
item.widget.sig_launch_action_requested.connect(
self.sig_launch_action_requested
)
item.widget.sig_url_clicked.connect(self.sig_url_clicked)
class ListItemApplication(ListWidgetItemBase):
"""Item with custom widget for the applications list."""
ICON_SIZE = 64
def __init__(
self,
name=None,
description=None,
command=None,
versions=None,
image_path=None,
prefix=None,
needs_license=False,
non_conda=False,
):
"""Item with custom widget for the applications list."""
super(ListItemApplication, self).__init__()
self.api = AnacondaAPI()
self.prefix = prefix
self.name = name
self.url = ''
self.expired = False
self.needs_license = needs_license
self.description = description
self.command = command
self.versions = versions
self.image_path = image_path if image_path else ANACONDA_ICON_256_PATH
self.style_sheet = None
self.timeout = 2000
self.non_conda = non_conda
self._vscode_version_value = None
# Widgets
self.button_install = ButtonApplicationInstall("Install") # or Try!
self.button_launch = ButtonApplicationLaunch("Launch")
self.button_options = ButtonApplicationOptions()
self.label_license = LabelApplicationLicense('')
self.button_license = ButtonApplicationLicense('')
self.label_icon = LabelApplicationIcon()
self.label_name = LabelApplicationName(self.name)
self.label_description = LabelApplicationDescription(self.description)
self.button_version = ButtonApplicationVersion(
to_text_string(self.version)
)
self.menu_options = QMenu('Application options')
self.menu_versions = QMenu('Install specific version')
self.pixmap = QPixmap(self.image_path)
self.timer = QTimer()
self.widget = WidgetApplication()
self.frame_spinner = FrameApplicationSpinner()
self.spinner = NavigatorSpinner(self.widget, total_width=16)
lay = QHBoxLayout()
lay.addWidget(self.spinner)
self.frame_spinner.setLayout(lay)
# Widget setup
self.button_version.setFocusPolicy(Qt.NoFocus)
self.button_version.setEnabled(True)
self.label_description.setAlignment(Qt.AlignCenter)
self.timer.setInterval(self.timeout)
self.timer.setSingleShot(True)
self.label_icon.setPixmap(self.pixmap)
self.label_icon.setScaledContents(True) # important on High DPI!
self.label_icon.setMaximumWidth(self.ICON_SIZE)
self.label_icon.setMaximumHeight(self.ICON_SIZE)
self.label_icon.setAlignment(Qt.AlignCenter)
self.label_name.setAlignment(Qt.AlignCenter)
self.label_name.setWordWrap(True)
self.label_description.setWordWrap(True)
self.label_description.setAlignment(Qt.AlignTop | Qt.AlignHCenter)
self.frame_spinner.setVisible(False)
# Layouts
layout_spinner = QHBoxLayout()
layout_spinner.addWidget(self.button_version, 0, Qt.AlignCenter)
layout_spinner.addWidget(self.frame_spinner, 0, Qt.AlignCenter)
layout_license = QHBoxLayout()
layout_license.addStretch()
layout_license.addWidget(self.label_license, 0, Qt.AlignCenter)
layout_license.addWidget(self.button_license, 0, Qt.AlignCenter)
layout_license.addStretch()
layout_main = QVBoxLayout()
layout_main.addWidget(self.button_options, 0, Qt.AlignRight)
layout_main.addWidget(self.label_icon, 0, Qt.AlignCenter)
layout_main.addWidget(self.label_name, 0, Qt.AlignCenter)
layout_main.addLayout(layout_spinner)
layout_main.addLayout(layout_license)
layout_main.addWidget(self.label_description, 0, Qt.AlignCenter)
layout_main.addWidget(self.button_launch, 0, Qt.AlignCenter)
layout_main.addWidget(self.button_install, 0, Qt.AlignCenter)
self.widget.setLayout(layout_main)
self.widget.setStyleSheet(load_style_sheet())
self.setSizeHint(self.widget_size())
# This might help with visual quirks on the home screen
self.widget.setMinimumSize(self.widget_size())
# Signals
self.button_install.clicked.connect(self.install_application)
self.button_launch.clicked.connect(self.launch_application)
self.button_options.clicked.connect(self.actions_menu_requested)
self.button_license.clicked.connect(self.launch_url)
self.timer.timeout.connect(self._application_launched)
# Setup
self.update_status()
# --- Callbacks
# -------------------------------------------------------------------------
def _application_launched(self):
self.button_launch.setDisabled(False)
update_pointer()
# --- Helpers
# -------------------------------------------------------------------------
def update_style_sheet(self, style_sheet=None):
"""Update custom CSS stylesheet."""
if style_sheet:
self.style_sheet = style_sheet
else:
self.style_sheet = load_style_sheet()
self.menu_options.setStyleSheet(self.style_sheet)
self.menu_versions.setStyleSheet(self.style_sheet)
def ordered_widgets(self):
"""Return a list of the ordered widgets."""
return [
self.button_license, self.button_install, self.button_launch,
self.button_options
]
@staticmethod
def widget_size():
"""Return the size defined in the SASS file."""
return QSize(
SASS_VARIABLES.WIDGET_APPLICATION_TOTAL_WIDTH,
SASS_VARIABLES.WIDGET_APPLICATION_TOTAL_HEIGHT
)
def launch_url(self):
"""Launch signal for url click."""
self.widget.sig_url_clicked.emit(self.url)
def actions_menu_requested(self):
"""Create and display menu for the currently selected application."""
self.menu_options.clear()
self.menu_versions.clear()
# Add versions menu
versions = self.versions if self.versions else []
version_actions = []
for version in reversed(versions):
action = create_action(
self.widget,
version,
triggered=lambda value, version=version: self.
install_application(version=version)
)
action.setCheckable(True)
if self.version == version and self.installed:
action.setChecked(True)
action.setDisabled(True)
version_actions.append(action)
install_action = create_action(
self.widget,
'Install application',
triggered=lambda: self.install_application()
)
install_action.setEnabled(not self.installed)
update_action = create_action(
self.widget,
'Update application',
triggered=lambda: self.update_application()
)
if versions and versions[-1] == self.version:
update_action.setDisabled(True)
else:
update_action.setDisabled(False)
if self.non_conda and self.name == GLOBAL_VSCODE_APP:
update_action.setDisabled(True)
remove_action = create_action(
self.widget,
'Remove application',
triggered=lambda: self.remove_application()
)
remove_action.setEnabled(self.installed)
actions = [
install_action, update_action, remove_action, None,
self.menu_versions
]
add_actions(self.menu_options, actions)
add_actions(self.menu_versions, version_actions)
offset = QPoint(self.button_options.width(), 0)
position = self.button_options.mapToGlobal(QPoint(0, 0))
self.menu_versions.setEnabled(len(versions) > 1)
self.menu_options.move(position + offset)
self.menu_options.exec_()
def update_status(self):
"""Update status."""
# License check
license_label_text = ''
license_url_text = ''
self.url = ''
self.expired = False
button_label = 'Install'
if self.needs_license:
# TODO: Fix this method to use the api
license_info = self.api.get_package_license(self.name)
license_days = self.api.get_days_left(license_info)
end_date = license_info.get('end_date', '')
self.expired = license_days == 0
plural = 's' if license_days != 1 else ''
is_trial = license_info.get('type', '').lower() == 'trial'
if self.installed and license_info:
if is_trial and not self.expired:
license_label_text = (
'Trial, {days} day{plural} '
'remaining'.format(days=license_days, plural=plural)
)
self.url = ''
elif is_trial and self.expired:
license_label_text = 'Trial expired, '
license_url_text = 'contact us'
self.url = 'mailto:[email protected]'
elif not is_trial and not self.expired:
license_label_text = 'License expires {}'.format(end_date)
self.url = ''
elif not is_trial and self.expired:
license_url_text = 'Renew license'
self.url = 'mailto:[email protected]'
elif self.installed and not bool(license_info):
# Installed but no license found!
license_url_text = 'No license found'
self.url = 'mailto:[email protected]'
else:
if not self.expired:
button_label = 'Install'
else:
button_label = 'Try'
self.button_license.setText(license_url_text)
self.button_license.setVisible(bool(self.url))
self.label_license.setText(license_label_text)
self.label_license.setVisible(bool(license_label_text))
# Version and version updates
if (self.versions and self.version != self.versions[-1] and
self.installed):
# The property is used with CSS to display updatable packages.
self.button_version.setProperty('pressed', True)
self.button_version.setToolTip(
'Version {0} available'.format(self.versions[-1])
)
else:
self.button_version.setProperty('pressed', False)
if not self.needs_license:
self.button_install.setText(button_label)
self.button_install.setVisible(not self.installed)
self.button_launch.setVisible(self.installed)
else:
self.button_install.setText('Try' if self.expired else 'Install')
self.button_launch.setVisible(not self.expired)
self.button_install.setVisible(self.expired)
self.button_launch.setEnabled(True)
def update_versions(self, version=None, versions=None):
"""Update button visibility depending on update availability."""
        logger.debug(str((self.name, self.installed)))
if self.installed and version:
self.button_options.setVisible(True)
self.button_version.setText(version)
self.button_version.setVisible(True)
elif not self.installed and versions:
self.button_install.setEnabled(True)
self.button_version.setText(versions[-1])
self.button_version.setVisible(True)
self.versions = versions
self.version = version
self.update_status()
def set_loading(self, value):
"""Set loading status."""
self.button_install.setDisabled(value)
self.button_options.setDisabled(value)
self.button_launch.setDisabled(value)
self.button_license.setDisabled(value)
if value:
self.spinner.start()
else:
self.spinner.stop()
if self.version is None and self.versions is not None:
version = self.versions[-1]
else:
version = self.version
self.button_version.setText(version)
self.button_launch.setDisabled(self.expired)
self.frame_spinner.setVisible(value)
self.button_version.setVisible(not value)
# --- Helpers using api
# -------------------------------------------------------------------------
def _vscode_version(self):
"""Query the vscode version for the default installation path."""
version = None
if self._vscode_version_value is None:
cmd = [self.api.vscode_executable(), '--version']
# print(cmd)
import subprocess
try:
output = subprocess.check_output(cmd)
if PY3:
output = output.decode()
output = [o for o in output.split('\n') if o and '.' in o]
# print(output)
if output:
version = output[0]
except Exception:
pass
# print(e)
self._vscode_version_value = version
else:
version = self._vscode_version_value
return version
@property
def installed(self):
"""Return the installed status of the package."""
version = None
if self.non_conda and self.name == GLOBAL_VSCODE_APP:
# TODO: Vscode program location, check existence
version = self._vscode_version()
elif self.prefix:
version = self.api.conda_package_version(
prefix=self.prefix, pkg=self.name, build=False
)
return bool(version)
@property
def version(self):
"""Return the current installed version or the highest version."""
version = None
if self.non_conda and self.name == GLOBAL_VSCODE_APP:
version = self._vscode_version()
elif self.prefix:
version = self.api.conda_package_version(
prefix=self.prefix, pkg=self.name, build=False
)
if not version:
version = self.versions[-1]
return version
# --- Application actions
# ------------------------------------------------------------------------
def install_application(self, value=None, version=None, install=True):
"""
Update the application on the defined prefix environment.
This is used for both normal install and specific version install.
"""
if not version:
version = self.versions[-1]
action = C.APPLICATION_INSTALL if install else C.APPLICATION_UPDATE
self.widget.sig_conda_action_requested.emit(
action,
self.name,
version,
C.TAB_HOME,
self.non_conda,
)
self.set_loading(True)
def remove_application(self):
"""Remove the application from the defined prefix environment."""
self.widget.sig_conda_action_requested.emit(
C.APPLICATION_REMOVE,
self.name,
None,
C.TAB_HOME,
self.non_conda,
)
self.set_loading(True)
def update_application(self):
"""Update the application on the defined prefix environment."""
self.install_application(version=self.versions[-1], install=False)
def launch_application(self):
"""Launch application installed in prefix environment."""
leave_path_alone = False
if self.command is not None:
if self.non_conda and self.name == GLOBAL_VSCODE_APP:
leave_path_alone = True
args = [self.command]
else:
args = self.command.split(' ')
leave_path_alone = True
self.button_launch.setDisabled(True)
self.timer.setInterval(self.timeout)
self.timer.start()
update_pointer(Qt.BusyCursor)
self.widget.sig_launch_action_requested.emit(
self.name,
args,
leave_path_alone,
self.prefix,
C.TAB_HOME,
self.non_conda,
)
# --- Local testing
# -----------------------------------------------------------------------------
def local_test(): # pragma: no cover
"""Run local test."""
from anaconda_navigator.utils.qthelpers import qapplication
from anaconda_navigator.static.images import ANACONDA_ICON_256_PATH
app = qapplication(test_time=5)
widget = ListWidgetApplication()
for i in range(30):
item = ListItemApplication(
name="Package {0}".format(i),
description="Scientific PYthon Development EnviRonment",
versions=[str(i), str(i + 1)],
image_path=ANACONDA_ICON_256_PATH,
prefix=None
)
widget.addItem(item)
widget.update_style_sheet()
widget.show()
sys.exit(app.exec_())
if __name__ == "__main__": # pragma: no cover
local_test()
| [
"[email protected]"
] | |
2f2e2d2d2856f845f1e9418d80a262410ffbca17 | ea515ab67b832dad3a9b69bef723bd9d918395e7 | /03_Implementacao/DataBase/true_or_false_question_class_P_plus_regex/make_transformations.py | 40165e0cd50ec5187a5a9d604d1f395df4bb19a5 | [] | no_license | projeto-exercicios/Exercicios-Python-de-correccao-automatica | b52be3211e75d97cb55b6cdccdaa1d9f9d84f65b | a7c80ea2bec33296a3c2fbe4901ca509df4b1be6 | refs/heads/master | 2022-12-13T15:53:59.283232 | 2020-09-20T21:25:57 | 2020-09-20T21:25:57 | 295,470,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,577 | py |
import sys
#sys.path.append(
# '/home/jbs/develop.old/articles/201509_python_exercises_generator')
#sys.path.append('/home/jbs/develop/201902_questions_transformer')
sys.path.append('../qom_questions_transformer')
import string
import re
from random import sample
from random import choice
from random import randint
from random import shuffle
from text_transformer.tt_text_transformer_interface import add_changeable
#from text_transformer.tt_text_transformer_interface import change_all_occurrences
from text_transformer.tt_text_transformer_interface import change_one_occurrence
# # this import removes an import error. I don't know why (jbs
# # 2018/12/12). see pt_import_tests.py and try to correct the problem.
# import py_transformer.ast_processor
# from python_transformer.pt_python_transformer_interface import change_identifier_all_occurrences
# from python_transformer.pt_python_transformer_interface import change_all_occurrences_in_strings
from python_transformer.pt_python_transformer_interface import change_token_all_occurrences
from python_transformer.pt_python_transformer_interface import change_all_occurrences
#from sympy import latex, sympify
# in the question (program)
add_changeable('135') # seed
add_changeable('a') # the list
add_changeable('b') # the 2nd list
add_changeable('n') # the loop variable
add_changeable('i') # the loop variable
add_changeable('__var') # class's global variable
add_changeable('P') # class name
add_changeable('p') # class variable
add_changeable('13') # the list length
add_changeable('33') # the list length
add_changeable('3') # the list length
# answers list name
add_changeable(r'\verb+a+')
add_changeable(r'\verb+b+')
# answers (indexes)
add_changeable(r'\verb+1+')
add_changeable(r'\verb+2_1+')
add_changeable(r'\verb+2_2+')
add_changeable(r'\verb+3_1+')
add_changeable(r'\verb+3_2+')
add_changeable(r'\verb+3_1_f+')
add_changeable(r'\verb+3_2_f+')
add_changeable(r'\verb+4+')
add_changeable(r'\verb+5_1+')
add_changeable(r'\verb+5_2+')
# right answers values
add_changeable(r'\verb+11+')
add_changeable(r'\verb+22+')
add_changeable(r'\verb+33+')
add_changeable(r'\verb+44+')
add_changeable(r'\verb+55+')
# wrong answers values
add_changeable(r'\verb+111+')
add_changeable(r'\verb+222+')
add_changeable(r'\verb+333+')
add_changeable(r'\verb+444+')
add_changeable(r'\verb+555+')
# variables shared between the functions make_transformations and
# make_transformations_on_results
a = None
b = None
_1 = None
_2_1 = None
_2_2 = None
_3 = None
_3_1 = None
_3_2 = None
_3_1_f = None
_3_2_f = None
_4 = None
_5_1 = None
_5_2 = None
__var = None
var = None
P = None
p = None
def make_transformations():
    '''Randomise the identifiers, numeric constants and answer placeholders used in the question.'''
global a
global b
global _1
global _2_1
global _2_2
global _3
global _3_1
global _3_2
global _3_1_f
global _3_2_f
global _4
global __var
global var
global P
global p
# question
_135 = str(randint(1000000, 2000000))
[a, b, n, p, P, var, i] = sample(string.ascii_lowercase, 7)
_13 = randint(19000, 20000)
_3 = randint(0, 3)
_33 = randint(50, 1500)
__var = '__' + var
P = P.upper()
wrong_3 = choice((p + '.' + '__' + var,
P + '.' + '__' + var,
p + '._' + P + '_' + var,
P + '.' + var , p + '.' + var))
correct_answers_list = [p + '._' + P + '__' + var,
p + ".get_var()", P + "().get_var()"]
_3_1, _3_2 = sample(correct_answers_list, 2)
decision = choice((0, 1))
correct_3 = choice((_3_1, _3_2))
_3_1_f = correct_3 if decision == 0 else wrong_3
_3_2_f = wrong_3 if decision == 0 else _3_1
change_all_occurrences('135', _135)
change_token_all_occurrences('a', a)
change_token_all_occurrences('b', b)
change_token_all_occurrences('P', P)
change_token_all_occurrences('__var', __var)
change_token_all_occurrences('p', p)
change_token_all_occurrences('i', i)
change_token_all_occurrences('n', n)
change_all_occurrences('13', str(_13))
change_all_occurrences('3', str(_3))
change_all_occurrences('33', str(_33))
# answers
change_all_occurrences(r'\verb+a+', r'\verb+' + a + '+')
change_all_occurrences(r'\verb+b+', r'\verb+' + b + '+')
# indexes with no repetitions
[_1] = sample(range(10), 1)
_1 = str(_1)
#regex parameters decision
[_2_1, _2_2] = sample(range(10) ,2)
_2_1 = str(_2_1)
_2_2 = str(_2_2)
decision = choice(("single", "list"))
if decision == "single":
[_4] = sample(range(10),1)
_4 = str(_4) + choice(('?', '*', '+'))
if decision == "list":
[_4] = sample(range(10),1)
[max_4] = sample(range(len(str(_13))),1)
max_4 = max_4 if max_4 > 0 else max_4 + 1
[min_4] = sample(range(max_4),1)
        _4 = str(_4) + '{' + str(min_4) + ',' + str(max_4) + '}'
change_all_occurrences(r'\verb+1+', r'\verb+' + _1 + '+')
change_all_occurrences(r'\verb+2_1+', r'\verb+' + _2_1 + '+')
change_all_occurrences(r'\verb+2_2+', r'\verb+' + _2_2 + '+')
change_all_occurrences(r'\verb+3_1+', r'\verb+' + _3_1 + '+')
change_all_occurrences(r'\verb+3_2+', r'\verb+' + _3_2 + '+')
## change_all_occurrences(r'\verb+3_1+', _3_1)
## change_all_occurrences(r'\verb+3_2+', _3_2 )
change_all_occurrences(r'\verb+3_1_f+', r'\verb+' + _3_1_f + '+')
change_all_occurrences(r'\verb+3_2_f+', r'\verb+' + _3_2_f + '+')
change_all_occurrences(r'\verb+4+', r'\verb+' + _4 + '+')
def make_transformations_on_results(program):
    '''Compute the true/false answer values from the executed question program and substitute them.'''
global a
global b
global _1
global _2_1
global _2_2
global _3_1
global _3_2
global _3_1_f
global _3_2_f
global _4
global _5_1
global _5_2
global __var
global var
global P
global p
the_list = program.get_global(a)
the_list_b = program.get_global(b)
#correct index for question 5
_5_1, _5_2 = choose_correct_idx_5(the_list)
change_all_occurrences(r'\verb+5_1+', r'\verb+' + str(_5_1) + '+')
change_all_occurrences(r'\verb+5_2+', r'\verb+' + str(_5_2) + '+')
answer_1_true = regular_search(_1, the_list_b)
new_b = regular_subtitution(_2_1, _2_2, the_list_b)
answer_2_true = round(regular_search(_2_2, new_b) /
regular_search(_2_2, the_list_b),2)
answer_4_true = regular_match(_4, the_list_b)
answer_5_true = power(the_list[_5_1].num, the_list[_5_2].num)
# true answers
change_all_occurrences(r'\verb+11+', str(answer_1_true))
change_all_occurrences(r'\verb+22+', str(answer_2_true))
change_all_occurrences(r'\verb+44+', str(answer_4_true))
change_all_occurrences(r'\verb+55+', str(answer_5_true))
# wrong answers
increment2 = choice([.1, -.1])
increment4 = choice([1, -1])
increment5 = choice([1, -1])
answer_1_false = _1
answer_2_false = round(answer_2_true + increment2,2)
answer_4_false = answer_4_true + increment4
answer_5_false = answer_5_true + increment5
change_all_occurrences(r'\verb+111+', str(answer_1_false))
change_all_occurrences(r'\verb+222+', str(answer_2_false))
change_all_occurrences(r'\verb+444+', str(answer_4_false))
change_all_occurrences(r'\verb+555+', str(answer_5_false))
def choose_correct_idx_5(the_list):
result_base = 0
result_exp = 1
for i in range(len(the_list)):
value = the_list[i].num
if value < 10:
result_base = i
break
for i in range(len(the_list)):
value = the_list[i].num
if value < 8 and value != the_list[result_base].num:
result_exp = i
break
return result_base, result_exp
def choose_different_answer(the_list, chosen_answer):
    while True:
        result = choice(the_list)
        if result != chosen_answer:
            return result
def power(num, exp):
if exp == 1: return num
else: return num * power(num, exp - 1)
def regular_search(pattern_val, the_list):
return sum(list(map(lambda x: 1 if re.search(r"" + pattern_val, x)
else 0, the_list)))
def regular_subtitution(pattern_val, subtitute_val, the_list):
return list(map(lambda x: re.sub(r"" + pattern_val, subtitute_val, x) , the_list))
def regular_match(pattern_val, the_list):
return sum(list(map(lambda x: 1 if re.match(r"" + pattern_val, x)
else 0, the_list)))
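# Quick sanity examples for the helpers above (hypothetical data, for
# illustration only):
#   power(2, 3) == 8
#   regular_search('5', ['15', 'abc', '55']) == 2   # items containing a '5'
#   regular_match('5', ['15', '55', '5x']) == 2     # items starting with '5'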
| [
"[email protected]"
] | |
219470a0ff5bb403514695edf64cf9ab42c04142 | 6791fd830e1e3bb1b3e31bac32c8c43debc6e45b | /hash_table/files.py | bb007467042726baa23b98afaff41ac9fa007b62 | [] | no_license | niccokunzmann/pydht | e3205eb4f93840531ef79019c7e47156aed44d29 | 89621647455657291dbb27f966a53ab10c6862f5 | refs/heads/master | 2020-05-30T17:48:43.608086 | 2013-10-09T20:34:28 | 2013-10-09T20:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,556 | py | import io
import tempfile
import os
from .. import hashing
from ..errors import ContentAltered
class HashTableFileMixin:
"""A mixin for a hashtable that adds itself to the hashtable when closed"""
def __init__(self, hash_table, *args, **kw):
super().__init__(*args, **kw)
self._hash_table = hash_table
def add_to_hashtable(self):
self.seek(0)
self._hash_table._add_hash_table_file(self)
def close(self, *args, **kw):
self.add_to_hashtable()
super().close(*args, **kw)
def __enter__(self, *args, **kw):
if hasattr(super(), '__enter__'):
super().__enter__(*args, **kw)
return self
def __exit__(self, *args, **kw):
self.add_to_hashtable()
if hasattr(super(), '__exit__'):
return super().__exit__(*args, **kw)
class BytesIO(HashTableFileMixin, io.BytesIO):
"""A io.BytesIO for a hashtable that adds itself to the hashtable when closed"""
pass
class SpooledTemporaryFile(HashTableFileMixin, tempfile.SpooledTemporaryFile):
"""A tempfile.SpooledTemporaryFile for a hashtable that adds itself to the hashtable when closed"""
pass
class HashingFile:
"""One can read from this file and the hash is updated"""
default_chunk_size = 4096
is_hash = staticmethod(hashing.is_hash)
def __init__(self, file, length = None):
self._file = file
self._read = self._file.read
if hasattr(file, 'fileno'):
self.fileno = file.fileno
self._algorithm = hashing.algorithm()
self._length = self.get_length_of_file(file, length)
@property
def length(self):
"""=> length of the file or None"""
return self._length
def get_length_of_file(self, file, length = None):
if length is not None: return length
if hasattr(file, '__len__'):
return len(file)
if hasattr(file, 'fileno'):
try: return os.fstat(file.fileno()).st_size
except OSError: pass
if hasattr(file, 'seek') and hasattr(file, 'tell'):
start = file.tell()
file.seek(0, 2) # end of stream
try: return file.tell() - start
finally: file.seek(start)
if hasattr(file, 'getvalue'):
return len(file.getvalue())
def read(self, *args):
bytes = self._read(*args)
self._algorithm.update(bytes)
return bytes
@property
def hash(self):
return self._algorithm.hexdigest()
def __len__(self):
if self._length is None:
raise TypeError('length not supported for {}'.format(self._file))
return self._length
def __iter__(self):
# should be readline but is not required yet
data = self.read(self.default_chunk_size)
while data:
yield data
data = self.read(self.default_chunk_size)
class HashCheckingFile(HashingFile):
def __init__(self, expected_hash, file, length = None):
assert self.is_hash(expected_hash)
super().__init__(file, length = length)
self.expected_hash = expected_hash
self._bytes_read = 0
@property
def bytes_read(self):
"""=> the number of bytes read from this file"""
return self._bytes_read
def is_valid(self):
"""=> whether the hash of the content matches"""
return self.hash == self.expected_hash and self.is_read_completed()
def is_read_completed(self):
"""=> whether something can be expected to be read from the file
if the file has a length"""
return self.bytes_read == self.length
def read(self, *args):
"""read from the file and at check for consistency when its end is reached"""
bytes = super().read(*args)
self._bytes_read += len(bytes)
if self.is_read_completed() and not self.is_valid():
return self.error_hash_does_not_match()
return bytes
def error_hash_does_not_match(self):
"""Throw an error that the content differs from the expected"""
raise ContentAltered("Expected the hash {} for the ressource {}"
" but got the hash {}".format(self.expected_hash,
self._file,
self.hash))
class NonCheckingBytesIO(io.BytesIO):
@staticmethod
def is_valid():
return True
__all__ = ['BytesIO', 'SpooledTemporaryFile', 'HashingFile', 'HashCheckingFile']
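# A minimal usage sketch (assumption: this module is imported as part of the
# pydht package so the relative imports above resolve; the helper name below
# is ours and not part of the public API). It wraps an in-memory buffer,
# reads it to the end and then exposes the content hash and length.
def _hashing_file_demo():
    raw = io.BytesIO(b"hello world")
    wrapped = HashingFile(raw)
    for _chunk in wrapped:  # iteration reads default_chunk_size pieces
        pass
    return wrapped.hash, len(wrapped)  # hex digest and byte length (11)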
| [
"[email protected]"
] | |
d95f07794f027cd5d62ae5f538ba541367149f10 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/492.py | a1f42066fd7dc40e8cdcf88aee4718a892fe6447 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | def find_recycled(n, b):
ns = str(n)
reclist = []
for i in xrange(1, len(ns), 1):
nrec = ns[i:len(ns)] + ns[0:i]
if nrec[0] != "0":
nrec = eval(nrec)
if nrec <= b and nrec > n and (n, nrec) not in reclist:
reclist.append((n,nrec))
return reclist
inp = file("input.in")
T = eval(inp.readline())
out = file("output.txt", "w")
d = []
for n in xrange(12, 2000000, 1):
d.extend(find_recycled(n, 2000000))
for i in xrange(T):
a, b = inp.readline().strip().split()
a = eval(a)
b = eval(b)
nrec = 0
for item in d:
if item[0] > b:
break
if item[0] >= a and item[1] <= b:
nrec += 1
out.write("Case #%d: %d\n" %(i + 1, nrec))
| [
"[email protected]"
] | |
b211f6d3238e69fee4c33b2e8d89b34fe17e5730 | f56a16a03346eb2854eaeae0a13a92a222806551 | /test/functional/interface_bitcoin_cli.py | 74867f2d89fe0cdd48537eea6e2d53ac2dc0d1e0 | [
"MIT"
] | permissive | minblock/mishcoin | 77b64c00043557cde6b49d4f58612b8ff670d8f6 | 65d47897b2413b83480d1f04eb2031f62b36a708 | refs/heads/master | 2021-05-22T23:56:07.613136 | 2020-04-05T03:14:27 | 2020-04-05T03:14:27 | 253,146,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,408 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mishcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Mishcoin Core RPC client version" in cli_response)
self.log.info("Compare responses from getwalletinfo RPC and `mishcoin-cli getwalletinfo`")
if self.is_wallet_compiled():
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `mishcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `mishcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
if self.is_wallet_compiled():
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
if self.is_wallet_compiled():
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
if self.is_wallet_compiled():
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| [
"[email protected]"
] | |
655f1c6517e4c4109ba59bf8025f9fc0cb629994 | 41a0c25333100fd551e7a49ceec128c1cd80857f | /Scripts/doSuperResolution.py | 7a61ed8b685cd6ce72250a81e478dc58cf0f5443 | [] | no_license | ANTsXNet/MRISuperResolution | 7a6a6b8e3290d993f79a8d0bc9aa357fee755cb0 | 3568ad193e124d2000a39e89f11e50231443fff6 | refs/heads/master | 2021-08-16T10:16:41.781465 | 2020-09-22T15:37:53 | 2020-09-22T15:37:53 | 223,532,949 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et
import os
import sys
import time
import numpy as np
import keras
import ants
import antspynet
args = sys.argv
if len(args) != 3:
help_message = ("Usage: python doSuperResolution.py inputFile outputFile")
raise AttributeError(help_message)
else:
input_file_name = args[1]
output_file_name = args[2]
start_time_total = time.time()
print("Reading ", input_file_name)
start_time = time.time()
input_image = ants.image_read(input_file_name)
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time: ", elapsed_time, " seconds)")
dimension = len(input_image.shape)
input_image_list = list()
if dimension == 4:
input_image_list = ants.ndimage_to_list(input_image)
elif dimension == 2:
raise ValueError("Model for 3-D or 4-D images only.")
elif dimension == 3:
input_image_list.append(input_image)
model = antspynet.create_deep_back_projection_network_model_3d(
(*input_image_list[0].shape, 1),
number_of_outputs=1, number_of_base_filters=64,
number_of_feature_filters=256, number_of_back_projection_stages=7,
convolution_kernel_size=(3, 3, 3), strides=(2, 2, 2),
number_of_loss_functions=1)
print( "Loading weights file" )
start_time = time.time()
weights_file_name = "./mriSuperResolutionWeights.h5"
if not os.path.exists(weights_file_name):
weights_file_name = antspynet.get_pretrained_network("mriSuperResolution", weights_file_name)
model.load_weights(weights_file_name)
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time: ", elapsed_time, " seconds)")
number_of_image_volumes = len(input_image_list)
output_image_list = list()
for i in range(number_of_image_volumes):
print("Applying super resolution to image", i, "of", number_of_image_volumes)
start_time = time.time()
input_image = ants.iMath(input_image_list[i], "TruncateIntensity", 0.0001, 0.995)
output_sr = antspynet.apply_super_resolution_model_to_image(input_image, model, target_range=(127.5, -127.5))
input_image_resampled = ants.resample_image_to_target(input_image, output_sr)
output_image_list.append(antspynet.regression_match_image(output_sr, input_image_resampled, poly_order = 2))
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time:", elapsed_time, "seconds)")
print("Writing output image.")
if number_of_image_volumes == 1:
ants.image_write( output_image_list[0], output_file_name)
else:
output_image = ants.list_to_ndimage(input_image, output_image_list)
ants.image_write(output_image, output_file_name)
end_time_total = time.time()
elapsed_time_total = end_time_total - start_time_total
print( "Total elapsed time: ", elapsed_time_total, "seconds" ) | [
"[email protected]"
] | |
ca829bb3b5c37e7e3f12c3fdecba9401acdbba5d | bccfab4d853f7417401a084be95de293e66ccd2a | /mySpider/auxiliary_files/Exhibition136_supporting.py | 7999b97dede8993a5006d00cee82bfa1e3c14a46 | [] | no_license | CS1803-SE/The-First-Subsystem | a8af03ce04a9de72a6b78ece6411bac4c02ae170 | 4829ffd6a83133479c385d6afc3101339d279ed6 | refs/heads/main | 2023-05-06T02:32:08.751139 | 2021-05-24T06:09:37 | 2021-05-24T06:09:37 | 363,400,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/5/11 20:14
# @Author : 10711
# @File : Exhibition136_supporting.py
# @Software: PyCharm
class Exhibition136Supporting:
startUrl = ['http://www.changjiangcp.com/view/16351.html'] | [
"[email protected]"
] | |
e340886d15839bcf81591484b8f866d8ee964e49 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02806/s940830540.py | 373fb938a67908fb9d236775b0b7b61aa4ef974e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | N = int(input())
st = [input().split() for i in range(N)]
X = input()
flg = 0
ans = 0
for s, t in st:
if flg:
ans += int(t)
else:
if s == X:
flg = 1
print(ans) | [
"[email protected]"
] | |
c1b942cfb9f3e78d5166a2ba3efb2c10a7cea81b | ab5cdf8f2de94c327e4679da84f941b1f3c04db4 | /kubernetes/test/test_version_api.py | 2949fd9417eb62fa0c11cfb164bab7abe4314d78 | [
"Apache-2.0"
] | permissive | diannaowa/client-python | a4a92a125178db26004eaef5062f9b1b581b49a8 | 5e268fb0b6f21a535a14a7f968b84ed4486f6774 | refs/heads/master | 2020-12-02T22:06:03.687696 | 2017-06-30T21:42:50 | 2017-06-30T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.version_api import VersionApi
class TestVersionApi(unittest.TestCase):
""" VersionApi unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.version_api.VersionApi()
def tearDown(self):
pass
def test_get_code(self):
"""
Test case for get_code
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e125277049f9d9995016ddc244b396fee1ba6e28 | 40bc68b22e37e77ff7d7ed75f08b7195d16f9fde | /2019/day07/solutionsvm.py | 4a2d5b272357149ee73475ed6667046cf732278d | [] | no_license | fuglede/adventofcode | 1dd61b3bfd8db0346c8cb6838da8da5adf3d5296 | e3c85daf96889dd7aac04a0e741d1409f74e549d | refs/heads/master | 2023-09-03T08:52:48.575960 | 2023-08-26T18:29:19 | 2023-08-26T18:29:19 | 159,918,186 | 59 | 14 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from collections import deque
from itertools import cycle, permutations
from math import inf
from vm import VM, read_program
p07 = read_program(7)
# Part one
m = -inf
for perm in permutations(range(5)):
vms = []
signal = 0
for phase in perm:
vm = VM(p07)
signal = next(VM(p07, deque([phase, signal])))
m = max(m, signal)
print(m)
# Part two
m = -inf
for perm in permutations(range(5, 10)):
vms = [VM(p07, deque([phase])) for phase in perm]
signal = 0
try:
for i in cycle(range(5)):
vms[i].inputs.append(signal)
signal = next(vms[i])
except StopIteration:
m = max(m, signal)
print(m)
| [
"[email protected]"
] | |
d6c70c773646c56f5d50057fddd579b9c60a264a | 213be849a50c84e9fc01aade5ff064a9aa7eb8c6 | /nautobot_golden_config/__init__.py | 133f8c98424367855de2c97353490af77f994942 | [
"Apache-2.0"
] | permissive | nniehoff/nautobot-plugin-golden-config | c8d62b381727c9ba76740e4dfa81835561738840 | 5c5f051d244b277dc9d1dbd6a11c9b236ee9a229 | refs/heads/main | 2023-08-03T22:11:46.669288 | 2021-09-24T14:56:28 | 2021-09-24T14:56:28 | 413,983,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | """Plugin declaration for nautobot_golden_config."""
__version__ = "0.9.7"
from nautobot.extras.plugins import PluginConfig
class GoldenConfig(PluginConfig):
"""Plugin configuration for the nautobot_golden_config plugin."""
name = "nautobot_golden_config"
verbose_name = "Golden Configuration"
version = __version__
author = "Network to Code, LLC"
author_email = "[email protected]"
description = "A plugin for managing Golden Configurations."
base_url = "golden-config"
required_settings = []
# min_version = "0"
# max_version = "100"
default_settings = {
"enable_backup": True,
"enable_golden": True,
"enable_compliance": True,
"enable_sotagg": True,
"per_feature_width": 13,
"per_feature_height": 4,
"per_feature_bar_width": 0.3,
}
caching_config = {}
config = GoldenConfig # pylint:disable=invalid-name
| [
"[email protected]"
] | |
2bca0b2d85d4f4a26bf43a98631bde8cfd883738 | e2cd4f444b18adca671ae2ac8856594b22c6d2ae | /arc/migrations/0091_remove_story_details_change_date.py | 1fd910194b0f36e1c5aaa93c79ea00542003a3f9 | [] | no_license | anshumanairy/Sprint-Management | 36c54c03b66a0d02071a337e8217144a0b0c9578 | 0c4e8fe87ec4099253d894b7876f0b5b914a2652 | refs/heads/master | 2022-12-28T02:46:25.847713 | 2020-10-02T10:02:13 | 2020-10-02T10:02:13 | 195,804,893 | 0 | 0 | null | 2020-10-02T10:02:14 | 2019-07-08T12:11:27 | CSS | UTF-8 | Python | false | false | 341 | py | # Generated by Django 2.2 on 2019-08-21 10:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('arc', '0090_story_details_change_date'),
]
operations = [
migrations.RemoveField(
model_name='story_details',
name='change_date',
),
]
| [
"[email protected]"
] | |
9ae68445b993fd78a1465231bfcc101b7efb2783 | 3f888fab6f08ceda44949c7c583ceddcc41b4062 | /report_result.py | 3f86d4e510fb3a6cda320f5710f608dc81f4cc25 | [] | no_license | sisiwang027/Project_SBA_HackBright | cf0397da2eeb58162718c8904d2476dadd4c40bd | 38fb60bade59cc5192b08fb47a8530f153a5a7f6 | refs/heads/master | 2020-12-14T08:50:06.125734 | 2017-08-07T08:39:38 | 2017-08-07T08:39:38 | 95,503,248 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,515 | py | """Get the result of reports and return json"""
from flask import (Flask, render_template, redirect, request, flash,
session, jsonify, url_for, send_from_directory)
from model import (Gender, User, Customer, Category, CategoryAttribute, CategoryDetail,
Product, CategoryDetailValue, ProductDetail, Sale, Purchase)
from model import connect_to_db, db, app
from datetime import datetime
from sqlalchemy.sql.functions import coalesce
from dateutil.relativedelta import relativedelta
def sql_to_linechartejson(sqlalchemy_list, chart_title):
"""Change the result of sqlalchemy to linecharte json"""
data_dict = {}
data_dict["labels"] = []
data_dict["datasets"] = []
cg_qty_dic = {}
color_list = ["#ffb366", "rgba(102,204,255,1)", "#66ff66", "#99b3e6"]
clor_choose = 0
for time_at, cg_name, qty in sqlalchemy_list:
time_at = str(int(time_at))
cg_name = str(cg_name)
if time_at not in data_dict["labels"]:
data_dict["labels"].append(time_at)
cg_qty_dic.setdefault(cg_name, []).append(qty)
for cg_name in cg_qty_dic:
data_set = {"fill": True,
"lineTension": 0.5,
"backgroundColor": "rgba(220,220,220,0.2)",
"borderCapStyle": 'butt',
"borderDash": [],
"borderDashOffset": 0.0,
"borderJoinStyle": 'miter',
"pointBorderColor": "rgba(220,220,220,1)",
"pointBackgroundColor": "#fff",
"pointBorderWidth": 1,
"pointHoverRadius": 5,
"pointHoverBackgroundColor": "#fff",
"pointHoverBorderColor": "rgba(220,220,220,1)",
"pointHoverBorderWidth": 2,
"pointRadius": 3,
"pointHitRadius": 10,
"spanGaps": False}
data_set["label"] = cg_name
data_set["data"] = cg_qty_dic[cg_name]
data_set["borderColor"] = color_list[clor_choose]
clor_choose += 1
data_dict["datasets"].append(data_set)
options = {"title": {"display": True, "text": chart_title}, "responsive": True}
data_chart = {"type": "line", "options": options, "data": data_dict}
return data_chart
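# Illustration with made-up rows (not from the database): each
# (period, category, quantity) tuple contributes to one shared labels list and
# one dataset per category, which is the shape Chart.js expects for a line
# chart. Note the datasets are not padded per label, so a category that misses
# a period simply gets a shorter data list.
#   sql_to_linechartejson([(201701, "Shoes", 5), (201702, "Shoes", 8),
#                          (201701, "Bags", 3)], "Qty Chart")
#   -> data["labels"] == ["201701", "201702"]
#   -> one dataset labelled "Shoes" with data [5, 8] and one labelled "Bags"
#      with data [3]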
def show_sal_qtychart_json(user_id, month_num, attr_list):
"""show sale quantities chart data as a json"""
firstday_month = "01{}{}".format(str(datetime.now().month), str(datetime.now().year))
set_date = datetime.strptime(firstday_month, "%d%m%Y").date() - relativedelta(months=month_num-1)
    sale = db.session.query(db.func.date_part('year', Sale.transc_at).label("year_at"),\
                            db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id,\
                            db.func.sum(Sale.transc_price * Sale.quantities).label("revenue"),\
                            db.func.sum(Sale.quantities).label("sale_qty")).filter(Sale.transc_at >= set_date)\
        .group_by(db.func.date_part('year', Sale.transc_at).label("year_at"),\
                  db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id).subquery()
    purch_cost = db.session.query(Purchase.prd_id,\
                                  (db.func.sum(Purchase.purchase_price * Purchase.quantities) / db.func.sum(Purchase.quantities)).label("avg_purch_cost"))\
        .group_by(Purchase.prd_id).subquery()
    prod = db.session.query(Product.prd_id, Product.cg_id, Category.cg_name)\
        .join(Category).join(Product.prddetail)\
        .filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
        .group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
sale_qty_sum = db.session.query((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name,\
db.func.sum(db.func.round(sale.c.sale_qty)).label("sale_qty"))\
.join(purch_cost, sale.c.prd_id == purch_cost.c.prd_id)\
.join(prod, sale.c.prd_id == prod.c.prd_id)\
.group_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name).order_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"), prod.c.cg_name)\
.all()
return sql_to_linechartejson(sale_qty_sum, "Qty Chart")
def show_sal_revenuechart_json(user_id, month_num, attr_list):
"""show sale revenue chart data as a json"""
firstday_month = "01{}{}".format(str(datetime.now().month), str(datetime.now().year))
set_date = datetime.strptime(firstday_month, "%d%m%Y").date() - relativedelta(months=month_num-1)
sale = db.session.query(db.func.date_part('year', Sale.transc_at).label("year_at"),\
db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id,\
db.func.sum(Sale.transc_price * Sale.quantities).label("revenue"),\
db.func.sum(Sale.quantities).label("sale_qty")).filter(Sale.transc_at >= set_date)\
.group_by(db.func.date_part('year', Sale.transc_at).label("year_at"), db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id).subquery()
purch_cost = db.session.query(Purchase.prd_id,\
(db.func.sum(Purchase.purchase_price * Purchase.quantities) / db.func.sum(Purchase.quantities)).label("avg_purch_cost"))\
.group_by(Purchase.prd_id).subquery()
prod = db.session.query(Product.prd_id, Product.cg_id, Category.cg_name)\
.join(Category).join(Product.prddetail)\
.filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
.group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
sale_revenue_sum = db.session.query((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name,\
db.func.sum(sale.c.revenue).label("revenue"))\
.join(purch_cost, sale.c.prd_id == purch_cost.c.prd_id)\
.join(prod, sale.c.prd_id == prod.c.prd_id)\
.group_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name).order_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"), prod.c.cg_name)\
.all()
return sql_to_linechartejson(sale_revenue_sum, "Revenue Chart")
def show_sal_profitchart_json(user_id, month_num, attr_list):
"""show sale profit chart data as a json"""
firstday_month = "01{}{}".format(str(datetime.now().month), str(datetime.now().year))
set_date = datetime.strptime(firstday_month, "%d%m%Y").date() - relativedelta(months=month_num-1)
sale = db.session.query(db.func.date_part('year', Sale.transc_at).label("year_at"),\
db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id,\
db.func.sum(Sale.transc_price * Sale.quantities).label("revenue"),\
db.func.sum(Sale.quantities).label("sale_qty")).filter(Sale.transc_at >= set_date)\
.group_by(db.func.date_part('year', Sale.transc_at).label("year_at"), db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id).subquery()
purch_cost = db.session.query(Purchase.prd_id,\
(db.func.sum(Purchase.purchase_price * Purchase.quantities) / db.func.sum(Purchase.quantities)).label("avg_purch_cost"))\
.group_by(Purchase.prd_id).subquery()
prod = db.session.query(Product.prd_id, Product.cg_id, Category.cg_name)\
.join(Category).join(Product.prddetail)\
.filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
.group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
sale_profit_sum = db.session.query((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name,\
db.func.sum(sale.c.revenue - purch_cost.c.avg_purch_cost * sale.c.sale_qty).label("profit"))\
.join(purch_cost, sale.c.prd_id == purch_cost.c.prd_id)\
.join(prod, sale.c.prd_id == prod.c.prd_id)\
.group_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
prod.c.cg_name).order_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"), prod.c.cg_name)\
.all()
return sql_to_linechartejson(sale_profit_sum, "Profit Chart")
def sale_sum_report(user_id, attr_list, month_num):
"""Return data of Sale Sum Report."""
result = {}
firstday_month = "01{}{}".format(str(datetime.now().month), str(datetime.now().year))
set_date = datetime.strptime(firstday_month, "%d%m%Y").date() - relativedelta(months=month_num-1)
    sale = db.session.query(db.func.date_part('year', Sale.transc_at).label("year_at"),\
                            db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id,\
                            db.func.sum(Sale.transc_price * Sale.quantities).label("revenue"),\
                            db.func.sum(Sale.quantities).label("sale_qty")).filter(Sale.transc_at >= set_date)\
        .group_by(db.func.date_part('year', Sale.transc_at).label("year_at"),\
                  db.func.date_part('month', Sale.transc_at).label("month_at"), Sale.prd_id).subquery()
    purch_cost = db.session.query(Purchase.prd_id,\
                                  (db.func.sum(Purchase.purchase_price * Purchase.quantities) / db.func.sum(Purchase.quantities)).label("avg_purch_cost"))\
        .group_by(Purchase.prd_id).subquery()
    prod = db.session.query(Product.prd_id, Product.cg_id, Category.cg_name)\
        .join(Category).join(Product.prddetail)\
        .filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
        .group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
    sale_sum = db.session.query((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"),\
                                prod.c.cg_name,\
                                db.func.sum(db.func.round(sale.c.sale_qty)).label("sale_qty"),\
                                db.func.sum(sale.c.revenue).label("revenue"),\
                                db.func.sum(sale.c.revenue - purch_cost.c.avg_purch_cost * sale.c.sale_qty).label("profit"))\
        .join(purch_cost, sale.c.prd_id == purch_cost.c.prd_id)\
        .join(prod, sale.c.prd_id == prod.c.prd_id)\
        .group_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"), prod.c.cg_name)\
        .order_by((sale.c.year_at * 100 + sale.c.month_at).label("sale_at"), prod.c.cg_name)
column_name = [column["name"] for column in sale_sum.column_descriptions]
result["result"] = [dict(zip(column_name, data)) for data in sale_sum]
return result
def prod_sum_report(user_id, attr_list, month_num):
"""Return data of Product Sum."""
result = {}
firstday_month = month_num.replace('-', '') + "01"
set_date = datetime.strptime(firstday_month, "%Y%m%d").date() + relativedelta(months=1)
purch = db.session.query(Purchase.prd_id,
db.func.round(db.func.sum(coalesce(Purchase.quantities, 0))).label("purch_qty"),
db.func.sum(coalesce(db.func.round(Purchase.quantities) * Purchase.purchase_price, 0)).label("purch_price_sum"))\
.filter(Purchase.purchase_at < set_date)\
.group_by(Purchase.prd_id).subquery()
sale = db.session.query(Sale.prd_id,
db.func.round(db.func.sum(coalesce(Sale.quantities, 0))).label("sale_qty"),
db.func.sum(coalesce(db.func.round(Sale.quantities) * Sale.transc_price, 0)).label("sale_price_sum"))\
.filter(Sale.transc_at < set_date)\
.group_by(Sale.prd_id).subquery()
prod = db.session.query(Product.prd_id,
Product.cg_id, Category.cg_name)\
.join(Category).join(Product.prddetail)\
.filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
.group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
product_sum = db.session.query(prod.c.cg_name,
db.func.count(prod.c.prd_id).label("prod_num"),
db.func.sum(purch.c.purch_qty).label("purch_qty_sum"),
db.func.sum(purch.c.purch_price_sum).label("purch_price_sum"),
db.func.sum(purch.c.purch_qty - sale.c.sale_qty).label("purch_onhand_qty"),
db.func.sum(purch.c.purch_price_sum / purch.c.purch_qty * (purch.c.purch_qty - sale.c.sale_qty)).label("purch_onhand_cost"),
db.func.sum(sale.c.sale_qty).label("sale_qty"),
db.func.sum(sale.c.sale_price_sum).label("sale_price_sum"))\
.outerjoin(purch, prod.c.prd_id == purch.c.prd_id)\
.outerjoin(sale, prod.c.prd_id == sale.c.prd_id)\
.group_by(prod.c.cg_name)
column_name = [column["name"] for column in product_sum.column_descriptions]
result["result"] = [dict(zip(column_name, data)) for data in product_sum]
return result
def sql_to_pichartejson(sqlalchemy_list, chart_title):
"""Change the result of sqlalchemy to linecharte json"""
qty_date = list(sqlalchemy_list[0])
data_dict = {"labels": ["Sale Qty", "On-hand Qty"],
"datasets": [{"data": qty_date,
"backgroundColor": ["#FF6384", "#36A2EB"],
"hoverBackgroundColor": ["#FF6384", "#36A2EB"]}]}
options = {"title": {"display": True, "text": chart_title}, "responsive": True}
data_chart = {"type": "doughnut", "options": options, "data": data_dict}
return data_chart
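# Example of the structure returned by sql_to_pichartejson (values illustrative):
#   {"type": "doughnut",
#    "options": {"title": {"display": True, "text": "Sales Chart"}, "responsive": True},
#    "data": {"labels": ["Sale Qty", "On-hand Qty"],
#             "datasets": [{"data": [120, 30],
#                           "backgroundColor": ["#FF6384", "#36A2EB"],
#                           "hoverBackgroundColor": ["#FF6384", "#36A2EB"]}]}}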
def show_prodchart_json(user_id, month_num, attr_list):
"""show sale profit chart data as a json"""
firstday_month = month_num.replace('-', '') + "01"
set_date = datetime.strptime(firstday_month, "%Y%m%d").date() + relativedelta(months=1)
purch = db.session.query(Purchase.prd_id,
db.func.round(db.func.sum(coalesce(Purchase.quantities, 0))).label("purch_qty"),
db.func.sum(coalesce(db.func.round(Purchase.quantities) * Purchase.purchase_price, 0)).label("purch_price_sum"))\
.filter(Purchase.purchase_at < set_date)\
.group_by(Purchase.prd_id).subquery()
sale = db.session.query(Sale.prd_id,
db.func.round(db.func.sum(coalesce(Sale.quantities, 0))).label("sale_qty"),
db.func.sum(coalesce(db.func.round(Sale.quantities) * Sale.transc_price, 0)).label("sale_price_sum"))\
.filter(Sale.transc_at < set_date)\
.group_by(Sale.prd_id).subquery()
# prod = db.session.query(Product.prd_id,
# Product.cg_id, Category.cg_name)\
# .join(Category).join(Product.prddetail)\
# .filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
# .group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()
product_sum = db.session.query(db.func.sum(sale.c.sale_qty).label("sale_qty_sum"),
db.func.sum(purch.c.purch_qty - sale.c.sale_qty).label("purch_onhand_qty"))\
.join(purch, sale.c.prd_id == purch.c.prd_id).all()
return sql_to_pichartejson(product_sum, "Sales Chart")
def sql_to_barchartejson(sqlalchemy_list, chart_title):
"""Change the result of sqlalchemy to linecharte json"""
prod_list = []
sale_list = []
for prod_name, sale_qty in sqlalchemy_list:
prod_list.append(prod_name)
sale_list.append(sale_qty)
data_dict = {"labels": prod_list,
"datasets": [{"data": sale_list,
"backgroundColor": ["#FF6384", "#36A2EB", "#36A2EB", "#36A2EB", "#36A2EB",
"#36A2EB", "#36A2EB", "#36A2EB", "#36A2EB", "#36A2EB"]}]}
options = {"title": {"display": True, "text": chart_title}, "legend": {"display": False}}
data_chart = {"type": "bar", "options": options, "data": data_dict}
return data_chart
def show_top10_prod_json(user_id, month_num, attr_list):
"""Show Top 10 products chart."""
firstday_month = month_num.replace('-', '') + "01"
set_date = datetime.strptime(firstday_month, "%Y%m%d").date() + relativedelta(months=1)
top10_prod = db.session.query(Product.prd_name,
db.func.sum(db.func.round(Sale.quantities)).label("sale_qty"))\
.filter(Sale.transc_at < set_date)\
.join(Sale).group_by(Product.prd_name)\
.order_by(db.func.sum(db.func.round(Sale.quantities)).label("sale_qty").desc())\
.limit(10).all()
return sql_to_barchartejson(top10_prod, "Top Ten Products")
def sql_to_cust_barchartejson(sqlalchemy_list, chart_title):
"""Change the result of sqlalchemy to barcharte json"""
distrib_name = []
cust_num = []
for distri_name, num in sqlalchemy_list:
distrib_name.append(distri_name)
cust_num.append(num)
data_dict = {"labels": distrib_name,
"datasets": [{"data": cust_num,
"backgroundColor": ["#36A2EB", "#FF6384", "#36A2EB", "#36A2EB"]}]}
options = {"title": {"display": True, "text": chart_title}, "legend": {"display": False}}
data_chart = {"type": "bar", "options": options, "data": data_dict}
return data_chart
def show_cust_age_json(user_id):
"""Show customer age distribution chart."""
sql = "select birth, count(*) num from ( select case when date_part('year',age(birth_date)) < 20 then 'age:0-20' when date_part('year',age(birth_date)) between 20 and 30 then 'age:21-30' when date_part('year',age(birth_date)) between 30 and 40 then 'age:31-40' when date_part('year',age(birth_date)) > 40 then 'age:41 and up' end birth from customers where user_id = 1 ) a group by birth order by 1"
cursor = db.session.execute(sql)
result = cursor.fetchall()
return sql_to_cust_barchartejson(result, "Customers Age Distribution")
if __name__ == "__main__":
print "Don't run this file directly."
| [
"[email protected]"
] | |
95592d26dae1df0cb62329b8a85140998af39521 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/7c65ad11e2914bc9774abd37cdd1ac455f1c9433-<list_all>-fix.py | 14a5a1ef5bf50d8cde4ae3f6831042b8b6ceec9b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
def list_all(self):
self.log('List all items')
try:
response = self.storage_client.storage_accounts.list()
except Exception as exc:
self.fail('Error listing all items - {0}'.format(str(exc)))
return response
| [
"[email protected]"
] | |
58607e561ba847b72406335fbca478f269ddb28c | 3ec008209548feba572f95629747abdebe0558a3 | /housing prediction/env/bin/jupyter | 37d2a6756a7e2057599c8598b75d53af3855bfea | [] | no_license | KaTaiHo/Continious_Learning | 9937b5d14067b13608ad731e5345043caacaca96 | 879279f9a51038dc3d7b64c28b1e13880f23ac8a | refs/heads/master | 2022-11-20T21:23:20.680437 | 2018-08-24T06:15:16 | 2018-08-24T06:15:16 | 99,074,529 | 0 | 1 | null | 2022-10-21T10:47:01 | 2017-08-02T05:11:53 | Python | UTF-8 | Python | false | false | 263 | #!/Users/KaTaiHo/Desktop/GitHub/learning/env/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
1980f57a7df7a9544b8dbf8bacd0c53c350a1143 | 559995c23c13f67ee6f342389d0db81081207d87 | /prjforinfcreditvilfw/estimation/postprocess/jsoncsv/gen_top_estimates_df.py | db5348dfd2d78e63f2c2dd095ba7aee8b7cbbcb7 | [] | no_license | MacroFinanceHub/PrjForInfCreditVilFW | 06a6c475d0c846c1578205e062acb0190bcce1c2 | d2a863656962691f8dc13d205a82c81823040c8b | refs/heads/main | 2023-07-19T05:31:15.992847 | 2021-08-30T14:44:14 | 2021-08-30T14:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,516 | py | '''
Created on Aug 30, 2018
@author: fan
'''
import os.path
import logging
import pandas as pd
# import pandas.io.readexport as readexport
import pyfan.panda.inout.readexport as readexport
import estimation.moments.momcomp as momcomp
import projectsupport.hardcode.string_shared as hardstring
logger = logging.getLogger(__name__)
def top_estimates_df(panda_df, top_estimates_keep_count):
"""Merge top results moments and data together
Find Top estimates, combine model with data, and save
Examples
--------
import estimation.postprocess.jsoncsv.gen_top_estimates_df as top_estimate
top_esti_df = top_estimate.top_estimates_df(panda_df, top_estimates_keep_count)
"""
moment_csv_strs = hardstring.moment_csv_strs()
moment_pd_cate_vars = hardstring.moment_csv_strs_cates()
top_objective = moment_csv_strs['main_allperiods_obj'][1]
sort_by_col = top_objective
group_by_col = moment_pd_cate_vars['period_dictkey']['colname']
unique_periodkeys = list(panda_df[moment_pd_cate_vars['period_dictkey']['colname']].unique())
for ctr, period_key_str_val in enumerate(unique_periodkeys):
all_esti_df_cur = panda_df[panda_df[group_by_col] == period_key_str_val]
all_esti_df_cur = all_esti_df_cur.sort_values(sort_by_col).head(top_estimates_keep_count)
if (ctr == 0):
top_esti_df = all_esti_df_cur
else:
# drop = true to delete the current index (2020-11-14 21:10)
top_esti_df = pd.concat([top_esti_df, all_esti_df_cur], axis=0).reset_index(drop=True)
    # Previously drop=False by default, which created an index column that had to be deleted; the code below is obsolete
if 'index' in top_esti_df:
del top_esti_df['index']
return top_esti_df
def top_results_merge_moments_data(combo_type_list,
esti_specs,
all_esti_df,
top_estimates_keep_count=4,
search_directory='',
save_file_name='',
save_file_name_regress='',
multiperiod=True,
save_panda_top=True,
return_panda_top=False,
exo_or_endo_graph_row_select='_exo_wgtJ'):
"""Merge top results moments and data together
Find Top estimates, combine model with data, and save to CSV
Parameters
----------
return_panda_top: boolean
return existing, do not save new or find new if already has file
Examples
--------
import estimation.postprocess.jsoncsv.gen_top_estimates_df as top_estimate
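    top_esti_df = top_estimate.top_results_merge_moments_data(combo_type_list, esti_specs, all_esti_df)
    (illustrative call; the argument values come from the surrounding estimation run)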
"""
'''
0. File Full Directory + Name
'''
save_directory = search_directory
if (save_file_name.endswith('.csv')):
file_name = save_file_name
else:
file_name = save_file_name + '.csv'
save_directory_file_name = save_directory + file_name
file_exists = False
if (os.path.isfile(save_directory_file_name)):
file_exists = True
if (file_exists and return_panda_top):
top_esti_df = readexport.read_csv(save_directory_file_name)
else:
'''
1. Top Estimate and Moments
'''
moment_pd_cate_vars = hardstring.moment_csv_strs_cates()
moment_csv_strs = hardstring.moment_csv_strs()
'''
1b. CAN NOT have other top_objective, CAN ONLY HAVE the main_allperiods_obj
other objectives are period specific, not common to all periods.
'''
top_objective = moment_csv_strs['main_allperiods_obj'][1]
'''
unique period keys
'''
unique_periodkeys = list(all_esti_df[moment_pd_cate_vars['period_dictkey']['colname']].unique())
unique_periodkeys = [x for x in unique_periodkeys if str(x) != 'nan']
if (multiperiod):
esti_obj_main_obj = [top_objective,
moment_csv_strs['period_dictkey'][1]]
'''
1. separate to sub-groups
2. each subgroup select top
3. combine back
'''
top_esti_df = top_estimates_df(all_esti_df, top_estimates_keep_count)
else:
esti_obj_main_obj = [moment_csv_strs['main_obj'][1]]
all_esti_df = all_esti_df.sort_values(esti_obj_main_obj, ascending=True)
'''
2. Top estimates
'''
top_esti_df = all_esti_df.iloc[0:top_estimates_keep_count * 2]
'''
3. add string variable for model or data, all model simulation results are model
'''
data_model_col = moment_pd_cate_vars['data_model']['colname']
data_model_col_model_cate = moment_pd_cate_vars['data_model']['cates']['model'][0]
data_model_col_data_cate = moment_pd_cate_vars['data_model']['cates']['data'][0]
top_esti_df[data_model_col] = data_model_col_model_cate
'''
Unique Period Keys In current Results
'''
moments_type = esti_specs['moments_type']
momsets_type = esti_specs['momsets_type']
moments_data, __, __ = momcomp.get_moments_momsets(moments_type, momsets_type)
periods_keys = hardstring.region_time_dict(True)
for period_key_str_val in unique_periodkeys:
period_moments_data_dict = moments_data[period_key_str_val]
'''
Add to CSV
'''
period_moments_data_dict[moment_csv_strs['period_dictkey'][0]] = period_key_str_val
period_moments_data_dict[data_model_col] = data_model_col_data_cate
df_period_moments_data = pd.DataFrame([period_moments_data_dict],
columns=period_moments_data_dict.keys())
top_esti_df = pd.concat([top_esti_df, df_period_moments_data], axis=0).reset_index(drop=True)
'''
4. Re-order column names
'''
steady_agg_suffixes = hardstring.steady_aggregate_suffixes()
moment_key_list = list(period_moments_data_dict.keys())
moment_key_list_wth_var = []
for mom_key in moment_key_list:
moment_key_list_wth_var.append(mom_key)
moment_key_list_wth_var.append(mom_key + steady_agg_suffixes['_var'][0])
'''
4b. include priority columns that are also in all_cols, this is for variance, earlier calculation did nothave variance
also deletes vars from the data/model key var as well as date var
'''
all_cols = list(top_esti_df.columns.values)
priority_cols = [top_objective] + moment_key_list_wth_var
priority_cols_include = [col_priority for col_priority in priority_cols if (col_priority in all_cols)]
non_priority_cols = [col for col in all_cols if (col not in priority_cols_include)]
resorted_cols = priority_cols_include + non_priority_cols
top_esti_df = top_esti_df[resorted_cols]
'''
4c. Sort by time, data vs model, by objective
'''
sort_cols = [moment_pd_cate_vars['data_model']['colname'], moment_csv_strs['main_obj'][1]]
if (multiperiod):
sort_cols = [moment_csv_strs['period_dictkey'][0],
moment_pd_cate_vars['data_model']['colname'],
top_objective]
top_esti_df = top_esti_df.sort_values(by=sort_cols, ascending=True)
'''
5. Save Results in single file under main folder
'''
if (save_panda_top):
save_directory = search_directory
if (save_file_name.endswith('.csv')):
file_name = save_file_name
else:
file_name = save_file_name + '.csv'
top_esti_df.to_csv(save_directory + file_name, header=True, index=False)
'''
6. Top with only estimation parameters
'''
save_panda_top_regress = False
if (save_panda_top_regress):
'''
if save panda_top, save a separate file where we have estimation objective
as well as all parameters that were randomly drawn, that were allowed to be different
- keep only one time period, because parameters and objectives are the same
'''
regress_use_cols = [moment_csv_strs['main_allperiods_obj'][1],
moment_csv_strs['agg_prob_obj'][1],
moment_csv_strs['BI_obj'][1]] + combo_type_list[0][2]
'''
select only one time period rows
all_esti_df only has model rows, top has data rows as well
unique_periodkeys[0] or unique_periodkeys[1] should give identical results
'''
all_esti_df_modelrows_oneperiod = all_esti_df[
all_esti_df[moment_csv_strs['period_dictkey'][0]] == unique_periodkeys[0]]
'''
regression table
'''
top_esti_df_regress = all_esti_df_modelrows_oneperiod[regress_use_cols]
save_directory = search_directory
if (save_file_name_regress.endswith('.csv')):
file_name = save_file_name_regress
else:
file_name = save_file_name_regress + '.csv'
top_esti_df_regress.to_csv(save_directory + file_name, header=True, index=False)
return top_esti_df | [
"[email protected]"
] | |
369727caa9b1f274cb3338d70531988c54568528 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_098/ch86_2020_06_21_19_53_15_222963.py | 1fcf5f95c6e8c012df1e1136e94771e9acbee12f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import csv
with open('dados.csv','r') as csv_file, open('dados.tsv', 'w') as tsv_file:
csv_file,tsv_file = csv.reader(csv_file), csv.writer(tsv_file, delimiter='\t')
for linha in csv_file:
tsv_file.writerow(linha) | [
"[email protected]"
] | |
0fc50b674c41b43ef786650cbc2d22bfbb195881 | d14a63d9a08be6040baf51d4f4a6ec845fd9cb97 | /admit/util/filter/Filter1D.py | 5c1d28090f94e669ff599b1036d628719d97bafd | [
"MIT"
] | permissive | teuben/admit | abf982620f1c58a770a24f9c28165f4adf3f1d0c | 1cae54d1937c9af3f719102838df716e7e6d655c | refs/heads/master | 2020-04-12T00:49:54.196990 | 2016-11-09T20:42:13 | 2016-11-09T20:42:13 | 73,388,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,793 | py | """ .. _filter1D:
Filter1D --- 1-dimensional spectral filtering.
----------------------------------------------
This module defines the 1D filter methods.
"""
import numpy as np
import math
from copy import deepcopy
from collections import OrderedDict
class Filter1D(object):
""" This class defines and runs 1D spectral filters. The currently available
filters are Gaussian, Hanning, Triangle, Welch, Boxcar, and Savitzky
Golay. The output spectrum will be of the same length as the input
spectrum, however some edge channels may be zeroed by some methods,
        depending on the input parameters.
Parameters
----------
spec : numpy array
1D numpy array of the input spectrum (just the amplitudes).
method : str
The smoothing filter to apply: boxcar, gaussian, welch, hanning,
triangle, or savgol.
No default. Minimum matching is enabled with a minimum of 3
characters, i.e. box = boxcar.
keyval : various
Any keyword value pairs for the specific method chosen, see the
notes for specific keywords.
Attributes
----------
spec : numpy array
The spectrum.
len : int
The length of the spectrum.
methods : list
A list of the available filters.
[method]_args : dict
A dictionary for each method giving its keywords and defaults
(e.g. boxcar_args).
method : str
The method being used.
Notes
-----
Details of the different filter keywords and defaults:
.. tabularcolumns:: |p{1.5cm}|p{2cm}|p{0.5cm}|p{8cm}|
+------------+---------------+------+----------------------------------------------+
| Filter | Keyword | Def. | Description |
+============+===============+======+==============================================+
| "boxcar" | "width" | 3 | Number of channels to average together |
+------------+---------------+------+----------------------------------------------+
| "gaussian" | "width" | 7 | Number of channels to span with the gaussian |
+------------+---------------+------+----------------------------------------------+
| "hanning" | "width" | 5 | Number of channels to include in the cos |
+------------+---------------+------+----------------------------------------------+
| "triangle" | "width" | 5 | Number of channels to span with the triangle |
+------------+---------------+------+----------------------------------------------+
| "welch" | "width" | 5 | Number of channels to use in the function |
+------------+---------------+------+----------------------------------------------+
| "savgol" | "window_size" | 7 | Number of channels to use in the calculation |
+------------+---------------+------+----------------------------------------------+
        |            | "order"       | 3    | Order of the polynomial fit (must be odd)    |
+------------+---------------+------+----------------------------------------------+
| | "deriv" | 0 | The number of the derivative to compute |
| | | | (0 = just smooth) |
+------------+---------------+------+----------------------------------------------+
"""
boxcar_args = OrderedDict([("width", 3)])
gaussian_args = OrderedDict([("width", 7)])
welch_args = OrderedDict([("width", 5)])
hanning_args = OrderedDict([("width", 5)])
triangle_args = OrderedDict([("width", 5)])
savgol_args = OrderedDict([("window_size", 7),
("order" , 3),
("deriv" , 0),
("rate" , 1)])
methods = ["boxcar",
"gaussian",
"welch",
"hanning",
"triangle",
"savgol"]
def __init__(self, spec, method, **keyval):
if len(spec.shape) > 1:
raise Exception("Spectrum is not 1D but you are trying to use a 1D filter.")
self.spec = spec
self.len = self.spec.shape[0]
# keywords for the different algorithms
self.method = self.checkmethod(method)
for k, v in keyval.iteritems():
try:
a = getattr(self, method + "_args")[k]
except:
raise Exception("Unknown input %s for smoothing." % (k))
if type(a) != type(v):
raise Exception("Cannot change the type of an attribute. %s must be a %s not a %s." % (k, type(a), type(v)))
getattr(self, method + "_args")[k] = v
def isodd(self, value):
""" Method to determine if a number is odd
Parameters
----------
value : int
The number to check
Returns
-------
bool, True if the number is odd, False if it is even
"""
return value%2 == 1
def checkmethod(self, method):
""" Method to interpret the input method and determine the full method
name
Parameters
----------
method : str
The method to use, minimal matching is possible, with a minimum
of 3 characters (e.g. "box" will be interpreted to be "boxcar")
Returns
-------
None
"""
if len(method) < 3:
raise Exception("Minimum of 3 characters are needed for minimal matching of strings.")
for m in self.methods:
if m.startswith(method):
return m
raise Exception("Unknown method %s given for smoothing. Available methods are: %s" % (method, str(self.methods)))
def buffer(self, nchan):
""" Method to buffer/pad an array so that filters can work all the way
to the edge. Uses np.pad with mode='reflect'
Parameters
----------
nchan : int
The number of channels to add to each end of the array
Returns
-------
Numpy array containing the buffered input array
"""
return np.pad(self.spec, (nchan, ), mode='reflect')
def boxcar(self, width):
r""" Method to apply a boxcar filter to a spectrum. The filter for point
x[i] is defined as:
.. math::
x[i] = \frac{1}{N} \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}]
where N is the width of the filter.
Parameters
----------
width : int
The width of the box to use in channels, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be
zeroed
"""
if not self.isodd(width):
raise Exception("Boxcar width must be an odd number.")
side = (width - 1) / 2
kernel = np.array([1.0] * width)
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def gaussian(self, width):
r""" Method to apply a Gaussian filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] e^{-\frac{1}{2}\left(\frac{n-(N-1)/2}{\sigma(N-1)/2}\right)^2}
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the gaussian for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Gaussian width must be an odd number.")
side = (width - 1) / 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = math.exp(-0.5 * pow(((float(j) - ((float(width) - 1.0) /
2.0)) / (0.2 * (float(width) - 1.0) / 2.0)), 2))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def welch(self, width):
r""" Method to apply a Welch filter to a spectrum. The filter for point x[i]
is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] \left(1 - \left(\frac{n - \frac{N-1}{2}}{\frac{N-1}{2}}\right)^2\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Welch width must be an odd number.")
width += 2 # must add 2 to get the proper width
side = (width - 1) / 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = (1 - math.pow((j - (float(width - 1) / 2.0)) /
(float(width - 1) / 2.0), 2))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def hanning(self, width):
r""" Method to apply a Hanning filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] 0.5 \left(1 - \cos\left(\frac{2\pi n}{N-1}\right)\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Hanning width must be an odd number.")
width += 2 # must add 2 to get the proper width
side = (width - 1) / 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = 0.5 * (1.0 - math.cos((2.0 * math.pi * j) / float(width - 1)))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def triangle(self, width):
r""" Method to apply a Triangular filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] \left(1 - \left|\frac{n-\frac{N-1}{2}}{\frac{N}{2}}\right|\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Triangle width must be an odd number.")
side = (width - 1) / 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = (1 - abs((j - (float(width - 1) / 2.0)) /
(float(width) / 2.0)))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def savgol(self, window_size, order, deriv=0, rate=1):
""" Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques. Adapted from
http://wiki.scipy.org/Cookbook/SavitzkyGolay
Parameters
----------
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only
smoothing)
Returns
-------
ndarray
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
        if not self.isodd(window_size):
raise Exception("Savgol window_size must be an odd number.")
y = deepcopy(self.spec)
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window,
half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * math.factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
@staticmethod
def convertargs(args):
""" Method to convert a tuple of arguments into a dictionary of arguments for the specified
method. The first item of the tuple must be the method name. The remaining items are the
arguments to the method in the order the method lists. To see which arguments a method
takes call getargs(method) or getargs() to list the arguments for all methods.
Parameters
----------
args : tuple
Tuple containing the method as the first item and any arguments for that method in
the order specified by the method.
Returns
-------
Dictionary containing the converted arguments.
"""
if len(args) == 0:
raise Exception("Smoothing method must be given.")
if args[0] not in Filter1D.methods:
raise Exception("The smoothing method %s is not known, it must be one of: %s" %
                            (args[0], str(Filter1D.methods)))
keyval = deepcopy(getattr(Filter1D, args[0] + "_args"))
keys = keyval.keys()
for i, arg in enumerate(args):
if i == 0:
continue
keyval[keys[i - 1]] = arg
return dict(keyval)
def run(self):
""" Method to run the selected filter on the data
Parameters
----------
None
Returns
-------
The smoothed spectrum
"""
return getattr(self, self.method)(**getattr(self, self.method + "_args"))
def getargs(method=None):
""" Method to report the keywords and default values for smoothing algorithms
Parameters
----------
method : str
The name of the method to report the keywords and default values for. If no method is
given then all methods are reported on.
Default: None
Returns
-------
None
"""
if method is None:
print " arg Default"
for m in Filter1D.methods:
print m
for k, v in getattr(Filter1D, m + "_args").iteritems():
print " %s %s" % (k.ljust(14), str(v))
return
if method in Filter1D.methods:
print " arg Default"
for k, v in getattr(Filter1D, method + "_args").iteritems():
print " %s %s" % (k.ljust(14), str(v))
return
print "Method %s is not known. Available methods are: %s" % (method, Filter1D.methods)
| [
"[email protected]"
] | |
0a1229effa14b40210a1abe973f19d6d8e697ee6 | 384d31fe319844c171891f7453b73df84a77bdcc | /src/apps_base/order/constants.py | efbdbf9e0bf0fc6b7c9cad05af960452d6cb5749 | [] | no_license | danielhuamani/fanntop | 0227a1b5337a45b3b91ab16c614e206f10efc891 | 1adb65f617f1e418cad75588fa60af909c1e690a | refs/heads/master | 2021-03-22T00:46:51.355400 | 2018-10-06T05:46:07 | 2018-10-06T05:46:07 | 114,432,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | PENDIENTE = "PE"
RECHAZADO = "RC"
PAGADO = "PG"
CANCELADO = "CN"
DENEGADO = "DN"
PROCESO = 'PR_1'
PROCESO_2 = 'PR_2'
ORDER_VALIDATED = 'VAL'
ORDER_USED = 'USE'
REEMBOLSO = 'RE'
TYPE_STATUS = (
(PROCESO, "Pendiente"),
("RC", "Rechazado"),
("PG", "Pagado"),
# ("PE", "Pendiente"),
# (PROCESO_2, "Proceso Paso 2"),
("RE", "Reembolso"),
)
ALMACEN = 'AL'
TYPE_STATUS_SHIPPING = (
(ALMACEN, "En Almacén"),
("DS", "En Despacho"),
("EG", "Entregado"),
)
| [
"[email protected]"
] | |
cf39c48564e9b19e551240ce121a93cc7743fb4b | dbf4f74403dec9c5531118a858c7b208c43323d4 | /airflow/dags/lib/common.py | 1ce3c55bbcfb30bcba974a240da78b23c1a92130 | [] | no_license | LaoKpa/short_sale_volume | 03208c6a5830b61a8e98ba3854b0ada45ee2a666 | 02c2fb9f91ca94845768554074a5a3018e87b0fe | refs/heads/master | 2022-04-06T15:05:19.678439 | 2020-02-27T20:05:18 | 2020-02-27T20:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from airflow.configuration import conf as airflow_config
import configparser
import json
import os
config = configparser.ConfigParser()
airflow_dir = os.path.split(airflow_config['core']['dags_folder'])[0]
config.read('{}/config.cfg'.format(airflow_dir))
CLUSTER_NAME = config['AWS']['CLUSTER_NAME']
VPC_ID = config['AWS']['VPC_ID']
SUBNET_ID = config['AWS']['SUBNET_ID']
if config['App']['STOCKS'] == '':
STOCKS = []
else:
STOCKS = json.loads(config.get('App', 'STOCKS').replace("'", '"'))
if config['App']['STOCK_LIMITS'] == '':
LIMIT = None
else:
LIMIT = int(config['App']['STOCK_LIMITS'])
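# The config.cfg read above is expected to provide these keys; the values shown
# here are only illustrative:
#     [AWS]
#     CLUSTER_NAME = my-emr-cluster
#     VPC_ID = vpc-0123456789abcdef0
#     SUBNET_ID = subnet-0123456789abcdef0
#     [App]
#     STOCKS = ['AAPL', 'MSFT']
#     STOCK_LIMITS = 100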
| [
"[email protected]"
] | |
877b61c1bf6a0f9f65e65d4dddc3d75e1788ad23 | 3ff1c245d945acf82e48f388d2457204e202275f | /desafio/migrations/0022_atributos.py | d65eeb6862db78a6105419422e55ae646f1da42a | [] | no_license | rauldosS/desafio_compiladores | 075e7dcb3a167d20d71928727db6c1cb500e23af | da01adf41c47dafd50b1487bb4ad8d27c4f2d199 | refs/heads/main | 2023-01-03T09:13:18.990618 | 2020-10-29T01:25:59 | 2020-10-29T01:25:59 | 305,174,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 3.1.2 on 2020-10-28 23:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('desafio', '0021_auto_20201026_0732'),
]
operations = [
migrations.CreateModel(
name='Atributos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('atributo', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Atributo',
'verbose_name_plural': 'Atributos',
'ordering': ('id', 'atributo'),
},
),
]
| [
"[email protected]"
] | |
d01c0cfc4e6c223bd56c8cba997a671ee074cc0a | 642b7138da231474154a83c2dc3b4a2a42eb441b | /array/sub_arr_with_least_avg.py | 4adb849677697c4f94b4740e71555febb2a85ea6 | [] | no_license | somanshu/python-pr | 15465ed7182413591c709f9978420f6a16c9db91 | 7bfee6fc2a8340ba3e343f991a1da5bdb4ae9cb2 | refs/heads/master | 2020-07-02T17:21:37.132495 | 2019-08-22T08:04:11 | 2019-08-22T08:04:11 | 201,602,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # https://www.geeksforgeeks.org/find-subarray-least-average/
def leastAvg(arr, k):
summation = []
summation.append(0)
summation.append(arr[0])
min_avg = 9999
min_avg_last_index = None
for i in range(2, len(arr) + 1):
summation.append(summation[i-1] + arr[i-1])
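    # summation[i] holds the sum of the first i elements, so the sum of the
    # k-element window ending at index i - 1 is summation[i] - summation[i-k].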
    for i in range(k, len(arr) + 1):  # start from the first full window of k elements
cur_sum = summation[i] - summation[i-k]
avg_sum = cur_sum // k
if avg_sum < min_avg:
min_avg = avg_sum
min_avg_last_index = i - 1
return (min_avg, min_avg_last_index - k + 1, min_avg_last_index)
arr = [3, 7, 90, 20, 10, 50, 40]
arr = [3, 7, 5, 20, -10, 0, 12]
k = 2
res = leastAvg(arr, k)
print(res)
| [
"[email protected]"
] | |
b361107089051be80b1ac8ccd2fb285cfb6e7754 | be95beccb4747297af730b25ff00e5016c4337c3 | /hostman/__init__.py | 091830c8cee238e3b72309918ecee5541fc3e40c | [
"MIT"
] | permissive | TeamAleph/hostman | 4e2ebf1a9f246763762fd816d2f3eab9a86e2de4 | 8ba27903a6dc58464ee4cb8e1a48a2b1a5559696 | refs/heads/master | 2022-03-30T07:46:24.126778 | 2020-01-13T20:17:49 | 2020-01-13T20:17:49 | 275,264,024 | 1 | 0 | MIT | 2020-06-26T23:14:09 | 2020-06-26T23:14:08 | null | UTF-8 | Python | false | false | 9,467 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Hostman.
Usage:
hostman add [-fqbvq] [--force] [--path=PATH]
( [ENTRY ...] | [--input-file=FILE] | [--input-url=URL] )
hostman remove [-qbvq] ([--address=<address>] [--names=<names>]) [--path=PATH]
[--input-file=FILE] [--input-url=URL]
hostman --version
Options:
-h --help show this help message and exit
--version show version and exit
-f --force replace matching entries
--address=ADDRESS ipv6 or ipv4 address
--names=NAMES host names
-q --quiet report only failures
-p --path=PATH location of hosts file (attempts to detect default)
-i --input-file=FILE file containing hosts to import
-u --input-url=URL url of file containing hosts to import
-b --backup create a backup before writing any changes
--exclude=VALUE comma separated list of names or addresses
to exclude from operation [default: 127.0.0.1]
-v --verbose print verbose output
"""
from __future__ import print_function
from docopt import docopt
from python_hosts import Hosts, HostsEntry
from .utils import is_writeable, is_readable
import sys
import os
import datetime
import shutil
from colorama import Fore, init
init(autoreset=True)
name = "hostman"
def backup_hosts(source=None, extension=None):
"""Backup a hosts file
:param source: Path to the hosts file
:param extension: The extension to add to the backup file
:return: A dict containing the result and user message to output
"""
if not extension:
now = datetime.datetime.now()
ext = now.strftime('%Y%m%d%H%M%S')
else:
ext = extension
dest_split = source.split('/')
new_filename = ".{0}.{1}".format(dest_split[-1], ext)
dest_split[-1] = new_filename
dest = "/".join(dest_split)
try:
shutil.copy(source, dest)
return {'result': 'success', 'message': 'Backup written to: {0}'.format(dest)}
except IOError:
return {'result': 'failed', 'message': 'Cannot create backup file: {0}'.format(dest)}
def output_message(message=None, quiet=False):
"""User friendly result of action
:param message: A dict containing the result and a user notification message
:return: Exit with 0 or 1, or True if this is not the final output
"""
res = message.get('result')
if res == 'success':
if not quiet:
print(Fore.GREEN + message.get('message'))
sys.exit(0)
elif res == 'failed':
print(Fore.RED + message.get('message'))
sys.exit(1)
elif res == 'continue':
if not quiet:
print(message.get('message'))
return True
def add(entry_line=None, hosts_path=None, force_add=False):
"""Add the specified entry
:param entry_line: The entry to add
:param hosts_path: The path of the hosts file
:param force_add: Replace matching any matching entries with new entry
:return: A dict containing the result and user message to output
"""
hosts_entry = HostsEntry.str_to_hostentry(entry_line)
if not hosts_entry:
output_message({'result': 'failed',
'message': '"{0}": is not a valid entry.'.format(entry_line)})
duplicate_entry = False
entry_to_add = False
hosts = Hosts(hosts_path)
add_result = hosts.add(entries=[hosts_entry], force=force_add)
if add_result.get('replaced_count'):
hosts.write()
return {'result': 'success',
'message': 'Entry added. Matching entries replaced.'}
if add_result.get('ipv4_count') or add_result.get('ipv6_count'):
entry_to_add = True
if add_result.get('duplicate_count'):
duplicate_entry = True
if entry_to_add and not duplicate_entry:
hosts.write()
return {'result': 'success',
'message': 'New entry added.'}
if not force_add and duplicate_entry:
return {'result': 'failed',
'message': 'New entry matches one or more existing.'
'\nUse -f to replace similar entries.'}
def import_from_file(hosts_path=None, file_path=None):
"""Import entries from a text file
:param hosts_path: Path to the hosts file to update
:param file_path: Path to the file containing the hosts entries to import
:return: A dict containing the result and user message to output
"""
if hosts_path and not os.path.exists(hosts_path):
return {'result': 'failed', 'message': 'Cannot read hosts file: {0}'.format(hosts_path)}
if not os.path.exists(file_path):
return {'result': 'failed', 'message': 'Cannot read import file: {0}'.format(file_path)}
else:
hosts = Hosts(path=hosts_path)
pre_count = len(hosts.entries)
import_file_output = hosts.import_file(import_file_path=file_path)
post_count = len(hosts.entries)
write_result = import_file_output.get('write_result')
message = 'New entries:\t{0}\nTotal entries:\t{1}\n'.format(
post_count - pre_count,
write_result.get('total_written')
)
return {'result': import_file_output.get('result'),
'message': message}
def import_from_url(hosts_path=None, url=None):
"""Import entries from a text file found on a specific URL
:param hosts_path: Path to the hosts file to update
:param url: URL of the text file containing the hosts entries to import
:return: A dict containing the result and user message to output
"""
hosts = Hosts(path=hosts_path)
pre_count = len(hosts.entries)
import_url_output = hosts.import_url(url=url)
post_count = len(hosts.entries)
write_result = import_url_output.get('write_result')
message = 'New entries:\t{0}\nTotal entries:\t{1}\n'.format(
post_count - pre_count,
write_result.get('total_written')
)
return {'result': import_url_output.get('result'),
'message': message}
def remove(address_to_remove=None, names_to_remove=None, remove_from_path=None):
"""Remove entries from a hosts file
:param address_to_remove: An ipv4 or ipv6 address to remove
:param names_to_remove: A list of names to remove
:param remove_from_path: The path of the hosts file to remove entries from
:return: A dict containing the result and user message to output
"""
hosts = Hosts(path=remove_from_path)
if address_to_remove or names_to_remove:
num_before = hosts.count()
hosts.remove_all_matching(address=address_to_remove, name=names_to_remove)
hosts.write()
difference = num_before - hosts.count()
if difference:
if difference > 1:
str_entry = 'entries'
else:
str_entry = 'entry'
return {'result': 'success',
'message': 'Removed {0} {1}'.format(difference, str_entry)}
else:
return {'result': 'failed',
'message': 'No matching entries found'}
def strip_entry_value(entry_value):
"""Strip white space from a string or list of strings
:param entry_value: value to strip spaces from
:return: value minus the leading and trailing spaces
"""
if isinstance(entry_value, list):
new_list = []
for value in entry_value:
new_list.append(value.strip())
return ' '.join(new_list)
if isinstance(entry_value, str):
return entry_value.strip()
def real_main():
""" The function called from the script
:return: None
"""
arguments = docopt(__doc__, version='0.1.3')
entry = arguments.get('ENTRY')
quiet = arguments.get('--quiet')
path = arguments.get('--path')
force = arguments.get('--force')
backup = arguments.get('--backup')
address = arguments.get('--address')
names = arguments.get('--names')
input_file = arguments.get('--input-file')
input_url = arguments.get('--input-url')
result = None
if not path:
if sys.platform.startswith('win'):
path = r'c:\windows\system32\drivers\etc\hosts'
else:
path = '/etc/hosts'
if not is_readable(path):
output_message({'result': 'failed',
'message': 'Unable to read path: {0}.'.format(path)})
new_entry = None
if entry:
new_entry = strip_entry_value(entry)
if backup:
result = backup_hosts(source=path)
if result.get('result') == 'success':
result['result'] = 'continue'
output_message(result, quiet=quiet)
if arguments.get('add'):
        if not is_writeable(path):
            output_message({'result': 'failed',
                            'message': 'Unable to write to: {0}'.format(path)}, quiet=quiet)
if new_entry:
result = add(entry_line=new_entry, hosts_path=path, force_add=force)
if input_file:
result = import_from_file(hosts_path=path, file_path=input_file)
if input_url:
result = import_from_url(hosts_path=path, url=input_url)
else:
if arguments.get('remove'):
result = remove(address_to_remove=address, names_to_remove=names, remove_from_path=path)
if result:
output_message(result, quiet=quiet)
if __name__ == '__main__':
real_main()
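# Example invocations (illustrative):
#   hostman add -b 127.0.0.1 mysite.local
#   hostman remove --names mysite.local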
| [
"[email protected]"
] | |
756c1be0b975f8ff483955c9a83fcd8608da7e75 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/3240.py | a6126108a389ce9f2e9429f3b4ddc959c34189ad | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | #!/usr/bin/env python
import sys
# import numpy
import operator
def main(filename):
f = open(filename, 'r')
T = int(f.readline())
for t in xrange(T):
result = solve(f)
print "Case #%i: %.7f" % (t+1, result)
def solve(f):
F0 = 2.0
C, F, X = map(float, f.readline().split())
best_time = X / F0
current_time = 0.0
current_rate = F0
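    # Greedy strategy: keep buying farms as long as the time spent earning the
    # farm's cost (C / current_rate) plus finishing at the faster rate still
    # beats the best completion time seen so far.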
while True:
current_time += C / current_rate
current_rate += F
new_completion_time = X / current_rate + current_time
if new_completion_time < best_time:
best_time = new_completion_time
else:
break
return best_time
if __name__ == "__main__":
sys.exit(main(sys.argv[1])) | [
"[email protected]"
] | |
0afde19c13d759b12976085d7ffb89bde8ee1f5e | 9aaa39f200ee6a14d7d432ef6a3ee9795163ebed | /Algorithm/Python/812. Largest Triangle Area.py | 892e5661352025b255cbf0cabc57d72ec735f4c0 | [] | no_license | WuLC/LeetCode | 47e1c351852d86c64595a083e7818ecde4131cb3 | ee79d3437cf47b26a4bca0ec798dc54d7b623453 | refs/heads/master | 2023-07-07T18:29:29.110931 | 2023-07-02T04:31:00 | 2023-07-02T04:31:00 | 54,354,616 | 29 | 16 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # -*- coding: utf-8 -*-
# Created on Mon Apr 09 2018 15:45:58
# Author: WuLC
# EMail: [email protected]
# get the area of triangle with Heron's formula
# reference https://en.wikipedia.org/wiki/Heron%27s_formula
class Solution(object):
def largestTriangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
n = len(points)
result = 0
for i in xrange(n):
for j in xrange(i+1, n):
for k in xrange(j+1, n):
result = max(result, self.area(points[i], points[j], points[k]))
return result
def area(self, p1, p2, p3):
a = ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) ** 0.5
b = ((p1[0] - p3[0])**2 + (p1[1] - p3[1])**2) ** 0.5
c = ((p2[0] - p3[0])**2 + (p2[1] - p3[1])**2) ** 0.5
        if a+b <= c or a+c <= b or b+c <= a: # three points may not be able to construct a triangle
return 0
s = (a+b+c)/2.0
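        # e.g. (illustrative) points (0,0), (0,1), (1,0): a = b = 1, c = sqrt(2), area = 0.5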
return (s*(s-a)*(s-b)*(s-c))**0.5 | [
"[email protected]"
] | |
98582fdd92598261a9288ab72cad9d235b2a216b | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-3893.py | a0be1c21754b1c2d35d43c59263aa38aa757372e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,754 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4($ID:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
2827303ea1787af24a4a3935a48e8b7cc341780b | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/ExclusiveOrXorLogicalOperator/xor.py | ee8ba551c8806b9d2aebe809a31c1ddfa1f85d12 | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | def xor(a,b):
return a != b | [
"[email protected]"
] | |
f1196a410af03757d39757835dc4e5a8603ad26a | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_comment03.py | 28e4c327e5958ca3a4b6bd46ac2f1bfae30638fe | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 961 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("comment03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Foo")
worksheet.write_comment("A1", "Some text")
worksheet.write_comment("XFD1048576", "Some text")
worksheet.set_comments_author("John")
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
09dddad36a31c3941f9759d85f109af7ad424d73 | 28c0bcb13917a277cc6c8f0a34e3bb40e992d9d4 | /koku/api/migrations/0010_auto_20200128_2138.py | 7aceb81ec568991a0e9b1d2e34bcf43e8c3ff8f9 | [
"Apache-2.0"
] | permissive | luisfdez/koku | 43a765f6ba96c2d3b2deda345573e1d97992e22f | 2979f03fbdd1c20c3abc365a963a1282b426f321 | refs/heads/main | 2023-06-22T13:19:34.119984 | 2021-07-20T12:01:35 | 2021-07-20T12:01:35 | 387,807,027 | 0 | 1 | Apache-2.0 | 2021-07-20T13:50:15 | 2021-07-20T13:50:14 | null | UTF-8 | Python | false | false | 593 | py | # Generated by Django 2.2.8 on 2020-01-28 21:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("api", "0009_providerstatus_squashed_0042_auto_20200116_2048")]
operations = [
migrations.RunSQL(
"""
UPDATE public.api_provider
SET type = 'Azure'
WHERE type = 'AZURE'
;
UPDATE public.api_providerinfrastructuremap
SET infrastructure_type = 'Azure'
WHERE infrastructure_type = 'AZURE'
;
"""
)
]
| [
"[email protected]"
] | |
de6c66ecf43e841a117ca0be3fd1b576c402f4e8 | 51e7336e8bb447187cbe6ede2910f40700316dc1 | /simics/monitorCore/diddler.py | 9bb67e16e8360d641a011f184d6c1538ec82b788 | [] | no_license | hacker-steroids/RESim | 69bac74a1b119c54d03b9ea0fda7a85cc45ea854 | 94498c699575f5078de415fac8c517d520cb2f94 | refs/heads/master | 2020-05-30T12:33:53.799610 | 2019-06-01T00:51:20 | 2019-06-01T00:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,721 | py | #!/usr/bin/env python
import os
import re
from simics import *
def nextLine(fh):
retval = None
while retval is None:
line = fh.readline()
if line is None or len(line) == 0:
break
if line.startswith('#'):
continue
retval = line.strip('\n')
return retval
class Diddler():
class Fiddle():
def __init__(self, match, was, becomes, cmds=[]):
self.match = match
self.was = was
self.becomes = becomes
self.cmds = cmds
def __init__(self, path, mem_utils, cell_name, lgr):
self.kind = None
self.fiddles = []
self.mem_utils = mem_utils
self.lgr = lgr
self.stop_hap = None
self.cell_name = cell_name
self.path = path
self.operation = None
if os.path.isfile(path):
with open(path) as fh:
done = False
kind_line = nextLine(fh)
parts = kind_line.split()
self.kind = parts[0]
if len(parts) > 1:
self.operation = parts[1]
else:
self.lgr.error('Diddle command missing operation %s' % kind_line)
return
self.lgr.debug('Diddle of kind %s cell is %s' % (self.kind, self.cell_name))
if self.kind == 'full_replace':
match = nextLine(fh)
becomes=''
while not done:
line = fh.readline()
if line is None or len(line)==0:
done = True
break
if len(becomes)==0:
becomes=line
else:
becomes=becomes+line
self.fiddles.append(self.Fiddle(match, None, becomes))
elif self.kind == 'match_cmd':
match = nextLine(fh)
was = nextLine(fh)
cmds=[]
while not done:
line = nextLine(fh)
if line is None or len(line)==0:
done = True
break
cmds.append(line)
self.fiddles.append(self.Fiddle(match, was, None, cmds=cmds))
elif self.kind == 'sub_replace':
while not done:
match = nextLine(fh)
if match is None:
done = True
break
was = nextLine(fh)
becomes = nextLine(fh)
self.fiddles.append(self.Fiddle(match, was, becomes))
else:
print('Unknown diddler kind: %s' % self.kind)
return
self.lgr.debug('Diddler loaded %d fiddles of kind %s' % (len(self.fiddles), self.kind))
else:
self.lgr.debug('Diddler, no file at %s' % path)
def subReplace(self, cpu, s, addr):
rm_this = None
for fiddle in self.fiddles:
#self.lgr.debug('Diddle checkString %s to %s' % (fiddle.match, s))
if re.search(fiddle.match, s, re.M|re.I) is not None:
if re.search(fiddle.was, s, re.M|re.I) is not None:
#self.lgr.debug('Diddle replace %s with %s in \n%s' % (fiddle.was, fiddle.becomes, s))
new_string = re.sub(fiddle.was, fiddle.becomes, s)
self.mem_utils.writeString(cpu, addr, new_string)
else:
#self.lgr.debug('Diddle found match %s but not string %s in\n%s' % (fiddle.match, fiddle.was, s))
pass
rm_this = fiddle
break
return rm_this
def fullReplace(self, cpu, s, addr):
rm_this = None
fiddle = self.fiddles[0]
if fiddle.match in s:
count = len(fiddle.becomes)
self.mem_utils.writeString(cpu, addr, fiddle.becomes)
esp = self.mem_utils.getRegValue(cpu, 'esp')
count_addr = esp + 3*self.mem_utils.WORD_SIZE
self.mem_utils.writeWord(cpu, count_addr, count)
#cpu.iface.int_register.write(reg_num, count)
self.lgr.debug('diddle fullReplace %s in %s wrote %d bytes' % (fiddle.match, s, count))
rm_this = fiddle
#SIM_break_simulation('deeedee')
return rm_this
def stopAlone(self, fiddle):
self.stop_hap = SIM_hap_add_callback("Core_Simulation_Stopped", self.stopHap, fiddle)
SIM_break_simulation('matchCmd')
def matchCmd(self, s):
''' The match lets us stop looking regardless of whether or not the values are
bad. The "was" tells us a bad value, i.e., reason to run commands '''
rm_this = None
fiddle = self.fiddles[0]
#self.lgr.debug('look for match of %s in %s' % (fiddle.match, s))
if re.search(fiddle.match, s, re.M|re.I) is not None:
#self.lgr.debug('found match of %s in %s' % (fiddle.match, s))
rm_this = fiddle
if re.search(fiddle.was, s, re.M|re.I) is not None:
SIM_run_alone(self.stopAlone, fiddle)
return rm_this
def checkString(self, cpu, addr, count):
retval = False
byte_string, byte_array = self.mem_utils.getBytes(cpu, count, addr)
s = ''.join(map(chr,byte_array))
if self.kind == 'sub_replace':
rm_this = self.subReplace(cpu, s, addr)
elif self.kind == 'full_replace':
rm_this = self.fullReplace(cpu, s, addr)
elif self.kind == 'match_cmd':
rm_this = self.matchCmd(s)
else:
print('Unknown kind %s' % self.kind)
return
if rm_this is not None:
self.lgr.debug('Diddler checkString found match cell %s path %s' % (self.cell_name, self.path))
self.fiddles.remove(rm_this)
if len(self.fiddles) == 0:
self.lgr.debug('Diddler checkString removed last fiddle')
retval = True
return retval
def stopHap(self, fiddle, one, exception, error_string):
SIM_hap_delete_callback_id("Core_Simulation_Stopped", self.stop_hap)
self.lgr.debug('Diddler stop hap')
for cmd in fiddle.cmds:
SIM_run_command(cmd)
def getOperation(self):
return self.operation
def getPath(self):
return self.path
if __name__ == '__main__':
print('begin')
d = Diddler('dog.diddle')
| [
"[email protected]"
] | |
baebd8438e36c15261b39d8240930bbf3b21cfac | 42fdf741bf64ea2e63d1546bb08356286f994505 | /macrocab_ex2/rasp30_vmm_frame6.py | 5ccb702b8fe5f8f28ed7cfe61ee3204261d47609 | [] | no_license | skim819/RASP_Workspace_sihwan | 7e3cd403dc3965b8306ec203007490e3ea911e3b | 0799e146586595577c8efa05c647b8cb92b962f4 | refs/heads/master | 2020-12-24T05:22:25.775823 | 2017-04-01T22:15:18 | 2017-04-01T22:15:18 | 41,511,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | 'cab_vmm.O[0:5]' ,[range( 29, 23, -1), 21]] ## o/ps connectn to i/ps?? ummmmm !!! ---we need this
self.li = smDictFromList(li_sm)
li0b = recStrExpand(li_sm_0b)
li0b.reverse()
self.li0 = recStrExpand(li_sm_0a) + li0b
self.li1 = recStrExpand(li_sm_1)
#pdb.set_trace()
#CAB Devices ## order is very important here
| [
"ubuntu@ubuntu-VirtualBox.(none)"
] | ubuntu@ubuntu-VirtualBox.(none) |
ded8ee76872f157d15e6d9423f30b3068ac198ae | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02392/s638672279.py | d0cf75d9c7215e1b6ef0047f7a19216eae852dd7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import fileinput
for line in fileinput.input():
tokens = list(map(int, line.strip().split()))
a, b, c = tokens[0], tokens[1], tokens[2]
if a < b and b < c:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
05bd301169b370c0af23207dbee2b2997c24161d | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/vpn_connections_operations.py | ad7612c0a65d8b003afc62f941e61d8a204314a4 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 17,358 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VpnConnectionsOperations(object):
"""VpnConnectionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-12-01"
self.config = config
def get(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VpnConnection or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_12_01.models.VpnConnection or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_12_01.models.ErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def _create_or_update_initial(
self, resource_group_name, gateway_name, connection_name, vpn_connection_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, gateway_name, connection_name, vpn_connection_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist
else updates the existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or
Update a VPN Connection.
:type vpn_connection_parameters:
~azure.mgmt.network.v2018_12_01.models.VpnConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VpnConnection or
ClientRawResponse<VpnConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VpnConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VpnConnection]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_12_01.models.ErrorException>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def _delete_initial(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_12_01.models.ErrorException>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def list_by_vpn_gateway(
self, resource_group_name, gateway_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VpnConnection
:rtype:
~azure.mgmt.network.v2018_12_01.models.VpnConnectionPaged[~azure.mgmt.network.v2018_12_01.models.VpnConnection]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_12_01.models.ErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.VpnConnectionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VpnConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'}
| [
"[email protected]"
] | |
ceb816aa78646111fde73dd941d40a313982aebe | 95100d3a58122a81946eac46618b9e59bef1ba22 | /Bin/autoTestClass.py | 82c5b1f2fd5691c930ede521b36ef16f95f2afad | [] | no_license | ayiya-hui/automation | 6fc65bf7168a2ca663d17ead66ad83adffb61cb4 | d100795db2275994a40199b8935296ae30a9eb0e | refs/heads/master | 2022-12-31T08:32:53.558207 | 2020-05-25T09:17:53 | 2020-05-25T09:17:53 | 135,518,107 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,566 | py | import logging
class Config:
def __init__(self):
self.dataCollector=''
self.appServer=''
self.user=''
self.password=''
self.testModule=''
self.testTask=''
self.testSuites=''
self.excludeSuites=''
self.option=''
self.sleep=''
self.version=''
class TestCategory:
def __init__(self):
self.suites=''
class TestSuite:
def __init__(self):
self.name=''
self.method=''
self.setupTasks=[]
self.testcases=[]
self.fileName=''
class configImportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class RBACSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class adminRole:
def __init__(self):
self.userName=''
self.org=''
self.password=''
self.scope=''
class setupTask:
def __init__(self):
self.setupName=''
self.setupValue=[]
class verifyTask:
def __init__(self):
self.type=''
class eventTypeQuery:
def __init__(self):
self.name=''
class reportQuery:
def __init__(self):
self.id=''
self.eventType=''
self.key=''
class rbacEventQuery:
def __init__(self):
self.name=''
self.condition=''
class readEventType:
def __init__(self):
self.name=''
class createDevice:
def __init__(self):
self.deviceList=[]
class device:
def __init__(self):
self.name=''
self.type=''
self.ip=''
self.custId=''
class sentEvent:
def __init__(self):
self.eventList=[]
class event:
def __init__(self):
self.eventType=''
self.reporter=''
class sentIncident:
def __init__(self):
self.incidentList=[]
class incident:
def __init__(self):
self.incidentType=''
self.reporter=''
class eventExportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class eventParsingSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
def getKeyMap(self):
eventKey=[]
reporterKey=[]
for case in self.testcases:
event=case.eventType.strip()
if event not in eventKey:
eventKey.append(event)
reporter=case.reporter
if reporter not in reporterKey:
reporterKey.append(reporter)
eventStr='","'.join(eventKey)
reporterStr=','.join(reporterKey)
keyMap={}
keyMap['eventType']='"'+eventStr+'"'
keyMap['reporter']=reporterStr
return keyMap
class eventTypeSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class logDiscoverySuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class incidentSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
def getKeyMap(self):
eventKey=[]
reporterKey=[]
for case in self.testcases:
event=case.eventType.strip()
if event not in eventKey:
eventKey.append(event)
reporter=case.reporter
if reporter not in reporterKey:
reporterKey.append(reporter)
eventStr='","'.join(eventKey)
reporterStr='","'.join(reporterKey)
keyMap={}
keyMap['eventType']='"'+eventStr+'"'
keyMap['reporter']='"'+reporterStr+'"'
return keyMap
class incidentTimeBasedSuite(incidentSuite):
def __init__(self):
incidentSuite.__init__(self)
self.sendEvent=''
class incidentPatternBasedSuite(incidentTimeBasedSuite):
def __init__(self):
incidentTimeBasedSuite.__init__(self)
class linuxFileMonitorSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
self.linuxHost=''
self.linuxUsers=[]
self.monPath=''
self.monConfig=''
class linuxUser:
def __init__(self):
self.name=''
self.password=''
class reportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class TestCase:
def __init__(self):
self.name=''
self.reporter=''
class configImportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
class RBACCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyName=''
self.eventType=''
self.desc=''
self.roleName=''
self.verifyTasks=[]
class eventExportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.deviceName=''
self.timeZone=''
self.option=''
self.startTime=''
self.endTime=''
self.custName=''
class eventParsingCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.eventType=''
self.parseEvent=''
self.key=''
self.parameters=''
class eventTypeCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyTasks=[]
class logDiscoveryCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.discoverEvent=''
self.parameters=''
class incidentCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.eventType=''
self.createDevice=''
self.deviceType=''
self.deviceName=''
self.custId=''
self.repeatCount=''
self.repeatInterval=''
self.domainController=''
self.events=[]
self.parameters=''
class incidentEvent:
def __init__(self):
self.incidentMsg=''
class incidentTimeBasedCase(incidentCase):
def __init__(self):
incidentCase.__init__(self)
self.sendCount=''
self.sendInterval=''
self.clearInterval=''
self.clearWait=''
class incidentPatternBasedCase(incidentTimeBasedCase):
def __init__(self):
incidentTimeBasedCase.__init__(self)
self.clearEvent=''
self.clearCount=''
class reportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyTasks=[]
class linuxFileMonitorCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.resultOption=''
self.parameters=''
self.tasks=[]
class task:
def __init__(self):
self.taskName=''
self.taskType=''
self.targetPath=''
self.target=''
self.recurse=''
self.excuteUser=''
class RbacProfile:
def __init__(self):
self.name=''
self.description=''
self.config=''
self.eventFilter=''
class eventFilter:
def __init__(self):
self.name=''
self.singleConstraint=''
self.groupConstraint=''
self.groupBy=''
self.index=''
self.singleConditions=[]
self.groupConditions=[]
class domain:
def __init__(self):
self.name=''
self.domainId=''
self.companyName=''
self.description=''
self.primaryContactUser=''
self.secondaryContactUser=''
self.initialized=True
self.lastDataDistributedTime=''
self.timeZoneOffset=''
self.logoURL=''
self.encKey=''
self.disabled=False
self.custKey=''
self.includeRange=''
self.excludeRange=''
self.address=''
self.phone=''
self.collectors=[]
| [
"[email protected]"
] | |
8a08595a18180fdd63eb5e412db51e021f22bf79 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03050/s149029376.py | 4af3d320d49d90f7eaaaa5bb9349d14574c742f6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | def main():
def trial_division(n):
divs = []
for i in range(1, int(n**0.5)+1):
if n % i == 0:
divs.append(i)
if i != n//i:
divs.append(n//i)
return divs
N = int(input())
divs = trial_division(N)
ans = 0
for d in divs:
if d != 1 and N//(d-1) == N % (d-1):
ans += (d-1)
print(ans)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
123ef66ca04621db199f5545ba0778aa705e1a77 | 9afbb6993450d1e0c3bae68e86844bd06d4419ee | /file_handling/json_programs/j1.py | bf4e31c84eab625edd9c8a9dbc0bde5ea2b9b2c5 | [] | no_license | Jigar710/Python_Programs | 6f331caac30878655d4cca4ad97d4214c0262088 | 714a6306487eb6712f32ccb51b6a2407a81873fa | refs/heads/main | 2023-02-25T12:24:44.874199 | 2021-01-28T15:43:24 | 2021-01-28T15:43:24 | 332,869,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import json
a = {"name":'sumit',"age":31}
print(type(a))
file = open("output.txt","w")
json.dump(a,file) | [
"[email protected]"
] | |
965b64261b972827ee492000da2e36bc4999cadf | 2776195dc0863f5e43c5394767f1f950ce7672bb | /util/command_console_tui.py | 646711bad18fe43b632ea9eb243f651dc9bf2df2 | [
"MIT"
] | permissive | sharkbound/PythonTwitchBotFramework | a5e6f55c89a0639cb8e3dd16b99bb6388ee5f5f8 | 3d9aff994d531272d53b869c3dac6602b04a9d70 | refs/heads/master | 2023-09-04T06:34:44.456338 | 2023-08-16T21:32:58 | 2023-08-16T21:32:58 | 134,095,615 | 111 | 47 | MIT | 2023-09-14T20:40:04 | 2018-05-19T20:24:24 | Python | UTF-8 | Python | false | false | 6,689 | py | #!/usr/bin/env python3
import asyncio
import json
import click
import websockets
from urwid import AsyncioEventLoop, Edit, ExitMainLoop, Filler, Frame, MainLoop, Text, connect_signal
COMMAND_READ, PASSWORD_READ = 0, 1
SEND_PASSWORD = 'send_password'
BAD_PASSWORD = 'bad_password'
DISCONNECTING = 'disconnecting'
LIST_CHANNELS = 'list_channels'
BAD_DATA = 'bad_data'
AUTHENTICATION_SUCCESSFUL = 'authentication_successful'
SEND_PRIVMSG = 'send_privmsg'
CHANNEL_NOT_FOUND = 'channel_not_found'
SUCCESS = 'success'
RUN_COMMAND = 'run_command'
loop = asyncio.get_event_loop()
@click.command()
@click.option('--host', prompt='Command server host', default='localhost')
@click.option('--port', prompt='Command server port', default='1337')
def run(host, port):
"""
Start a websocket client and a terminal UI to interact with it.
"""
# connection state
channels = []
bound_channel = None
ws = None
# UI state
lines = ['example text\n']
output = Text(lines)
input_field = Edit('>> ')
input_state = COMMAND_READ
widget = Frame(Filler(output, 'top'), footer=input_field)
widget.focus_position = 'footer'
# event wiring
event_loop = AsyncioEventLoop(loop=loop)
input_cb = None
def write(msg):
"""
Show an additional line of text.
"""
lines.append(msg + '\n')
output.set_text(lines)
def prompt_for_password(msg):
"""
Change prompt to password prompt. Return a future for the typed password.
"""
nonlocal input_cb, input_state
input_cb = loop.create_future()
input_state = PASSWORD_READ
input_cb.add_done_callback(_password_done)
input_field.set_mask('*')
input_field.set_caption(msg)
return input_cb
def _password_done(_):
nonlocal input_state
input_field.set_mask(None)
input_state = COMMAND_READ
def accept_input(key):
"""
Process typed lines of text. Dispatches to password prompt or command prompt
as needed.
"""
if key == 'enter':
if input_state == PASSWORD_READ:
input_cb.set_result(input_field.edit_text)
elif input_state == COMMAND_READ:
cmd_dispatch(input_field.edit_text)
input_field.set_edit_text('')
def update_channels(new_channels):
"""
Receive channel data.
"""
nonlocal channels, bound_channel
channels = new_channels
if len(channels) == 1:
bound_channel = channels[0]
write(f'bound console to channel "{bound_channel}"')
else:
write(f'bot is in these channels: {", ".join(channels)}')
async def ws_dispatch():
"""
Handle websocket messages.
"""
nonlocal ws
ws = await websockets.connect(f'ws://{host}:{port}')
while True:
try:
msg = json.loads(await ws.recv())
if msg['type'] == SEND_PASSWORD:
loop.create_task(ws.send(await prompt_for_password("Server password:")))
elif msg['type'] == DISCONNECTING:
write('server terminated connection...')
ws = None
elif msg['type'] == BAD_PASSWORD:
write('authentication failed... password did not match!')
elif msg['type'] == LIST_CHANNELS:
update_channels(msg['data']['channels'])
elif msg['type'] == AUTHENTICATION_SUCCESSFUL:
write('logged into command server!')
except Exception as e:
write(f'Error: {e}')
raise
def print_help():
write('/channel <channel> : binds this console to a bot-joined channel (needed for /chat)')
write('/chat <msg> : sends the chat message to the channel bound to this console')
write('/sendcmd <commands> [args...]: tells the bot run a command')
write('/quit: exit console')
write('/help to see this message again')
def cmd_dispatch(command):
write(f"dispatching {repr(command)}")
nonlocal bound_channel
if not ws:
write('Not connected')
return
parts = command.split()
if not parts:
print_help()
command_part = parts[0].lower()
if command_part[0] == '/':
command_part = command_part[1:]
args = parts[1:]
if command_part == 'help':
print_help()
elif command_part == 'sendcmd':
if not bound_channel:
write('there is not a bound channel! use `/channel <channel>` to bind one!')
elif not args:
write('you must provide a command to run to /sendcmd, ex: /sendcmd help')
else:
loop.create_task(ws.send(
json.dumps(
{
'type': RUN_COMMAND,
'channel': bound_channel,
'command': args[0],
'args': args[1:],
'silent': True,
}
)
))
elif command_part == 'chat':
if not bound_channel:
write('there is not a bound channel! use `/channel <channel>` to bind one!')
else:
loop.create_task(ws.send(
json.dumps(
{
'type': SEND_PRIVMSG,
'channel': bound_channel,
'message': ' '.join(args),
}
)
))
elif command_part == 'channel':
if not channels:
write('the bot is not currently in any channels, please have the bot join at least one than relaunch this console')
elif not args:
write(f'the bot is currently in these channels: {", ".join(channels)}\ndo `/channel <channel>` to bind this channel to one')
elif args[0] not in channels:
write(f'the bot is not currently in "{args[0]}"')
else:
bound_channel = args[0]
elif command_part == 'quit':
raise ExitMainLoop()
else:
write(f"Unrecognized command {repr(command_part)}")
event_loop.alarm(0, lambda: loop.create_task(ws_dispatch()))
mainloop = MainLoop(widget, event_loop=event_loop, unhandled_input=accept_input)
mainloop.run()
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
38507d07b45390ec1f2ae7abcb4b09bafc861be6 | bea3febeda4c0688dfbb2db584ab4f7d710040e0 | /django/instad/insta/settings.py | 6010fe845419449c188ed9aa6dcdd3369ae86c0d | [] | no_license | airpong/TIL-c9 | c471ac73e23716cf677ba590dd6099e584c42883 | 069cc53820a09cd9787765ad41ba7e792dc342b5 | refs/heads/master | 2022-12-12T22:26:23.147651 | 2019-06-27T08:24:44 | 2019-06-27T08:24:44 | 166,777,129 | 0 | 0 | null | 2022-11-22T03:46:57 | 2019-01-21T08:34:01 | Python | UTF-8 | Python | false | false | 3,302 | py | """
Django settings for insta project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x0@q$j%oeql+7&2jpw@4r0^v^7(&%ov5*9#)@1a!qo(c4!y%wr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['playground-airpong.c9users.io']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'imagekit',
'accounts',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'insta','templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Media
MEDIA_URL = '/mediaimage/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
| [
"[email protected]"
] | |
e62b8f2033d88c7c11b3b8e799e603b19e5974b7 | 4bfbcb96dcfe05fee32d222cb7a274099db055bd | /bilinear_LSTM_hingeloss/utils.py | ece2b6692b2ebe9ab87dc7f335d8cdfc537b3af4 | [] | no_license | naushadzaman/ACL_CKBC | 5c5c8b0669e059f9f08090b9500dff84af94d2e6 | 655f3aaf28ff5040f50e72fb8118934766306969 | refs/heads/master | 2020-07-17T07:30:07.943944 | 2018-11-19T01:27:13 | 2018-11-19T01:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,095 | py | from scipy.io import loadmat
import numpy as np
import math
from random import shuffle
from random import choice
from random import randint
from theano import tensor as T
def lookup(We,words,w):
if w in words:
return We[words[w],:]
else:
#print 'find UUUNKKK words',w
return We[words['UUUNKKK'],:]
def lookupIDX(We,words,w):
if w in words:
return words[w]
else:
#print 'find UUUNKKK words',w
return words['UUUNKKK']
def lookupRelIDX(We,words,w):
w = w.lower()
if w in words:
return words[w]
else:
#print 'find UUUNKKK words',w
return words['UUUNKKK']
def lookup_with_unk(We,words,w):
if w in words:
return We[words[w],:],False
else:
#print 'find Unknown Words in WordSim Task',w
return We[words['UUUNKKK'],:],True
def lookupwordID(We,words,w):
#w = w.strip()
result = []
array = w.split(' ')
for i in range(len(array)):
if(array[i] in words):
result.append(words[array[i]])
else:
#print "Find Unknown Words ",w
result.append(words['UUUNKKK'])
return result
def getData(f):
data = open(f,'r')
lines = data.readlines()
examples = []
for i in lines:
i=i.strip()
if(len(i) > 0):
i=i.split('\t')
e = (i[0], i[1], i[2], float(i[3]))
examples.append(e)
shuffle(examples)
return examples
def getWordmap(textfile):
words={}
We = []
f = open(textfile,'r')
lines = f.readlines()
for (n,i) in enumerate(lines):
i=i.split()
j = 1
v = []
while j < len(i):
v.append(float(i[j]))
j += 1
words[i[0]]=n
We.append(v)
return (words, np.matrix(We))
def getRelation(relationfile):
rel = {}
f = open(relationfile,'r')
lines = f.readlines()
for (n,i) in enumerate(lines):
i = i.strip()
rel[i] = n
return rel
#modified
def getPairMax(label,vec_r,vec,idx,d,We,words,rel,Rel,wi,wj,Weight,Offset,activation):
min = -5000
best = None
for i in range(len(d)):
if i == idx:
continue
(r,w1,w2,l) = d[i]
v1 = getVec(We,words,w1)
#v2 = getVec(We,words,w2)
if(activation.lower()=='relu'):
gv1 = Relu(np.dot(Weight,v1)+Offset[0])
gv2 = Relu(np.dot(Weight,vec)+Offset[0])
if(activation.lower()=='tanh'):
gv1 = np.tanh(np.dot(Weight,v1)+Offset[0])
gv2= np.tanh(np.dot(Weight,vec)+Offset[0])
if(activation.lower()=='sigmoid'):
gv1 = Sigmoid(np.dot(Weight,v1)+Offset[0])
gv2= Sigmoid(np.dot(Weight,vec)+Offset[0])
temp1 = np.dot(gv1, vec_r)
np1 = np.inner(temp1,gv2)
if(np1 > min and not(wi == w1) and not(wj==w1)):
min = np1
best = w1
return best
def getPairRand(label,vec,idx,d,We,words,wi,wj):
wpick = None
while(wpick == None or wpick == wi or wpick == wj):
ww = choice(d)
ridx = randint(0,1)
wpick = ww[ridx]
#print wpick
return wpick
def getPairMix(label,vec,idx,d,We,words,wi,wj):
r1 = randint(0,1)
if r1 == 1:
return getPairMax(label,vec,idx,d,We,words,wi,wj,Weight,Offset,activation)
else:
return getPairRand(label,vec,idx,d,We,words,wi,wj)
def getVec(We,words,t):
t = t.strip()
array = t.split(' ')
if array[0] in words:
vec = We[words[array[0]],:]
else:
#print 'find UUUNKKK words',array[0].lower()
vec = We[words['UUUNKKK'],:]
for i in range(len(array)-1):
#print array[i+1]
if array[i+1] in words:
vec = vec + We[words[array[i+1]],:]
else:
#print 'can not find corresponding vector:',array[i+1].lower()
vec = vec + We[words['UUUNKKK'],:]
vec = vec/len(array)
return vec
def getPairs(d, words, We, rel, Rel, type, size,Weight,Offset,activation):
pairs = []
for i in range(len(d)):
(r, t1, t2, s) = d[i]
v1 = getVec(We,words,t1)
v2 = getVec(We,words,t2)
v_r = Rel[rel[r.lower()]*size:rel[r.lower()]*size+size,:]
p1 = None
#p2 = None
if type == "MAX":
#print w1
#only change the first term
p1 = getPairMax(s,v_r,v2,i,d,We,words,rel,Rel,t1,t2,Weight,Offset,activation)
if type == "RAND":
#print w1
p1 = getPairRand(s,v1,i,d,We,words,rel,Rel,r,t1,t2)
if type == "MIX":
#print w1
p1 = getPairMix(s,v1,i,d,We,words,rel,Rel,r,t1,t2)
pairs.append(p1)
# 'getPairs'+str(len(pairs))
#print pairs
return pairs
def getPairsBatch(d, words, We, rel, Rel, batchsize, type, size,Weight,Offset,activation):
idx = 0
pairs = []
while idx < len(d):
batch = d[idx: idx + batchsize if idx + batchsize < len(d) else len(d)]
if(len(batch) <= 2):
print "batch too small."
continue #just move on because pairing could go faulty
p = getPairs(batch,words,We,rel,Rel,type,size,Weight,Offset,activation)
pairs.extend(p)
idx += batchsize
#print 'getPairsBatch'+str(len(pairs))
return pairs
def convertToIndex(e,words, We, rel, Rel):
if str(e).find(',') != -1:
(r,p1,p2,s) = e
new_e = (lookupRelIDX(Rel, rel, r),lookupwordID(We, words, p1), lookupwordID(We, words, p2), float(s))
#print new_e
return new_e
else:
p1 = e
new_e = (lookupwordID(We, words, p1))
#print new_e
return new_e
def ReluT(x):
return T.switch(x<0, 0 ,x)
def Relu(x):
result = np.zeros(x.shape)
#print x.shape
for i in xrange(result.shape[0]):
if(x[i]>0):
result[i]=x[i]
return result
def Sigmoid(x):
result = np.zeros(x.shape)
for i in xrange(result.shape[0]):
for j in xrange(result.shape[1]):
result[i][j] = 1 / (1 + math.exp(-x[i][j]))
return result
| [
"[email protected]"
] | |
bc48e51be4ffe8728dfa46f4d44de953c2b09add | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0901-1000/0908-Smallest Range I/0908-Smallest Range I.py | 294efb4da9d84443ca7fc5f5c0dbae2aa24d967c | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 122 | py | class Solution:
def smallestRangeI(self, A: List[int], K: int) -> int:
return max(0, max(A) - min(A) - K * 2)
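# Reasoning sketch (added note): every element may shift by at most K in either
# direction, so the spread max(A) - min(A) can shrink by at most 2 * K and can
# never drop below zero, giving max(0, max(A) - min(A) - 2 * K).
# Worked example: A = [1, 3, 6], K = 3 -> max(0, 5 - 6) = 0.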
| [
"[email protected]"
] | |
7c447cdc98629e0992225a79f10d08e2ae28ed04 | 650aed41de2191565dce812a3c4d2b049928f5a4 | /tornado_overview/chapter01/blockio_test.py | 1c48fd520ee956a849f583d2d50952c3f9107b0f | [
"Apache-2.0"
] | permissive | mtianyan/TornadoForum | a41dfc57f1a9ca60a0991dcaa4374cd4a8b6ba93 | 5698dd5cc0e399d3d0ec53e159b8e1f1cddfbe71 | refs/heads/master | 2022-04-23T09:48:25.933781 | 2020-04-20T17:06:23 | 2020-04-20T17:06:23 | 168,485,700 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # 阻塞io
import socket
import requests
html = requests.get("http://www.baidu.com").text
# 1. three-way handshake to establish the TCP connection,
# 2. then block waiting for the server's response
print(html)
print("*" * 30)
# how to fetch the html directly through a raw socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "www.baidu.com"
client.connect((host, 80))  # blocking IO, meaning the CPU is idle during this wait
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format("/", host).encode("utf8"))
data = b""
while 1:
    d = client.recv(1024)  # blocks until data is available
if d:
data += d
else:
break
data = data.decode("utf8")
print(data)
| [
"[email protected]"
] | |
e1bd696eaab1b5eebddfedbc850748664f84c256 | bb983b38f9be7b6fd4ab1a651484db37c1aeff39 | /0705/test2_physical_properties.py | f6a28c9ca6581f46cb5dcee2ac17325035455cc2 | [] | no_license | nakanishi-akitaka/python2018_backup | c214df78372cca993d69f8001010ec2f6dcaf1be | 45766d3c3777de2a91b3e2cf50c6bfedca8627da | refs/heads/master | 2023-02-18T08:04:28.625532 | 2022-06-07T01:02:53 | 2022-06-07T01:02:53 | 201,399,236 | 5 | 30 | null | 2023-02-10T21:06:51 | 2019-08-09T05:48:22 | Jupyter Notebook | UTF-8 | Python | false | false | 583 | py | # -*- coding: utf-8 -*-
"""
Check physical properties of materials
Created on Thu Jul 5 14:04:59 2018
@author: Akitaka
"""
from mendeleev import element
for i in range(1,100):
x=element(i)
# print(x.symbol)
# print(x,x.electron_affinity,"electron affinity")
if(x.electron_affinity==None):
print(x,x.electron_affinity,"electron affinity")
elif(x.electron_affinity<0.0):
print(x,x.electron_affinity,"electron affinity")
# if(x.thermal_conductivity==None):
# print(x,x.thermal_conductivity,"thermal conductivity")
| [
"[email protected]"
] | |
4b8beff234eb9196456cb171893224665acf0ae0 | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/not/sample_good386.py | 12e873b5df7e5bce9581700bed1365123b6204c4 | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | import re
import datetime
import math
import array
import random
import readline
import textwrap
import stringprep
import difflib
nterms = 719
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
	while count < nterms:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
| [
"[email protected]"
] | |
8e632b71a2abf023a97cded3ffed0e7a87717c64 | 5138b8077a944e655570c3d15389ccaac0dafceb | /scripts/fileserver.py | c20b8737a7e1bddee6c8ef5a3c1e1060c3c2b821 | [] | no_license | cms-btv-pog/CTagTraining | 9740abaf4a5a05500782695723cace90f6d8882e | affb2dc09a3bb812d59302990f59cbfaa06370f4 | refs/heads/master | 2021-01-18T22:29:05.148151 | 2016-04-07T07:12:29 | 2016-04-07T07:12:29 | 42,971,506 | 2 | 4 | null | 2016-01-25T10:15:07 | 2015-09-23T01:42:03 | Python | UTF-8 | Python | false | false | 998 | py | '''
Workaround to allow xrd access to root files, given that the ROOT version shipped with anaconda does not
provide the functionality. Files are transferred on demand and deleted when not needed any longer.
'''
import subprocess
import os
import uuid
class PoolFile(object):
def __init__(self, path, delete_on_exit=True):
self.path = path
self.del_once_done = delete_on_exit
def __del__(self):
if self.del_once_done:
print 'deleting %s' % self.path
os.remove(self.path)
def serve(path):
if path.startswith('root://'):
fname = '%s.root' % uuid.uuid1()
print '%s --> %s' % (path, fname)
proc = subprocess.Popen(['xrdcp', path, fname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = proc.wait()
if exitcode != 0:
_, stderr = proc.communicate()
raise RuntimeError('Problem copying file %s, Error: %s' % (path, stderr))
return PoolFile(fname)
else:
return PoolFile(path, False)
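if __name__ == '__main__':
    # Usage sketch (an assumption, not part of the original module): xrootd
    # URLs are copied to a temporary local file that the PoolFile destructor
    # removes once the object goes out of scope; plain local paths pass through.
    import sys
    pooled = serve(sys.argv[1] if len(sys.argv) > 1 else 'local.root')
    print 'reading from local path: %s' % pooled.path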
| [
"[email protected]"
] | |
39c8bc0e2a0434d7c3f69aa93bb3a118e6a627a0 | fa5e890e95f35744a42ae231c6678b8295502c12 | /lectures/migrations/0001_initial.py | 5b9fa25a457967d2fff42b78c8734607ad809eae | [] | no_license | JeeHyungPark/first_MVP | 4518ae01114686e9ad9fde45112c2eef438e1054 | c4a673a69772260d1ebdb16f73b242c4f90da674 | refs/heads/master | 2023-01-05T19:31:35.018377 | 2020-09-02T07:38:34 | 2020-09-02T07:38:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,967 | py | # Generated by Django 3.0.9 on 2020-09-01 16:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lecture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True, verbose_name='제목')),
('video', models.URLField(unique=True, verbose_name='강의출처')),
('description', models.TextField(blank=True, verbose_name='강의설명')),
('lecturer', models.CharField(blank=True, max_length=50, verbose_name='강사')),
('main_category', models.CharField(blank=True, choices=[('코딩', '코딩'), ('미술', '미술'), ('디자인/편집', '디자인/편집')], max_length=16, verbose_name='대분류')),
('sub_category', models.CharField(blank=True, choices=[('Python', 'Python'), ('HTML/CSS', 'HTML/CSS'), ('Javascript', 'Javascript'), ('C', 'C'), ('Java', 'Java'), ('Git', 'Git'), ('연필', '연필'), ('디지털드로잉', '디지털드로잉'), ('색연필', '색연필'), ('수채화', '수채화'), ('펜', '펜'), ('캘리그래피', '캘리그래피'), ('아크릴', '아크릴'), ('Premiere Pro', 'Premiere Pro'), ('Photoshop', 'Photoshop'), ('After Effect', 'After Effect'), ('InDesign', 'InDesign'), ('Illustrator', 'Illustrator'), ('Sketch', 'Sketch')], max_length=18, verbose_name='중분류')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '강의',
'verbose_name_plural': '강의',
'ordering': ['title'],
},
),
]
| [
"[email protected]"
] | |
fa338f2fa35a152beb28b1af654dc0bd2c3f620e | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /6.redis/redis12-线程模型.py | ed2756dce5974767a3cd606fad0585653fcbcf93 | [] | no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("线程模型")
r2=s2.getRootTopic()
r2.setTitle("线程模型")
content={
'线程模型':[
'基于Reactor模式开发',
'文件事件处理器'
],
'Redis称单线程模型':[
'文件事件分派器队列的消费是单线程的'
],
'文件事件处理器4部分组成':[
{'多个套接字':[
'会并发产生不同的操作,每个操作对应不同文件事件',
{'文件事件':[
'对socket操作的抽象',
'当一个socket准备好执行连接accept、read、write、close操作时,会产生一个文件事件'
]}
]},
{'IO多路复用程序':[
'监听多个socket,将socket产生的事件放入队列',
'通过队列以有序、同步且每次一个socket的方式向文件事件分派器传送socket',
'当上一个socket产生的事件被对应事件处理器执行完后,I/O多路复用程序才会向文件事件分派器传送下个socket'
]},
{'文件事件分派器':[
'接收I/O多路复用程序传来的socket',
'根据socket产生的事件类型,调用相应的事件处理器'
]},
{'事件处理器':[
'连接应答处理器',
'命令请求处理器',
'命令回复处理器'
]}
],
'客户端和Redis服务器通信过程':[
'1.客户端向服务器发起【连接请求】,socket产生一个AE_READABLE事件',
'2.AE_READABLE事件映射到【连接应答处理器】',
'3.客户端向服务器发起【命令请求】(不管读还是写请求),socket产生一个AE_READABLE事件',
'4.AE_READABLE事件映射到【命令请求处理器】',
'5.服务器向客户端发起【命令响应】',
'6.AE_WRITABLE事件映射到【命令回复处理器】'
]
}
# build the xmind topic tree from the content dict
xmind.build(content,r2)
# save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"[email protected]"
] | |
9ddca5a1b07df11753fc7a2efda1d2201290c07d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/ehb.py | 058990e09572712d7dcb0b5b6a5f7d55aff559ca | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'eHB':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e867fcb024d983a83ea381bf70d707e03b5bf658 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/byt3dev/titanic-data-science-solutions/titanic-data-science-solutions.py | dded11be2e4ef2c17bb12915f5a13b249e462570 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 38,808 | py | #!/usr/bin/env python
# coding: utf-8
# # Titanic Data Science Solutions
#
# This notebook is companion to the book [Data Science Solutions](https://startupsci.com). The notebook walks us through a typical workflow for solving data science competitions at sites like Kaggle.
#
# There are several excellent notebooks to study data science competition entries. However many will skip some of the explanation on how the solution is developed as these notebooks are developed by experts for experts. The objective of this notebook is to follow a step-by-step workflow, explaining each step and rationale for every decision we take during solution development.
#
# ## Workflow stages
#
# The competition solution workflow goes through seven stages described in the Data Science Solutions book.
#
# 1. Question or problem definition.
# 2. Acquire training and testing data.
# 3. Wrangle, prepare, cleanse the data.
# 4. Analyze, identify patterns, and explore the data.
# 5. Model, predict and solve the problem.
# 6. Visualize, report, and present the problem solving steps and final solution.
# 7. Supply or submit the results.
#
# The workflow indicates general sequence of how each stage may follow the other. However there are use cases with exceptions.
#
# - We may combine multiple workflow stages. We may analyze by visualizing data.
# - Perform a stage earlier than indicated. We may analyze data before and after wrangling.
# - Perform a stage multiple times in our workflow. Visualize stage may be used multiple times.
# - Drop a stage altogether. We may not need supply stage to productize or service enable our dataset for a competition.
#
#
# ## Question and problem definition
#
# Competition sites like Kaggle define the problem to solve or questions to ask while providing the datasets for training your data science model and testing the model results against a test dataset. The question or problem definition for Titanic Survival competition is [described here at Kaggle](https://www.kaggle.com/c/titanic).
#
# > Knowing from a training set of samples listing passengers who survived or did not survive the Titanic disaster, can our model determine based on a given test dataset not containing the survival information, if these passengers in the test dataset survived or not.
#
# We may also want to develop some early understanding about the domain of our problem. This is described on the [Kaggle competition description page here](https://www.kaggle.com/c/titanic). Here are the highlights to note.
#
# - On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. Translated 32% survival rate.
# - One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew.
# - Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.
#
# ## Workflow goals
#
# The data science solutions workflow solves for seven major goals.
#
# **Classifying.** We may want to classify or categorize our samples. We may also want to understand the implications or correlation of different classes with our solution goal.
#
# **Correlating.** One can approach the problem based on available features within the training dataset. Which features within the dataset contribute significantly to our solution goal? Statistically speaking, is there a [correlation](https://en.wikiversity.org/wiki/Correlation) between a feature and the solution goal? As the feature values change, does the solution state change as well, and vice versa? This can be tested both for numerical and categorical features in the given dataset. We may also want to determine correlation among features other than survival for subsequent goals and workflow stages. Correlating certain features may help in creating, completing, or correcting features.
#
# **Converting.** For modeling stage, one needs to prepare the data. Depending on the choice of model algorithm one may require all features to be converted to numerical equivalent values. So for instance converting text categorical values to numeric values.
#
# **Completing.** Data preparation may also require us to estimate any missing values within a feature. Model algorithms may work best when there are no missing values.
#
# **Correcting.** We may also analyze the given training dataset for errors or possibly inaccurate values within features and try to correct these values or exclude the samples containing the errors. One way to do this is to detect any outliers among our samples or features. We may also completely discard a feature if it is not contributing to the analysis or may significantly skew the results.
#
# **Creating.** Can we create new features based on an existing feature or a set of features, such that the new feature follows the correlation, conversion, completeness goals.
#
# **Charting.** How to select the right visualization plots and charts depending on nature of the data and the solution goals. A good start is to read the Tableau paper on [Which chart or graph is right for you?](http://www.tableau.com/learn/whitepapers/which-chart-or-graph-is-right-for-you#ERAcoH5sEG5CFlek.99).
# ## Refactor Release 2017-Jan-29
#
# We are significantly refactoring the notebook based on (a) comments received by readers, (b) issues in porting notebook from Jupyter kernel (2.7) to Kaggle kernel (3.5), and (c) review of few more best practice kernels.
#
# ### User comments
#
# - Combine training and test data for certain operations like converting titles across dataset to numerical values. (thanks @Sharan Naribole)
# - Correct observation - nearly 30% of the passengers had siblings and/or spouses aboard. (thanks @Reinhard)
# - Correctly interpreting logistic regression coefficients. (thanks @Reinhard)
#
# ### Porting issues
#
# - Specify plot dimensions, bring legend into plot.
#
#
# ### Best practices
#
# - Performing feature correlation analysis early in the project.
# - Using multiple plots instead of overlays for readability.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
# ## Acquire data
#
# The Python Pandas packages helps us work with our datasets. We start by acquiring the training and testing datasets into Pandas DataFrames. We also combine these datasets to run certain operations on both datasets together.
# In[ ]:
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
# ## Analyze by describing data
#
# Pandas also helps describe the datasets answering following questions early in our project.
#
# **Which features are available in the dataset?**
#
# Noting the feature names for directly manipulating or analyzing these. These feature names are described on the [Kaggle data page here](https://www.kaggle.com/c/titanic/data).
# In[ ]:
print(train_df.columns.values)
# **Which features are categorical?**
#
# These values classify the samples into sets of similar samples. Within categorical features are the values nominal, ordinal, ratio, or interval based? Among other things this helps us select the appropriate plots for visualization.
#
# - Categorical: Survived, Sex, and Embarked. Ordinal: Pclass.
#
# **Which features are numerical?**
#
# Which features are numerical? These values change from sample to sample. Within numerical features are the values discrete, continuous, or timeseries based? Among other things this helps us select the appropriate plots for visualization.
#
# - Continous: Age, Fare. Discrete: SibSp, Parch.
# In[ ]:
# preview the data
train_df.head()
# **Which features are mixed data types?**
#
# Numerical, alphanumeric data within same feature. These are candidates for correcting goal.
#
# - Ticket is a mix of numeric and alphanumeric data types. Cabin is alphanumeric.
#
# **Which features may contain errors or typos?**
#
# This is harder to review for a large dataset, however reviewing a few samples from a smaller dataset may just tell us outright, which features may require correcting.
#
# - Name feature may contain errors or typos as there are several ways used to describe a name including titles, round brackets, and quotes used for alternative or short names.
# In[ ]:
train_df.tail()
# **Which features contain blank, null or empty values?**
#
# These will require correcting.
#
# - Cabin > Age > Embarked features contain a number of null values in that order for the training dataset.
# - Cabin > Age are incomplete in case of test dataset.
#
# **What are the data types for various features?**
#
# Helping us during converting goal.
#
# - Seven features are integer or floats. Six in case of test dataset.
# - Five features are strings (object).
# In[ ]:
train_df.info()
print('_'*40)
test_df.info()
# **What is the distribution of numerical feature values across the samples?**
#
# This helps us determine, among other early insights, how representative is the training dataset of the actual problem domain.
#
# - Total samples are 891 or 40% of the actual number of passengers on board the Titanic (2,224).
# - Survived is a categorical feature with 0 or 1 values.
# - Around 38% samples survived representative of the actual survival rate at 32%.
# - Most passengers (> 75%) did not travel with parents or children.
# - Nearly 30% of the passengers had siblings and/or spouse aboard.
# - Fares varied significantly with few passengers (<1%) paying as high as $512.
# - Few elderly passengers (<1%) within age range 65-80.
# In[ ]:
train_df.describe()
# Review survived rate using `percentiles=[.61, .62]` knowing our problem description mentions 38% survival rate.
# Review Parch distribution using `percentiles=[.75, .8]`
# SibSp distribution `[.68, .69]`
# Age and Fare `[.1, .2, .3, .4, .5, .6, .7, .8, .9, .99]`
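# In[ ]:
# Illustrative sketch (not an original notebook cell): one way to run the percentile reviews suggested above on the same training DataFrame.
train_df.describe(percentiles=[.61, .62, .68, .69, .75, .8])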
# **What is the distribution of categorical features?**
#
# - Names are unique across the dataset (count=unique=891)
# - Sex variable as two possible values with 65% male (top=male, freq=577/count=891).
# - Cabin values have several duplicates across samples. Alternatively several passengers shared a cabin.
# - Embarked takes three possible values. S port used by most passengers (top=S)
# - Ticket feature has high ratio (22%) of duplicate values (unique=681).
# In[ ]:
train_df.describe(include=['O'])
# ### Assumptions based on data analysis
#
# We arrive at following assumptions based on data analysis done so far. We may validate these assumptions further before taking appropriate actions.
#
# **Correlating.**
#
# We want to know how well each feature correlates with Survival. We want to do this early in our project and match these quick correlations with modelled correlations later in the project.
#
# **Completing.**
#
# 1. We may want to complete Age feature as it is definitely correlated to survival.
# 2. We may want to complete the Embarked feature as it may also correlate with survival or another important feature.
#
# **Correcting.**
#
# 1. Ticket feature may be dropped from our analysis as it contains high ratio of duplicates (22%) and there may not be a correlation between Ticket and survival.
# 2. Cabin feature may be dropped as it is highly incomplete or contains many null values both in training and test dataset.
# 3. PassengerId may be dropped from training dataset as it does not contribute to survival.
# 4. Name feature is relatively non-standard, may not contribute directly to survival, so it may be dropped.
#
# **Creating.**
#
# 1. We may want to create a new feature called Family based on Parch and SibSp to get total count of family members on board.
# 2. We may want to engineer the Name feature to extract Title as a new feature.
# 3. We may want to create new feature for Age bands. This turns a continous numerical feature into an ordinal categorical feature.
# 4. We may also want to create a Fare range feature if it helps our analysis.
#
# **Classifying.**
#
# We may also add to our assumptions based on the problem description noted earlier.
#
# 1. Women (Sex=female) were more likely to have survived.
# 2. Children (Age<?) were more likely to have survived.
# 3. The upper-class passengers (Pclass=1) were more likely to have survived.
# ## Analyze by pivoting features
#
# To confirm some of our observations and assumptions, we can quickly analyze our feature correlations by pivoting features against each other. We can only do so at this stage for features which do not have any empty values. It also makes sense doing so only for features which are categorical (Sex), ordinal (Pclass) or discrete (SibSp, Parch) type.
#
# - **Pclass** We observe significant correlation (>0.5) among Pclass=1 and Survived (classifying #3). We decide to include this feature in our model.
# - **Sex** We confirm the observation during problem definition that Sex=female had very high survival rate at 74% (classifying #1).
# - **SibSp and Parch** These features have zero correlation for certain values. It may be best to derive a feature or a set of features from these individual features (creating #1).
# In[ ]:
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[ ]:
train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[ ]:
train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[ ]:
train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# ## Analyze by visualizing data
#
# Now we can continue confirming some of our assumptions using visualizations for analyzing the data.
#
# ### Correlating numerical features
#
# Let us start by understanding correlations between numerical features and our solution goal (Survived).
#
# A histogram chart is useful for analyzing continuous numerical variables like Age where banding or ranges will help identify useful patterns. The histogram can indicate distribution of samples using automatically defined bins or equally ranged bands. This helps us answer questions relating to specific bands (Did infants have better survival rate?)
#
# Note that the y-axis in these histogram visualizations represents the count of samples or passengers, while the x-axis shows Age.
#
# **Observations.**
#
# - Infants (Age <=4) had high survival rate.
# - Oldest passengers (Age = 80) survived.
# - Large number of 15-25 year olds did not survive.
# - Most passengers are in 15-35 age range.
#
# **Decisions.**
#
# This simple analysis confirms our assumptions as decisions for subsequent workflow stages.
#
# - We should consider Age (our assumption classifying #2) in our model training.
# - Complete the Age feature for null values (completing #1).
# - We should band age groups (creating #3).
# In[ ]:
g = sns.FacetGrid(train_df, col='Survived')
g.map(plt.hist, 'Age', bins=20)
# ### Correlating numerical and ordinal features
#
# We can combine multiple features for identifying correlations using a single plot. This can be done with numerical and categorical features which have numeric values.
#
# **Observations.**
#
# - Pclass=3 had most passengers, however most did not survive. Confirms our classifying assumption #2.
# - Infant passengers in Pclass=2 and Pclass=3 mostly survived. Further qualifies our classifying assumption #2.
# - Most passengers in Pclass=1 survived. Confirms our classifying assumption #3.
# - Pclass varies in terms of Age distribution of passengers.
#
# **Decisions.**
#
# - Consider Pclass for model training.
# In[ ]:
# grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')
grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend();
# ### Correlating categorical features
#
# Now we can correlate categorical features with our solution goal.
#
# **Observations.**
#
# - Female passengers had much better survival rate than males. Confirms classifying (#1).
# - Exception in Embarked=C where males had higher survival rate. This could be a correlation between Pclass and Embarked and in turn Pclass and Survived, not necessarily direct correlation between Embarked and Survived.
# - Males had better survival rate in Pclass=3 when compared with Pclass=2 for C and Q ports. Completing (#2).
# - Ports of embarkation have varying survival rates for Pclass=3 and among male passengers. Correlating (#1).
#
# **Decisions.**
#
# - Add Sex feature to model training.
# - Complete and add Embarked feature to model training.
# In[ ]:
# grid = sns.FacetGrid(train_df, col='Embarked')
grid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend()
# ### Correlating categorical and numerical features
#
# We may also want to correlate categorical features (with non-numeric values) and numeric features. We can consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), Fare (Numeric continuous), with Survived (Categorical numeric).
#
# **Observations.**
#
# - Higher fare paying passengers had better survival. Confirms our assumption for creating (#4) fare ranges.
# - Port of embarkation correlates with survival rates. Confirms correlating (#1) and completing (#2).
#
# **Decisions.**
#
# - Consider banding Fare feature.
# In[ ]:
# grid = sns.FacetGrid(train_df, col='Embarked', hue='Survived', palette={0: 'k', 1: 'w'})
grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size=2.2, aspect=1.6)
grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)
grid.add_legend()
# ## Wrangle data
#
# We have collected several assumptions and decisions regarding our datasets and solution requirements. So far we did not have to change a single feature or value to arrive at these. Let us now execute our decisions and assumptions for correcting, creating, and completing goals.
#
# ### Correcting by dropping features
#
# This is a good starting goal to execute. By dropping features we are dealing with fewer data points. Speeds up our notebook and eases the analysis.
#
# Based on our assumptions and decisions we want to drop the Cabin (correcting #2) and Ticket (correcting #1) features.
#
# Note that where applicable we perform operations on both training and testing datasets together to stay consistent.
# In[ ]:
print("Before", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
print("After", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
# ### Creating new feature extracting from existing
#
# We want to analyze if Name feature can be engineered to extract titles and test correlation between titles and survival, before dropping Name and PassengerId features.
#
# In the following code we extract Title feature using regular expressions. The RegEx pattern `([A-Za-z]+)\.` matches the first word which ends with a dot character within Name feature. The `expand=False` flag returns a Series rather than a DataFrame.
#
# **Observations.**
#
# When we plot Title, Age, and Survived, we note the following observations.
#
# - Most titles band Age groups accurately. For example: Master title has Age mean of 5 years.
# - Survival among Title Age bands varies slightly.
# - Certain titles mostly survived (Mme, Lady, Sir) or did not (Don, Rev, Jonkheer).
#
# **Decision.**
#
# - We decide to retain the new Title feature for model training.
# In[ ]:
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])
# We can replace many titles with a more common name or classify them as `Rare`.
# In[ ]:
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# We can convert the categorical titles to ordinal.
# In[ ]:
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train_df.head()
# Now we can safely drop the Name feature from training and testing datasets. We also do not need the PassengerId feature in the training dataset.
# In[ ]:
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
# ### Converting a categorical feature
#
# Now we can convert features which contain strings to numerical values. This is required by most model algorithms. Doing so will also help us in achieving the feature completing goal.
#
# Let us start by converting Sex feature to a new feature called Gender where female=1 and male=0.
# In[ ]:
for dataset in combine:
dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
# ### Completing a numerical continuous feature
#
# Now we should start estimating and completing features with missing or null values. We will first do this for the Age feature.
#
# We can consider three methods to complete a numerical continuous feature.
#
# 1. A simple way is to generate random numbers between mean and [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation).
#
# 2. More accurate way of guessing missing values is to use other correlated features. In our case we note correlation among Age, Gender, and Pclass. Guess Age values using [median](https://en.wikipedia.org/wiki/Median) values for Age across sets of Pclass and Gender feature combinations. So, median Age for Pclass=1 and Gender=0, Pclass=1 and Gender=1, and so on...
#
# 3. Combine methods 1 and 2. So instead of guessing age values based on median, use random numbers between mean and standard deviation, based on sets of Pclass and Gender combinations.
#
# Method 1 and 3 will introduce random noise into our models. The results from multiple executions might vary. We will prefer method 2.
# In[ ]:
# grid = sns.FacetGrid(train_df, col='Pclass', hue='Gender')
grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
# Let us start by preparing an empty array to contain guessed Age values based on Pclass x Gender combinations.
# In[ ]:
guess_ages = np.zeros((2,3))
guess_ages
# Now we iterate over Sex (0 or 1) and Pclass (1, 2, 3) to calculate guessed values of Age for the six combinations.
# In[ ]:
for dataset in combine:
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
# age_mean = guess_df.mean()
# age_std = guess_df.std()
# age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)
age_guess = guess_df.median()
# Convert random age float to nearest .5 age
guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
train_df.head()
# Let us create Age bands and determine correlations with Survived.
# In[ ]:
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
# Let us replace Age with ordinals based on these bands.
# In[ ]:
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df.head()
# We can now remove the AgeBand feature.
# In[ ]:
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
# ### Create new feature combining existing features
#
# We can create a new feature for FamilySize which combines Parch and SibSp. This will enable us to drop Parch and SibSp from our datasets.
# In[ ]:
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# We can create another feature called IsAlone.
# In[ ]:
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# Let us drop Parch, SibSp, and FamilySize features in favor of IsAlone.
# In[ ]:
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# We can also create an artificial feature combining Pclass and Age.
# In[ ]:
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
# ### Completing a categorical feature
#
# Embarked feature takes S, Q, C values based on port of embarkation. Our training dataset has two missing values. We simply fill these with the most common occurrence.
# In[ ]:
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# In[ ]:
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# ### Converting categorical feature to numeric
#
# We can now convert the Embarked feature to numeric values by mapping each port to an integer.
# In[ ]:
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
# ### Quick completing and converting a numeric feature
#
# We can now complete the Fare feature for the single missing value in the test dataset using the median of the existing values. We do this in a single line of code.
#
# Note that we are not creating an intermediate new feature or doing any further analysis for correlation to guess missing feature as we are replacing only a single value. The completion goal achieves desired requirement for model algorithm to operate on non-null values.
#
# We may also want to round off the fare to two decimals as it represents currency.
# In[ ]:
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# We can now create FareBand.
# In[ ]:
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
# Convert the Fare feature to ordinal values based on the FareBand.
# In[ ]:
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10)
# And the test dataset.
# In[ ]:
test_df.head(10)
# ## Model, predict and solve
#
# Now we are ready to train a model and predict the required solution. There are 60+ predictive modelling algorithms to choose from. We must understand the type of problem and solution requirement to narrow down to a select few models which we can evaluate. Our problem is a classification and regression problem. We want to identify the relationship between the output (Survived or not) and other variables or features (Gender, Age, Port...). We are also performing a category of machine learning which is called supervised learning as we are training our model with a given dataset. With these two criteria - Supervised Learning plus Classification and Regression, we can narrow down our choice of models to a few. These include:
#
# - Logistic Regression
# - KNN or k-Nearest Neighbors
# - Support Vector Machines
# - Naive Bayes classifier
# - Decision Tree
# - Random Forest
# - Perceptron
# - Artificial neural network
# - RVM or Relevance Vector Machine
# In[ ]:
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. Reference [Wikipedia](https://en.wikipedia.org/wiki/Logistic_regression).
#
# Note the confidence score generated by the model based on our training dataset.
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
# We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. This can be done by calculating the coefficient of the features in the decision function.
#
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
#
# - Sex has the highest positive coefficient, implying that as the Sex value increases (male: 0 to female: 1), the probability of Survived=1 increases the most.
# - Inversely as Pclass increases, probability of Survived=1 decreases the most.
# - This way Age*Class is a good artificial feature to model as it has second highest negative correlation with Survived.
# - So is Title as second highest positive correlation.
# In[ ]:
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# Next we model using Support Vector Machines which are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. Given a set of training samples, each marked as belonging to one or the other of **two categories**, an SVM training algorithm builds a model that assigns new test samples to one category or the other, making it a non-probabilistic binary linear classifier. Reference [Wikipedia](https://en.wikipedia.org/wiki/Support_vector_machine).
#
# Note that the model generates a confidence score which is higher than Logistics Regression model.
# In[ ]:
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
# In pattern recognition, the k-Nearest Neighbors algorithm (or k-NN for short) is a non-parametric method used for classification and regression. A sample is classified by a majority vote of its neighbors, with the sample being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. Reference [Wikipedia](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm).
#
# KNN confidence score is better than Logistics Regression but worse than SVM.
# In[ ]:
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
# In machine learning, naive Bayes classifiers are a family of simple probabilistic classifiers based on applying Bayes' theorem with strong (naive) independence assumptions between the features. Naive Bayes classifiers are highly scalable, requiring a number of parameters linear in the number of variables (features) in a learning problem. Reference [Wikipedia](https://en.wikipedia.org/wiki/Naive_Bayes_classifier).
#
# The model generated confidence score is the lowest among the models evaluated so far.
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
# The perceptron is an algorithm for supervised learning of binary classifiers (functions that can decide whether an input, represented by a vector of numbers, belongs to some specific class or not). It is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector. The algorithm allows for online learning, in that it processes elements in the training set one at a time. Reference [Wikipedia](https://en.wikipedia.org/wiki/Perceptron).
# In[ ]:
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)
acc_perceptron
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
acc_linear_svc
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)
acc_sgd
# This model uses a decision tree as a predictive model which maps features (tree branches) to conclusions about the target value (tree leaves). Tree models where the target variable can take a finite set of values are called classification trees; in these tree structures, leaves represent class labels and branches represent conjunctions of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically real numbers) are called regression trees. Reference [Wikipedia](https://en.wikipedia.org/wiki/Decision_tree_learning).
#
# The model confidence score is the highest among models evaluated so far.
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
# The next model Random Forests is one of the most popular. Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks, that operate by constructing a multitude of decision trees (n_estimators=100) at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. Reference [Wikipedia](https://en.wikipedia.org/wiki/Random_forest).
#
# The model confidence score is the highest among models evaluated so far. We decide to use this model's output (Y_pred) for creating our competition submission of results.
# In[ ]:
# Random Forest
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# ### Model evaluation
#
# We can now rank our evaluation of all the models to choose the best one for our problem. While both Decision Tree and Random Forest score the same, we choose to use Random Forest as they correct for decision trees' habit of overfitting to their training set.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
# submission.to_csv('../output/submission.csv', index=False)
# Our submission to the competition site Kaggle results in scoring 3,883 of 6,082 competition entries. This result is indicative while the competition is running. This result only accounts for part of the submission dataset. Not bad for our first attempt. Any suggestions to improve our score are most welcome.
# ## References
#
# This notebook has been created based on great work done solving the Titanic competition and other sources.
#
# - [A journey through Titanic](https://www.kaggle.com/omarelgabry/titanic/a-journey-through-titanic)
# - [Getting Started with Pandas: Kaggle's Titanic Competition](https://www.kaggle.com/c/titanic/details/getting-started-with-random-forests)
# - [Titanic Best Working Classifier](https://www.kaggle.com/sinakhorami/titanic/titanic-best-working-classifier)
| [
"[email protected]"
] | |
efd6dd85796d1d65f530aaa37a650624b1f19999 | cc8905a957e9e0fa211e5e14e6cda6957727c5dc | /ipwxlearn/tests/glue/test_updates.py | 91d6c4e7364f03c7f83768bba473cdaea04aab4c | [] | no_license | korepwx/ipwxlearn | 630ae276e1a8b95e68d466debdaf4f51c5c6d634 | afbfe8ee1af114a8bf6aac73aee36c4d0930b8fc | refs/heads/master | 2021-01-14T10:15:05.883476 | 2016-07-15T01:59:23 | 2016-07-15T01:59:23 | 57,875,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | # -*- coding: utf-8 -*-
import unittest
import numpy as np
from ipwxlearn import glue
from ipwxlearn.glue import G
class UpdatesTestCase(unittest.TestCase):
def _do_test_update(self, optimizer, n_dim=256, *args, **kwargs):
graph = G.Graph()
with graph.as_default():
# okay, compose the quadratic function.
x = G.make_variable('x', shape=[n_dim], init=G.init.Uniform([-1, 1]), dtype=glue.config.floatX)
# finally, create the training function.
loss = G.op.dot(x, x)
train_fn = G.make_function(updates=optimizer(loss, [x], *args, **kwargs), outputs=loss)
with G.Session(graph):
best_x = G.get_variable_values(x)
best_loss = np.dot(best_x, best_x)
self.assertGreater(np.mean((best_x - np.zeros_like(best_x)) ** 2), 1e-2)
for i in range(700):
train_loss = train_fn()
if train_loss < best_loss:
best_x = G.get_variable_values(x)
best_loss = train_loss
self.assertLess(np.mean((best_x - np.zeros_like(best_x)) ** 2), 1e-7)
def test_sgd(self):
"""Test training with SGD."""
self._do_test_update(G.updates.sgd, learning_rate=0.01)
def test_momentum(self):
"""Test training with momentum."""
self._do_test_update(G.updates.momentum, learning_rate=0.001)
@unittest.skipIf(glue.config.backend == 'tensorflow', 'TensorFlow has not supported Nesterov momentum yet.')
def test_nesterov_momentum(self):
"""Test training with nesterov momentum."""
self._do_test_update(G.updates.nesterov_momentum, learning_rate=0.001)
def test_adagrad(self):
"""Test training with AdaGrad."""
self._do_test_update(G.updates.adagrad, learning_rate=1.0)
def test_rmsprop(self):
"""Test training with RMSProp."""
self._do_test_update(G.updates.rmsprop, learning_rate=10.0, rho=0.999)
def test_adam(self):
"""Test training with Adam."""
self._do_test_update(G.updates.adam, learning_rate=0.01)
| [
"[email protected]"
] | |
05a0b55aa941375cb364396a2d5cb1c4b6bd978a | 1382e88bc948a1f6b506018521827a1fafb9c2df | /modules/dictionary/dictionary.py | 39305b1b1169a9c819196605b33ef8f97574931d | [] | no_license | nano13/tambi | 73c405d333b91dc478d7cd274e3f8516fde15bd5 | 9475110ddd9ebb153de4bc8c734ce95c11d63186 | refs/heads/master | 2021-01-18T12:52:22.386453 | 2019-01-19T13:52:50 | 2019-01-19T13:52:50 | 100,367,577 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py |
# -*- coding: utf_8 -*-
from interpreter.exceptions import CommandNotInThisModule
from interpreter.structs import Result
import sqlite3
class Dictionary(object):
def __init__(self):
pass
def initDbConnection(self):
self.connection = sqlite3.connect("./modules/vocable/vocables.db")
self.cursor = self.connection.cursor()
def getCommands(self):
return {
"dictionary.commands" : self.commands,
"dictionary.hebrew" : self.hebrew,
"dictionary.greek" : self.greek,
"dictionary.aramaic" : self.aramaic,
"dictionary.akkadian" : self.akkadian,
}
def interpreter(self, command, args, queue):
print("args:", args)
commands = self.getCommands()
return commands.get(command, self.commandNotFound)(command, args)
def commandNotFound(self, c, a):
        raise CommandNotInThisModule("command not found in module dictionary")
def commands(self, c, a):
dic = self.getCommands()
commands = sorted(dic.items())
all_commands = []
for key in commands:
line = str(key).split(",")[0]
all_commands.append(str(line[2:-1]))
result_object = Result()
result_object.category = "list"
result_object.payload = all_commands
return result_object
def hebrew(self, c, args):
return self.dictionaryHelper(args, 'hebrew')
def greek(self, c, args):
return self.dictionaryHelper(args, 'greek')
def aramaic(self, c, args):
return self.dictionaryHelper(args, 'aramaic')
def akkadian(self, c, args):
return self.dictionaryHelper(args, 'akkadian')
def dictionaryHelper(self, args, language):
result_object = Result()
query = """
SELECT display, gloss
FROM {0}
WHERE display LIKE ? OR gloss LIKE ?
""".format(language)
try:
param = '%'+str(args[0])+'%'
except IndexError:
result_object.error = 'invalid parameter'
else:
self.initDbConnection()
self.cursor.execute(query, [param, param])
result_object.payload = self.cursor.fetchall()
result_object.category = "itemized"
result_object.name = "dictionary result"
return result_object
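# Usage sketch (an assumption, not part of the original module): interpreter()
# dispatches "dictionary.<language>" commands against the bundled vocables.db,
# for example:
#   result = Dictionary().interpreter('dictionary.hebrew', ['shalom'], None)
#   print(result.payload)
# (kept as a comment because it needs ./modules/vocable/vocables.db on disk)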
| [
"[email protected]"
] | |
c626b14c3515006e2869ad2a09ddea3d53f9a59a | 9468a03f04f91bbb76338253ccb53b885b65698a | /beam_models/EMSS/with_elevation/SKADCBeamPatterns/2019_08_06_SKA_Ku/interpolated/interpolate_beam_Ku.py | 035dcf0d39393dc2b17e0baaeeffdfca603503e6 | [
"BSD-3-Clause"
] | permissive | ska-telescope/sim-mid-pointing | 53a9cd1cb1e66584a72b4f50e51b3e15942d1de7 | 0f11d37e6fac231d7f20e4a7e20ee76e7d2d560f | refs/heads/master | 2020-05-20T06:47:51.107561 | 2020-01-23T13:50:22 | 2020-01-23T13:50:22 | 185,428,675 | 0 | 1 | BSD-3-Clause | 2019-10-15T16:21:26 | 2019-05-07T15:22:02 | Python | UTF-8 | Python | false | false | 3,440 | py | import logging
import sys
import numpy
from processing_library.image.operations import create_empty_image_like
from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits
import matplotlib.pyplot as plt
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
import pprint
pp = pprint.PrettyPrinter()
from scipy import interpolate
# x = np.arange(0, 10)
# y = np.exp(-x/3.0)
# f = interpolate.interp1d(x, y)
#
# xnew = np.arange(0,9, 0.1)
# ynew = f(xnew) # use interpolation function returned by `interp1d`
# plt.plot(x, y, 'o', xnew, ynew, '-')
# plt.show()
elevations_in = numpy.array([15, 45, 90], dtype='float')
elevations_out = numpy.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90], dtype='float')
elevations_out = numpy.arange(15.0, 90, 1.0)
default = 1
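# Added note: 'default' indexes elevations_in above, so 1 selects the 45 deg
# input pattern, which is subtracted below to form the *_difference images.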
nchan = 1
npol = 4
ny = 1024
nx = 1024
array_in = numpy.zeros([nchan, npol, ny, ny, len(elevations_in)])
array_out = numpy.zeros([nchan, npol, ny, ny, len(elevations_out)])
im_in = "../Ku_{el:d}_11700_{type}.fits"
im_out = "Ku_{el:d}_11700_{type}_interpolated.fits"
im_diff_out = "Ku_{el:d}_11700_{type}_interpolated_difference.fits"
im_template = None
for type in ['real', 'imag']:
for iel, el in enumerate(elevations_in):
print("Reading elevation %s part elevation %.0f" % (type, el))
im_in_file = im_in.format(el=int(el), type=type)
im = import_image_from_fits(im_in_file)
array_in[..., iel] = im.data
if im_template is None:
im_template = create_empty_image_like(im)
f = interpolate.interp1d(elevations_in, array_in, axis=4, kind='quadratic')
array_out = f(elevations_out)
rms_vp = []
max_vp = []
min_vp = []
rms_diff = []
max_diff = []
min_diff = []
for iel, el in enumerate(elevations_out):
print("Writing elevation %s part %.0f" % (type, el))
im_template.data = array_out[..., iel]
im_out_file = im_out.format(el=int(el), type=type)
export_image_to_fits(im_template, im_out_file)
rms_vp.append(numpy.std(im_template.data[0,0:1,...]))
max_vp.append(numpy.max(im_template.data[0,0:1,...]))
min_vp.append(numpy.min(im_template.data[0,0:1,...]))
im_template.data -= array_in[..., default]
im_diff_out_file = im_diff_out.format(el=int(el), type=type)
export_image_to_fits(im_template, im_diff_out_file)
rms_diff.append(numpy.std(im_template.data[0,0:1,...]))
max_diff.append(numpy.max(im_template.data[0,0:1,...]))
min_diff.append(numpy.min(im_template.data[0,0:1,...]))
plt.clf()
plt.plot(elevations_out, rms_vp, '-', color='r', label='VP rms')
if type == 'imag':
plt.plot(elevations_out, max_vp, '.', color='g', label='VP max')
plt.plot(elevations_out, min_vp, '-', color='b', label='VP min')
plt.plot(elevations_out, rms_diff, '.', color='r', label='VP diff rms')
plt.plot(elevations_out, max_diff, '.', color='g', label='VP diff max')
plt.plot(elevations_out, min_diff, '.', color='b', label='VP diff min')
plt.xlabel('Elevation')
plt.ylabel('Value')
plt.title('Statistics in %s part of 11700MHz voltage pattern' % type)
plt.legend()
plt.savefig('%s_vp_statistics.png' % type)
plt.show(block=False) | [
"[email protected]"
] |