| Column | Type | Range / values |
| --- | --- | --- |
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
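
The data rows that follow use the columns above, one source file per row. A minimal sketch of how such a dump could be read with the Hugging Face `datasets` library (the dataset identifier below is a placeholder, not the real path):

    from datasets import load_dataset

    # placeholder dataset id; stream to avoid downloading the full dump
    ds = load_dataset("org/python-code-dump", split="train", streaming=True)
    row = next(iter(ds))
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file
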
3c227d101d4c7c8c94b047534ef9453524806f5f | 6e95e9b6a1fc996ebcb46c44d4ef7678f762e4f7 | /others/xiangmu/Shandong/淄博.py | bea8f6c3343a4e8c8f6c56153246b4f980f88462 | [
"Apache-2.0"
] | permissive | 625781186/lgd_spiders | 3a4d6917a01e446136e7aef4c92b9b7a1f8e498d | 1c8680115beb42f4daaf6be71bf3fb14fcc2c255 | refs/heads/master | 2020-08-29T13:21:12.116395 | 2019-10-21T14:28:00 | 2019-10-21T14:28:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,699 | py | # -*- coding: utf-8 -*-
import requests
import re
import time
from lxml import etree
import datetime
from common.update_mongo import Update
from common.update_mongo import Update2
from common.spider_class import TongyongSpider
now = datetime.datetime.now().strftime('%Y/%m/%d')
city = 'zibo'
class Producer(TongyongSpider):
def __init__(self, redis_db):
super(Producer, self).__init__(redis_db)
self.url = 'http://www.zbfdc.com.cn/web/building/list?page={}'
def get_links(self, url):
for i in range(1, 5):
try:
response = requests.get(url, headers=self.headers,timeout=40)
text = response.text
html = etree.HTML(text)
a_list = html.xpath('//ul[@class="list"]//li//a/@href')
for a in a_list:
print(a)
self.db.sadd(self.redis_db, a)
return
except Exception as e:
print(url, e)
def run(self):
for i in range(1, 705):
self.get_links(self.url.format(i))
time.sleep(0.5)
class Consumer(TongyongSpider):
def parse_detail(self, url):
for i in range(1, 5):
try:
response = requests.get(url, headers=self.headers,timeout=40)
text = response.text
if response.text == '{"success":false,"fieldErrors":null,"msg":"楼盘下无房屋","data":null}': return 1
html = etree.HTML(text)
position = re.sub(r'\s', '', ''.join(html.xpath('//div[@class="building-title"]//text()')))
ul = html.xpath('//ul[@class="clearfix"]')[0]
pro_name = re.sub(r'\s', '', ''.join(ul.xpath('./li[1]/span[2]//text()')))
company = re.sub(r'\s', '', ''.join(ul.xpath('./li[7]/span[2]//text()')))
area = re.sub(r'\s', '', ''.join(ul.xpath('./li[8]/span[2]//text()')))
ca_num = re.sub(r'\s', '', ''.join(ul.xpath('./li[9]/span[2]//text()')))
sale_num = re.sub(r'\s', '', ''.join(ul.xpath('./li[10]/span[2]//text()')))
yongdi_time = re.sub(r'\s', '', ''.join(ul.xpath('./li[2]/span[2]//text()')))
yongdi_time = re.search(r'(20\d\d)', yongdi_time)
yongdi_time = yongdi_time.group(1) if yongdi_time else ''
gongcheng_time = re.sub(r'\s', '', ''.join(ul.xpath('./li[4]/span[2]//text()')))
gongcheng_time = re.search(r'(20\d\d)', gongcheng_time)
gongcheng_time = gongcheng_time.group(1) if gongcheng_time else ''
pan_time = ''
price = ''
ca_time = ''
build = (pro_name, ca_num, ca_time, pan_time, sale_num, area, price, position, company, now, url)
print(build)
Update2(build, city)
return 1
except Exception:
print('解析详情页异常')
if i == 4:
return 1
def run(self):
while True:
set_num = self.db.scard(self.redis_db)
if set_num == 0:
print('数目为0')
time.sleep(10)
set_num2 = self.db.scard(self.redis_db)
if set_num2 == 0: return
link = self.db.spop(self.redis_db)
num = self.parse_detail(link)
if num == 1:
time.sleep(0.5)
pass
else:
self.db.sadd(self.redis_db, link)
def run():
p = Producer('SdZibo:Detail')
p.run()
c = Consumer('SdZibo:Detail')
c.run()
if __name__ == '__main__':
run()
| [
"lgdupup"
] | lgdupup |
fbd90a1183ff5f2c498044ca317aadebbf6dab6c | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0063_Unique_Paths_II/try_2.py | 52c31b667efe89f6f4c2c7c52a459bda0a3052c2 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 1,051 | py | class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
'''
        If the cell above or to the left is an obstacle, it contributes 0 paths.
'''
dp = [[0 for j in range(len(obstacleGrid[0]))] for i in range(len(obstacleGrid))]
        # 1st row
flag = True
for j in range(len(obstacleGrid[0])):
if flag and obstacleGrid[0][j] == 0:
dp[0][j] = 1
else:
dp[0][j] = 0
flag = False
        # 1st col
flag = True
for i in range(len(obstacleGrid)):
if flag and obstacleGrid[i][0] == 0:
dp[i][0] = 1
else:
dp[i][0] = 0
flag = False
# loop remain
for i in range(1, len(obstacleGrid)):
for j in range(1, len(obstacleGrid[0])):
if obstacleGrid[i][j] == 1:
continue
dp[i][j] = dp[i-1][j] + dp[i][j-1]
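        # Worked example (added for illustration): for the classic grid
        # [[0,0,0],[0,1,0],[0,0,0]] this table fills to [[1,1,1],[1,0,1],[1,1,2]],
        # so the return below yields 2. Note that running this file standalone also
        # needs `from typing import List` for the method annotation above.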
return dp[-1][-1] | [
"[email protected]"
] | |
bd955766f2c05e3737ab33a80c4b5c543ad7629c | 59f03c7528c9c806e3e25b9864db89f25dfa73c2 | /tests/onegov/swissvotes/test_views_exceptions.py | 968fb33a50f3a189a70f68d75e43cdb109cc258f | [
"MIT"
] | permissive | OneGov/onegov-cloud | 8d8cd6d0378991ebc2333b62337246719102e723 | c706b38d5b67692b4146cdf14ef24d971a32c6b8 | refs/heads/master | 2023-08-24T15:37:52.536958 | 2023-08-24T14:15:54 | 2023-08-24T14:15:54 | 189,431,418 | 17 | 4 | MIT | 2023-09-14T20:39:37 | 2019-05-30T14:47:14 | Python | UTF-8 | Python | false | false | 442 | py | from webtest import TestApp as Client
def test_view_exceptions(swissvotes_app):
client = Client(swissvotes_app)
client.get('/locale/de_CH').follow()
assert (
"Sie versuchen eine Seite zu öffnen, für die Sie nicht autorisiert "
"sind"
) in client.get('/votes/update', status=403)
assert (
"Die angeforderte Seite konnte nicht gefunden werden."
) in client.get('/abstimmungen', status=404)
| [
"[email protected]"
] | |
24b7be1f03ebf1b37b40a37b19178385c6947d2b | ea1ec59c934acc6dfbaa4c8e63349b2d391e9c25 | /pandas-ta-quant-plot/pandas_ta_quant_plot/__init__.py | adc97cab839b9e24088969636475747502329f41 | [
"MIT"
] | permissive | Allensmile/pandas-ml-quant | 475c09573b47ea3d589c94644edbd85d9d1917b2 | 59b702307c1842b0b89b5cbf755c1296da97b00a | refs/heads/master | 2023-06-19T00:24:13.263411 | 2021-06-19T09:30:05 | 2021-06-19T09:30:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | """Augment pandas DataFrame with methods for quant analysis plotting"""
__version__ = '0.2.0'
from collections import namedtuple
from pandas_ta_quant_plot.plots import *
from pandas.core.base import PandasObject
from pandas_ta_quant_plot.ta_plot_context import PlotContext
_ta = getattr(PandasObject, "ta", None)
if _ta is not None:
if getattr(_ta, "plot", None) is None:
setattr(PandasObject, "plot", lambda self, *args, **kwargs: PlotContext(self, *args, **kwargs))
else:
ta = namedtuple("TA", ["plot"])
setattr(PandasObject, "ta", lambda self, *args, **kwargs: ta(plot=PlotContext(self, *args, **kwargs)))
| [
"[email protected]"
] | |
a12fa220e2127d65695380961b8a907ca2a9640b | 6e9a420485b25684b178984fd90f74bbe3ab5b58 | /api/vm/define/vm_define_nic.py | 77484d97193ab6c5c1f3b42141c180c0d77bf09d | [
"Apache-2.0"
] | permissive | BillTheBest/esdc-ce | d367a88685548d41672c773364484ca7f82c4a26 | ab71bf9dc66fb78a0de724077c02c58bc7d970ec | refs/heads/master | 2021-04-26T23:34:06.535783 | 2018-02-25T19:19:15 | 2018-02-25T19:19:15 | 124,016,413 | 1 | 0 | Apache-2.0 | 2018-03-06T03:41:33 | 2018-03-06T03:41:32 | null | UTF-8 | Python | false | false | 4,253 | py | from django.db.transaction import atomic
from api import status as scode
from api.utils.db import get_listitem
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.vm.define.utils import is_vm_operational
from api.vm.define.api_views import VmDefineBaseView
from api.vm.define.serializers import VmDefineNicSerializer
from api.vm.messages import LOG_NIC_CREATE, LOG_NIC_UPDATE, LOG_NIC_DELETE
NIC_ID_MIN = 0
NIC_ID_MAX = 5
def _nic_params(fun):
"""Decorator for nic functions below"""
def wrap(view, vm, nic_id, *args, **kwargs):
if nic_id is None and view.diff:
return SuccessTaskResponse(view.request, view.get_diff(vm))
if view.active:
vm.revert_active(json_only=True)
if nic_id is None:
nic = vm.json_get_nics()
nics = None
kwargs['many'] = True
else:
nics, nic = get_listitem(view.request, vm.json_get_nics(), nic_id, name='VM NIC',
max_value=NIC_ID_MAX, min_value=NIC_ID_MIN)
return fun(view, vm, nic_id, nics, nic, *args, **kwargs)
return wrap
class VmDefineNicView(VmDefineBaseView):
def get_diff(self, vm):
"""Show nic differences between active and in db json. Implies full and denies active vm_define_nic."""
def_current = VmDefineNicSerializer(self.request, vm, vm.json_get_nics(), nic_id=None, many=True).data
def_active = VmDefineNicSerializer(self.request, vm, vm.json_active_get_nics(), nic_id=None, many=True).data
return self._diff_lists(def_active, def_current)
# noinspection PyUnusedLocal
@_nic_params
def get(self, vm, nic_id, nics, nic, data, many=False):
"""Get VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic, nic_id=nic_id, many=many)
return SuccessTaskResponse(self.request, ser.data, vm=vm)
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_nic_params
def post(self, vm, nic_id, nics, nic, data):
"""Create VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic_id=nic_id, data=data)
if ser.is_valid():
nics[nic_id] = ser.jsondata
vm.resolvers = ser.resolvers
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
res = SuccessTaskResponse(self.request, ser.data,
status=scode.HTTP_201_CREATED, vm=vm,
detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
msg=LOG_NIC_CREATE)
ser.save_ip(res.data.get('task_id')) # Always save ip.vm
return res
return FailureTaskResponse(self.request, ser.errors, vm=vm)
@is_vm_operational
@atomic
@_nic_params
def put(self, vm, nic_id, nics, nic, data):
"""Update VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic.copy(), nic_id=nic_id, data=data, partial=True)
if ser.is_valid():
nics[nic_id].update(ser.jsondata)
vm.resolvers = ser.resolvers
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
res = SuccessTaskResponse(self.request, ser.data, vm=vm,
detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
msg=LOG_NIC_UPDATE)
ser.update_ip(res.data.get('task_id')) # Always update ip.vm
return res
return FailureTaskResponse(self.request, ser.errors, vm=vm)
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_nic_params
def delete(self, vm, nic_id, nics, nic, data):
"""Delete VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic)
del nics[nic_id]
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip(delete=True))
res = SuccessTaskResponse(self.request, None, vm=vm,
detail='nic_id=' + str(nic_id + 1),
msg=LOG_NIC_DELETE)
ser.delete_ip(res.data.get('task_id')) # Set ip.vm to None
return res
| [
"[email protected]"
] | |
12931f85fab5e70495912647ce9e0e73a0b33b5f | 7f114a1fb511b816c116d5b9e67cb998e3e23956 | /PyproS42.py | 2376562757fa81b9b049f4905404b79c591ba9a8 | [] | no_license | Bharanij27/bharanirep | 90ac34eb28deaa7ec96d042de456de71b96866d7 | 982133a7939c889d433c178a601441fa087293d9 | refs/heads/master | 2021-08-07T20:22:36.244395 | 2020-06-05T04:58:10 | 2020-06-05T04:58:10 | 186,580,768 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | n,k=map(int,input().split())
l=list(map(int,input().split()))
m=ma=-99999999999
def split(l,i):
m=(max(min(l[:i]),min(l[i:])))
return m
for i in range(1,n):
m=split(l,i)
if m>ma: ma=m
print(ma)
| [
"[email protected]"
] | |
80e7408c473f7878fa0a47f087e8e936739924c4 | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /15.Tomcat/Tomcat16-Connector组件.py | 394d6907603f9e207e89c25c721ef85e84967fe6 | [] | no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="tomcat"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("Connector组件")
r2=s2.getRootTopic()
r2.setTitle("Connector组件")
content={
'HTTP阻塞模式协议——Http11Protocol':[
{'套接字接收终端——JIoEndpoint':[
'端口监听客户端请求,接收套接字连接,提供一个线程池处理接收到的套接字连接,负责对连接数的控制,负责安全与非安全套接字连接的实现等',
{'LimitLatch(连接数控制器)':[
'控制套接字连接个数->控制流',
'BIO模式,连接数:线程数=1:1',
'默认情况,Tomcat处理连接池的线程数为200->BIO流量控制阀门大小也默认为200'
]},
{'Acceptor(套接字接收器)':[
'监听是否有客户端套接字连接并接收套接字',
'将套接字交由Executor执行'
]},
{'ServerSocketFactory套接字工厂':[
'接收终端安全配置不同,套接字不同,引入了工厂模'
]},
{'Executor任务执行器':[
'使用JUC工具包的ThreadPoolExecutor类'
]},
{'SocketProcessor(任务定义器)':[
'处理套接字并响应客户端',
'连接数计数器减1',
'关闭套接字'
]}
]},
{'HTTP阻塞处理器——Http11Processor':[
'套接字的读写和过滤,请求报文解析,生成Request对象,响应内容解析,生成Response对象',
'套接字输入缓冲装置——InternalInputBuffer',
'4个过滤器:IdentityInputFilter、VoidInputFilter、BufferedInputFilter、ChunkedInputFilter',
{'套接字输出缓冲装置——InternalOutputBuffer':[
'OutputStream:套接字的输出通道,通过其将字节写入到操作系统底层',
'OutputStreamOutputBuffer:提供字节流输出的通道,与OutputFilter组合实现过滤效果',
'OutputFilter:过滤器组件',
'ByteChunk:为某个流添加缓冲功能'
]}
]}
],
'HTTP非阻塞模式协议——Http11NioProtocol':[
{'非阻塞接收终端——NioEndpoint':[
'LimitLatch(连接数控制器):对于NIO模式,Tomcat默认流量阀门为10 000',
'Acceptor(套接字接收器):负责接收套接字连接并注册到通道队列里面',
'Poller(轮询器):负责轮询检查事件列表',
{'Poller(轮询器)':[
'负责轮询检查事件列表',
'内部依赖JDK的Selector对象进行轮询,选择出待处理的事件,每轮询一次就选出若干需要处理的通道'
]},
'Poller池:包含了若干Poller组件',
{'SocketProcessor(任务定义器)':[
'用NIO方式读取套接字并进行处理,输出响应报文',
'连接数计数器减一腾出通道',
'关闭套接字'
]},
'Executor(任务执行器)'
]},
{'HTTP非阻塞处理器——Http11NioProcessor':[
'提供了对HTTP协议非阻塞模式的处理,作用同Http11Processor'
]}
]
}
#build the xmind
xmind.build(content,r2)
#save the xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"[email protected]"
] | |
39196a1a8ee52ea5b7922cb7fe6d55035522c25f | a821e5a6e45665f7e219e3e3ed07c150219e4add | /exercicio87.py | 15af26a0b52b2b4d33419be908b3021ddf0dc2b6 | [] | no_license | andreplacet/exercicios_python | 18a28af942eb2bb211438f0aca10d651b7324fe5 | 0affe524e99f7739b08fdf58e2b54c5b577c8624 | refs/heads/master | 2020-08-29T02:05:52.850805 | 2020-06-01T19:09:50 | 2020-06-01T19:09:50 | 217,887,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | lista1 = []
lista2 = []
lista3 = []
matriz = []
somapares = somacoluna = 0
for c in range(0, 3):
lista1.append(int(input(f'Digite um valor para a [linha 0 coluna {c}]: ')))
matriz.append(lista1[:])
for i in range(0, 3):
lista2.append(int(input(f'Digite um valor para a [linha 1 coluna {i}]: ')))
matriz.append(lista2[:])
for d in range(0, 3):
lista3.append(int(input(f'Digite um valor para a [linha 2 coluna {d}]: ')))
matriz.append(lista3[:])
for num in matriz:
print(f'[ {num[0]} ] [ {num[1]} ] [ {num[2]} ]')
for par in matriz:
for j in range(0, len(par)):
if par[j] % 2 ==0:
somapares += par[j]
for colunaum in matriz:
somacoluna += colunaum[2]
print('-=' * 20)
print(f'A soma de todos os valores pares é: {somapares}')
print(f'A soma dos valores da terceia coluna é: {somacoluna}')
print(f'O maior valor da segunda linha é {max(lista2)}')
print('-=' * 20)
print('\033[33mFinalizado com Sucesso!\033[m') | [
"[email protected]"
] | |
88366e6cbc6e5a8631c3457c55bcaef9ed8a6ec1 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1788.py | ae186084bcedfcf170bed7527fb4be333759830b | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/06AA9498-C93C-F44E-A6EF-E904A67AA1B7.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1788.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
b0567b632314b432d96a5ec767e7e49d16a3a590 | 1f32af53c3f0d1cf1176e72a887135e1e5309e4b | /en/1_dqn/dqn.py | f55918bfaba1c556492ae116865e843d11dc1e8d | [
"MIT"
] | permissive | seungjaeryanlee/rainbow-ride | bff020c61fd86d03993d4f7c68d965d071753105 | 29b0af19f5cc3d41433a8b405e736bc49309f540 | refs/heads/master | 2020-03-20T23:30:51.723184 | 2018-06-22T04:43:45 | 2018-06-22T04:43:47 | 137,849,945 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import torch.nn as nn
class DQN(nn.Module):
"""
A simple Deep Q-Network with fully connected layers.
"""
def __init__(self, input_dims, output_dims):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(input_dims, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, output_dims)
)
def forward(self, x):
return self.layers(x)
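# --- usage sketch (added for illustration; not part of the original repo) ---
# Builds the network for a 4-dimensional observation and 2 discrete actions
# (CartPole-like sizes, chosen here only as an example) and runs a forward pass.
if __name__ == "__main__":
    import torch
    net = DQN(input_dims=4, output_dims=2)
    q_values = net(torch.randn(32, 4))  # batch of 32 states
    print(q_values.shape)               # torch.Size([32, 2])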
| [
"[email protected]"
] | |
af6517f8f612bd6daa93525c115f29b30d940596 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02997/s415838912.py | f36fccc0a8389864ad1d55d8962f2354c082d9bc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | N, K = map(int, input().split())
edges = [(1, to) for to in range(2, N + 1)]
M = (N - 1) * (N - 2) // 2
if K > M:
print(-1)
exit()
for fr in range(2, N + 1):
for to in range(fr + 1, N + 1):
if M == K:
break
edges.append((fr, to))
M -= 1
print(len(edges))
for fr ,to in edges:
print(fr ,to)
| [
"[email protected]"
] | |
c738a11291e2651813c258b3b4ef5316c6a08b75 | 3076bd73c41ed665c987d99218b8a3599fa05ec2 | /tests/test_hopfield_net.py | c3a7716eb7e004205ee9b22386d2acd827f3efe6 | [
"Apache-2.0"
] | permissive | lantunes/cellpylib | 5135a6986e68424d9ec8b09fb42421b3dcf046d1 | 743e936d48f8520f6f4ac652570ac7bb46414189 | refs/heads/master | 2023-03-07T03:31:32.380400 | 2023-02-21T12:34:28 | 2023-02-21T12:34:28 | 126,618,694 | 203 | 32 | Apache-2.0 | 2023-02-15T03:40:38 | 2018-03-24T16:33:15 | Python | UTF-8 | Python | false | false | 2,486 | py | import unittest
import cellpylib as cpl
import numpy as np
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHopfieldNet(unittest.TestCase):
def test_hopfield_net(self):
np.random.seed(0)
# patterns for training
zero = [
0, 1, 1, 1, 0,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
0, 1, 1, 1, 0,
0, 0, 0, 0, 0]
one = [
0, 1, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 0, 0]
two = [
1, 1, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 1, 0,
0, 1, 1, 0, 0,
1, 0, 0, 0, 0,
1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
# replace the zeroes with -1 to make these vectors bipolar instead of binary
one = [-1 if x == 0 else x for x in one]
two = [-1 if x == 0 else x for x in two]
zero = [-1 if x == 0 else x for x in zero]
P = [zero, one, two]
hopfield_net = cpl.HopfieldNet(num_cells=35)
hopfield_net.train(P)
expected_weights = self._convert_to_ndarray("hopfield_net_weights.txt")
np.testing.assert_equal(expected_weights, hopfield_net.W)
expected_activities = self._convert_to_ndarray("hopfield_net.ca")
half_two = [
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 1, 0, 0,
1, 0, 0, 0, 0,
1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
half_two = [-1 if x == 0 else x for x in half_two]
cellular_automaton = np.array([half_two])
cellular_automaton = cpl.evolve(cellular_automaton, timesteps=155,
apply_rule=hopfield_net.apply_rule, r=hopfield_net.r)
np.testing.assert_equal(expected_activities, cellular_automaton)
def _convert_to_ndarray(self, filename, dtype=int):
with open(os.path.join(THIS_DIR, 'resources', filename), 'r') as content_file:
content = content_file.read()
content = content.replace('[[', '')
content = content.replace(']]', '')
content = content.replace('[', '')
content = content.replace('],', ';')
content = [[dtype(i) for i in x.split(',')] for x in content.split(';')]
return np.array(content) | [
"[email protected]"
] | |
ce6d34048467bbdff945b612bc3eba00b13c0baf | dd9de22427fd78910bdb6bff79b69dfb39d233d1 | /accounts/urls.py | 7f25807c9702aac4b0e2e9c2dae99ea84b018267 | [] | no_license | sulembutproton/joinsys | a21162d9d887194d3f252fc14da8adf538bd5c30 | 729fd046446b9389dab3a3cca25a50ddfb173af0 | refs/heads/master | 2023-04-17T01:59:14.639572 | 2021-04-28T17:44:25 | 2021-04-28T17:44:25 | 362,556,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
urlpatterns = [
path('register/', views.Register.as_view(), name='register'),
path('login/', views.user_login, name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='accounts/logout.html'), name='logout'),
path('settings/<int:pk>', views.AccountSettings.as_view(), name='settings'),
path('dashboard/', views.Dashboard.as_view(), name='dashboard'),
] | [
"[email protected]"
] | |
cba9ce61d1502a557bd65c9c39a8c4939f68e3e6 | e972dc486e62152981177f85b5f9cff919ac0867 | /et_wiki/wiki_comp.py | a9e8c24fbdb4fa2fc5c2dca405063eee2b8bf36a | [] | no_license | yeongsunpark/cute | d81b9b03f747f65bed742b10b2f9a59f69efea96 | d69f918f9a1f1d6db70bc62272fc0ce582d7bf50 | refs/heads/master | 2020-03-27T12:43:41.728918 | 2019-04-29T04:41:47 | 2019-04-29T04:41:47 | 146,564,948 | 0 | 2 | null | 2018-11-06T07:45:59 | 2018-08-29T07:52:20 | Python | UTF-8 | Python | false | false | 249 | py | f2 = open("new_chunk.txt", "w")
with open("/data1/we_kor/kowiki_pages_170620_sent_chunk_10.tsv", "r") as f:
for line in f:
item = line.split("\t")
title = item[1]
f2.write(title)
f2.write("\n")
f2.close() | [
"[email protected]"
] | |
854c291b441bfdbf6c527767955aed060484ef1c | a5d22c99e781270317078f8980c934bcc71e6e8b | /neodroidvision/detection/single_stage/ssd/bounding_boxes/__init__.py | 39a56c4c514c00d0e79606e1f538b09770dd807b | [
"Apache-2.0"
] | permissive | aivclab/vision | dda3b30648b01c2639d64a016b8dbcfccb87b27f | 06839b08d8e8f274c02a6bcd31bf1b32d3dc04e4 | refs/heads/master | 2023-08-21T22:35:10.114394 | 2022-11-02T10:14:08 | 2022-11-02T10:14:08 | 172,566,233 | 1 | 3 | Apache-2.0 | 2023-08-16T05:11:30 | 2019-02-25T19:00:57 | Python | UTF-8 | Python | false | false | 265 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 05/03/2020
"""
from .conversion import *
from .ssd_priors import *
from .ssd_transforms import *
from .tensor_metrics import *
| [
"[email protected]"
] | |
a9045e150a1a97f2d4d88e005f75da3043df176e | f2e97b979c648b3b121ff0f8c37cf6ae998fa513 | /python/compare_thread_asyncio.py | 8345af6a5fd57ddce861f2e20365a413f2ddb79c | [] | no_license | LeonKennedy/LearningByLanguage | 64564271a323809ab27bd09b2d142a485d013ce2 | 51d44f54b13c2e0202f9986f3556ad7f93e55e7c | refs/heads/master | 2022-11-10T22:46:07.917794 | 2022-11-08T10:48:52 | 2022-11-08T10:48:52 | 52,009,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# @Filename: spinner_thread.py
# @Author: olenji - [email protected]
# @Description: 多线程和异步的对比
# @Create: 2018-12-10 10:29:31
# @Last Modified: 2018-12-10 10:29:31
import threading, asyncio
import itertools, time, sys, pdb
# -------------- threading --------------
class Signal:
go = True
def spin(msg, signal):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = char + ' ' + msg
write(status)
flush()
write('\x08' * len(status))
time.sleep(.1)
if not signal.go:
break
write(' ' * len(status) + '\x08' * len(status))
def slow_function():
time.sleep(3)
return 32
def supervisor():
signal = Signal()
spinner = threading.Thread(target=spin,
args=('thinking!olenji', signal))
print('spinner object:', spinner)
spinner.start()
result = slow_function()
signal.go = False
spinner.join()
return result
def main():
result = supervisor()
print('Answer:', result)
# ------------- asyncio --------------
@asyncio.coroutine
def spin_async(msg):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = char + ' ' + msg
write(status)
flush()
write('\x08' * len(status))
try:
yield from asyncio.sleep(.1)
except asyncio.CancelledError:
break
write(' ' * len(status) + '\x08' * len(status))
@asyncio.coroutine
def slow_function():
yield from asyncio.sleep(3) # sleep without blocking
return 42
@asyncio.coroutine
def supervisor_async():
spinner = asyncio.async(spin_async('thinking!'))
print('spinner object:', spinner)
result = yield from slow_function()
spinner.cancel() # Task对象课可以取消
return result
def main_async():
loop = asyncio.get_event_loop()
result = loop.run_until_complete(supervisor_async())
loop.close()
print('Answer:', result)
if __name__ == '__main__':
main_async()
| [
"[email protected]"
] | |
370bba158aa6b9ed78e5e53ff1ec9aece224f346 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03612/s889391086.py | 70896f9cae948f4c33f1f7fbb4659992722e6cbd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | n = int(input())
*p, = map(int, input().split())
q = [True if p[i] != i+1 else False for i in range(n)] + [True]
ans = 0
for i in range(n):
if not q[i]:
ans += 1
q[i] = q[i+1] = True
print(ans)
| [
"[email protected]"
] | |
7303cff5ec99102bf82cefbe7dbba1e098b81285 | 9088d49a7716bdfc9b5770e8e54ebf7be6958fcf | /Tests/Aula_20d.py | 0b3ae15da823725ffb438bbc6b939143e05b6448 | [
"MIT"
] | permissive | o-Ian/Practice-Python | 579e8ff5a63a2e7efa7388bf2d866bb1b11bdfe2 | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | refs/heads/main | 2023-05-02T02:21:48.459725 | 2021-05-18T18:46:06 | 2021-05-18T18:46:06 | 360,925,568 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def count(*num):
print(f'Recebi ao todos os números {num}. Há {len(num)} números.')
count(12, 34, 2, 1, 4)
count(4, 3, 1, 7, 10)
count(1, 2)
| [
"[email protected]"
] | |
f4386254b812a61719e4b1a2931b317d490bcc62 | 65f3ada144f45bd5dbaf3d37ca9366ff54796f0c | /month7/findLadders.py | a417abd90108d7af8b53f6a6496eb2eadb19d4eb | [] | no_license | BruceHi/leetcode | 43977db045d9b78bef3062b16d04ae3999fe9ba7 | 0324d247a5567745cc1a48b215066d4aa796abd8 | refs/heads/master | 2022-09-22T14:18:41.022161 | 2022-09-11T23:45:21 | 2022-09-11T23:45:21 | 248,240,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | # 单词接龙
from typing import List
from collections import deque
from collections import defaultdict
from string import ascii_lowercase
class Solution:
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# word_set = set(wordList)
# if endWord not in word_set:
# return []
#
# def ischange(A, B):
# count = n = len(A)
# i = 0
# while i < n:
# if A[i] == B[i]:
# count -= 1
# i += 1
# return count == 1
#
# tmp, res = [beginWord], []
#
# def dfs(begin, end, word_set):
# if ischange(begin, end):
# tmp.append(end)
# res.append(tmp)
# return
# for word in word_set:
# if ischange(begin, word):
# tmp.append(word)
# word_set.remove(word)
# dfs(word, end, word_set)
# word_set.add(word) # 会打乱原有顺序
#
# dfs(beginWord, endWord, word_set)
# return res
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# if endWord not in wordList:
# return []
#
# def ischange(A, B):
# count = n = len(A)
# i = 0
# while i < n:
# if A[i] == B[i]:
# count -= 1
# i += 1
# return count == 1
#
# tmp = [beginWord]
#
# def dfs(begin, end, wordList):
# if ischange(begin, end):
# tmp.append(end)
# return
# for i, word in enumerate(wordList):
# if ischange(begin, word):
# tmp.append(word)
# dfs(word, end, wordList[:i] + wordList[i+1:])
# # word_set.add(word) # 会打乱原有顺序
#
# dfs(beginWord, endWord, wordList)
# return tmp
    # bfs version: revisit later, not fully understood yet
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# wordList = set(wordList)
# dic = defaultdict(list)
# n = len(beginWord)
# for w in wordList:
# for i in range(n):
# dic[w[:i] + '*' + w[i + 1:]].append(w)
# q, s = deque([(beginWord, [beginWord])]), deque() # 列表里面是一个一个的元组
# seen = set() # 访问过的结点都要记录
# res = []
# while q:
# while q:
# w, path = q.popleft()
# if w == endWord: res.append(path)
# seen.add(w)
# for i in range(n):
# for v in dic[w[:i] + '*' + w[i + 1:]]:
# if v not in seen:
# s.append((v, path + [v]))
# if res: return res # 先有结果的自然是最短的
# q, s = s, q # 因为要交换,所以两者的数据类型应该相同。
# return []
    # still confusing on a first read
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
if endWord not in wordList:
return []
        # group one-letter transformations together: the undirected graph is stored like an adjacency list
dic = defaultdict(list)
n = len(beginWord)
for w in wordList:
for i in range(n):
dic[w[:i] + '*' + w[i + 1:]].append(w)
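                # e.g. "hot" lands in buckets "*ot", "h*t" and "ho*"; any two words in
                # the same bucket differ by exactly one letter (illustration added).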
queue, tmp = deque([(beginWord, [beginWord])]), deque()
res = []
visited = set()
while queue:
while queue:
w, path = queue.popleft()
if w == endWord:
res.append(path)
visited.add(w)
for i in range(n):
for v in dic[w[:i] + '*' + w[i + 1:]]:
if v not in visited:
tmp.append((v, path + [v]))
if res:
return res
queue, tmp = tmp, queue
return []
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# if endWord not in wordList:
# return []
# res = [beginWord]
# n = len(beginWord)
# for i in range(n):
# for c in ascii_lowercase:
# tmp = beginWord[:i] + c + beginWord[i+1:]
# if tmp in wordList:
# res.append(tmp)
# wordList.remove(tmp)
# self.findLadders(tmp, endWord, wordList)
# wordList.append(tmp)
# return res
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# if endWord not in wordList:
# return []
# dic = defaultdict(list)
# n = len(beginWord)
#
# for word in wordList:
# for i in range(n):
# tmp = word[:i] + '-' + word[i+1:]
# dic[tmp].append(word)
s = Solution()
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log", "cog"]
print(s.findLadders(beginWord, endWord, wordList))
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log"]
print(s.findLadders(beginWord, endWord, wordList))
| [
"[email protected]"
] | |
59496f68b537c7454649f2c8a3409422cdb354cc | 5179b07b8d1a31df18612ce55d35c56b851cead8 | /mmaction/datasets/pipelines/loading.py | ecaafa6ba9e86a7c801cdbee8f33028f861796f3 | [
"Apache-2.0"
] | permissive | hamidehkerdegari/VFS | 3e9c427c4a8ae0a6b66a3a1378bac5c6f9daaf51 | 8e055cc191578706f05b7484facf44be6fb1525a | refs/heads/master | 2023-08-24T09:40:46.678233 | 2021-09-26T18:24:38 | 2021-09-26T18:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,320 | py | import io
import os
import os.path as osp
import shutil
import warnings
import mmcv
import numpy as np
from mmcv.fileio import FileClient
from torch.nn.modules.utils import _pair
from ...utils import get_random_string, get_shm_dir, get_thread_id
from ..registry import PIPELINES
@PIPELINES.register_module()
class SampleFrames(object):
"""Sample frames from the video.
Required keys are "filename", "total_frames", "start_index" , added or
modified keys are "frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
twice_sample (bool): Whether to use twice sample when testing.
If set to True, it will sample frames with and without fixed shift,
which is commonly used for testing in TSM model. Default: False.
out_of_bound_opt (str): The way to deal with out of bounds frame
indexes. Available options are 'loop', 'repeat_last'.
Default: 'loop'.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
start_index (None): This argument is deprecated and moved to dataset
            class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
twice_sample=False,
out_of_bound_opt='loop',
test_mode=False,
start_index=None,
random_frame_interval=False):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.temporal_jitter = temporal_jitter
self.twice_sample = twice_sample
self.out_of_bound_opt = out_of_bound_opt
self.test_mode = test_mode
assert self.out_of_bound_opt in ['loop', 'repeat_last']
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
self.random_frame_interval = random_frame_interval
if self.random_frame_interval:
self.frame_interval = None
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than clips num or origin
frames length, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
Calculate the average interval for selected frames, and shift them
fixedly by avg_interval/2. If set twice_sample True, it will sample
frames together without fixed shift. If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _sample_clips(self, num_frames):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
clip_offsets = self._get_train_clips(num_frames)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
# TODO: force re-generate frame_interval
if self.random_frame_interval:
self.frame_interval = np.random.randint(total_frames)
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
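        # Worked example (illustration added): clip_len=4, frame_interval=2,
        # num_clips=1 and total_frames=20 with a sampled clip offset of 5 give
        # frame_inds [5, 7, 9, 11] before start_index is added.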
return results
@PIPELINES.register_module()
class DuplicateFrames(object):
def __init__(self, times, as_clip=True):
self.times = times
self.as_clip = as_clip
def __call__(self, results):
if self.as_clip:
results['frame_inds'] = np.tile(results['frame_inds'], self.times)
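            # illustration added: np.tile([1, 2, 3], 2) -> [1, 2, 3, 1, 2, 3] (extra clips),
            # whereas np.repeat in the branch below gives [1, 1, 2, 2, 3, 3] (a longer clip).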
results['num_clips'] *= self.times
else:
results['frame_inds'] = np.repeat(results['frame_inds'],
self.times)
results['clip_len'] *= self.times
return results
@PIPELINES.register_module()
class Frame2Clip(object):
def __call__(self, results):
clip_len = results['clip_len']
num_clips = results['num_clips']
results['clip_len'] = num_clips
results['num_clips'] = clip_len
return results
@PIPELINES.register_module()
class Clip2Frame(object):
def __init__(self, clip_len):
self.clip_len = clip_len
def __call__(self, results):
clip_len = results['clip_len']
num_clips = results['num_clips']
results['clip_len'] = self.clip_len
results['num_clips'] = num_clips * clip_len // self.clip_len
return results
@PIPELINES.register_module()
class AppendFrames(object):
def __init__(self,
num_frames,
frame_interval,
temporal_jitter=False,
out_of_bound_opt='loop'):
self.num_frames = num_frames
self.frame_interval = frame_interval
self.temporal_jitter = temporal_jitter
self.out_of_bound_opt = out_of_bound_opt
assert self.out_of_bound_opt in ['loop', 'repeat_last']
def __call__(self, results):
total_frames = results['total_frames']
clip_len = results['clip_len']
num_clips = results['num_clips']
assert clip_len == 1
assert num_clips % 2 == 0
frame_inds = results['frame_inds']
before_frame_offsets = -np.flip(
np.arange(self.num_frames + 1)[None, :]) * self.frame_interval
after_frame_offsets = np.arange(self.num_frames +
1)[None, :] * self.frame_interval
if self.temporal_jitter:
before_frame_offsets += np.concatenate(
(np.random.randint(self.frame_interval,
size=self.num_frames), [0]))
after_frame_offsets -= np.concatenate(
([0],
np.random.randint(self.frame_interval, size=self.num_frames)))
before_frame_inds = frame_inds[:num_clips // 2,
None] + before_frame_offsets
before_frame_inds = np.concatenate(before_frame_inds)
after_frame_inds = frame_inds[num_clips // 2:,
None] + after_frame_offsets
after_frame_inds = np.concatenate(after_frame_inds)
frame_inds = np.concatenate([before_frame_inds, after_frame_inds])
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
results['frame_inds'] = frame_inds
results['clip_len'] += self.num_frames
return results
@PIPELINES.register_module()
class UntrimmedSampleFrames(object):
"""Sample frames from the untrimmed video.
Required keys are "filename", "total_frames", added or modified keys are
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): The length of sampled clips. Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 16.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 1.
"""
def __init__(self, clip_len=1, frame_interval=16, start_index=1):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.start_index = start_index
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
clip_centers = np.arange(self.frame_interval // 2, total_frames,
self.frame_interval)
num_clips = clip_centers.shape[0]
frame_inds = clip_centers[:, None] + np.arange(
-(self.clip_len // 2), self.clip_len -
(self.clip_len // 2))[None, :]
# clip frame_inds to legal range
frame_inds = np.clip(frame_inds, 0, total_frames - 1)
frame_inds = np.concatenate(frame_inds) + self.start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = num_clips
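        # Worked example (illustration added): clip_len=1, frame_interval=16 and
        # total_frames=64 give clip_centers [8, 24, 40, 56], so num_clips=4 and
        # frame_inds are those centers plus start_index.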
return results
@PIPELINES.register_module()
class DenseSampleFrames(SampleFrames):
"""Select frames from the video by dense sample strategy.
Required keys are "filename", added or modified keys are "total_frames",
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
sample_range (int): Total sample range for dense sample.
Default: 64.
num_sample_positions (int): Number of sample start positions, Which is
only used in test mode. Default: 10.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
sample_range=64,
num_sample_positions=10,
temporal_jitter=False,
out_of_bound_opt='loop',
test_mode=False):
super().__init__(
clip_len,
frame_interval,
num_clips,
temporal_jitter,
out_of_bound_opt=out_of_bound_opt,
test_mode=test_mode)
self.sample_range = sample_range
self.num_sample_positions = num_sample_positions
def _get_train_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval and set
start index 0 when sample_pos == 1 or randomly choose from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
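        # Worked example (illustration added): num_frames=100, sample_range=64 and
        # num_clips=4 give sample_position=37 and interval=16; with start_idx=10 the
        # clip_offsets become [10, 26, 42, 58].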
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets
@PIPELINES.register_module()
class SequentialSampleFrames(object):
def __init__(self, frame_interval=1):
self.frame_interval = frame_interval
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
results['frame_inds'] = np.arange(0, total_frames, self.frame_interval)
results['frame_inds'] += results['start_index']
results['clip_len'] = total_frames
results['frame_interval'] = self.frame_interval
results['num_clips'] = 1
return results
@PIPELINES.register_module()
class SampleProposalFrames(SampleFrames):
"""Sample frames from proposals in the video.
Required keys are "total_frames" and "out_proposals", added or
modified keys are "frame_inds", "frame_interval", "num_clips",
'clip_len' and 'num_proposals'.
Args:
clip_len (int): Frames of each sampled output clip.
body_segments (int): Number of segments in course period.
aug_segments (list[int]): Number of segments in starting and
ending period.
aug_ratio (int | float | tuple[int | float]): The ratio
of the length of augmentation to that of the proposal.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
test_interval (int): Temporal interval of adjacent sampled frames
in test mode. Default: 6.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
mode (str): Choose 'train', 'val' or 'test' mode.
Default: 'train'.
"""
def __init__(self,
clip_len,
body_segments,
aug_segments,
aug_ratio,
frame_interval=1,
test_interval=6,
temporal_jitter=False,
mode='train'):
super().__init__(
clip_len,
frame_interval=frame_interval,
temporal_jitter=temporal_jitter)
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
raise TypeError(f'aug_ratio should be int, float'
f'or tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
assert mode in ['train', 'val', 'test']
self.mode = mode
self.test_interval = test_interval
def _get_train_indices(self, valid_length, num_segments):
"""Get indices of different stages of proposals in train mode.
It will calculate the average interval for each segment,
and randomly shift them within offsets between [0, average_duration].
If the total number of frames is smaller than num segments, it will
return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
avg_interval = (valid_length + 1) // num_segments
if avg_interval > 0:
base_offsets = np.arange(num_segments) * avg_interval
offsets = base_offsets + np.random.randint(
avg_interval, size=num_segments)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
def _get_val_indices(self, valid_length, num_segments):
"""Get indices of different stages of proposals in validation mode.
It will calculate the average interval for each segment.
If the total number of valid length is smaller than num segments,
it will return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in validation mode.
"""
if valid_length >= num_segments:
avg_interval = valid_length / float(num_segments)
base_offsets = np.arange(num_segments) * avg_interval
offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
def _get_proposal_clips(self, proposal, num_frames):
"""Get clip offsets in train mode.
It will calculate sampled frame indices in the proposal's three
stages: starting, course and ending stage.
Args:
proposal (object): The proposal object.
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_length = duration - ori_clip_len
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
if self.mode == 'train':
starting_offsets = self._get_train_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_train_indices(valid_length,
self.body_segments)
ending_offsets = self._get_train_indices(valid_ending_length,
self.aug_segments[1])
elif self.mode == 'val':
starting_offsets = self._get_val_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_val_indices(valid_length,
self.body_segments)
ending_offsets = self._get_val_indices(valid_ending_length,
self.aug_segments[1])
starting_offsets += valid_starting
course_offsets += start_frame
ending_offsets += end_frame
offsets = np.concatenate(
(starting_offsets, course_offsets, ending_offsets))
return offsets
def _get_train_clips(self, num_frames, proposals):
"""Get clip offsets in train mode.
It will calculate sampled frame indices of each proposal, and then
assemble them.
Args:
num_frames (int): Total number of frame in the video.
proposals (list): Proposals fetched.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
clip_offsets = []
for proposal in proposals:
proposal_clip_offsets = self._get_proposal_clips(
proposal[0][1], num_frames)
clip_offsets = np.concatenate(
[clip_offsets, proposal_clip_offsets])
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
It will calculate sampled frame indices based on test interval.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
return np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=np.int)
def _sample_clips(self, num_frames, proposals):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
proposals (list | None): Proposals fetched.
It is set to None in test mode.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.mode == 'test':
clip_offsets = self._get_test_clips(num_frames)
else:
assert proposals is not None
clip_offsets = self._get_train_clips(num_frames, proposals)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
out_proposals = results.get('out_proposals', None)
clip_offsets = self._sample_clips(total_frames, out_proposals)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
start_index = results['start_index']
frame_inds = np.mod(frame_inds, total_frames) + start_index
results['frame_inds'] = np.array(frame_inds).astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = (
self.body_segments + self.aug_segments[0] + self.aug_segments[1])
if self.mode in ['train', 'val']:
results['num_proposals'] = len(results['out_proposals'])
return results
@PIPELINES.register_module()
class PyAVInit(object):
"""Using pyav to initialize the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "filename",
added or modified keys are "video_reader", and "total_frames".
Args:
io_backend (str): io backend where frames are store.
Default: 'disk'.
kwargs (dict): Args for file client.
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the PyAV initiation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results
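# Minimal usage sketch (illustrative only; the file path is an assumption):
# init = PyAVInit()
# results = init({'filename': 'video.mp4'})
# # results now contains 'video_reader' (an av container) and 'total_frames'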
@PIPELINES.register_module()
class PyAVDecode(object):
"""Using pyav to decode the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "video_reader" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
def __init__(self, multi_thread=False):
self.multi_thread = multi_thread
def __call__(self, results):
"""Perform the PyAV loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
# track the max index so that decoding can stop early
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
results['video_reader'] = None
del container
# PyAV may yield fewer frames than the reported stream length, so index
# modulo len(imgs) to avoid an IndexError
results['imgs'] = [imgs[i % len(imgs)] for i in results['frame_inds']]
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(multi_thread={self.multi_thread})'
return repr_str
@PIPELINES.register_module()
class DecordInit(object):
"""Using decord to initialize the video_reader.
Decord: https://github.com/dmlc/decord
Required keys are "filename",
added or modified keys are "video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', num_threads=1, **kwargs):
self.io_backend = io_backend
self.num_threads = num_threads
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the PyAV loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import decord
except ImportError:
raise ImportError(
'Please run "pip install decord" to install Decord first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = decord.VideoReader(file_obj, num_threads=self.num_threads)
results['video_reader'] = container
results['total_frames'] = len(container)
return results
@PIPELINES.register_module()
class DecordDecode(object):
"""Using decord to decode the video.
Decord: https://github.com/dmlc/decord
Required keys are "video_reader", "filename" and "frame_inds",
added or modified keys are "imgs" and "original_shape".
"""
def __init__(self, **kwargs):
pass
def __call__(self, results):
"""Perform the Decord loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
# Decode each unique frame index once and cache it, so repeated indices reuse the decoded frame
frame_dict = {
idx: container[idx].asnumpy()
for idx in np.unique(frame_inds)
}
imgs = [frame_dict[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class OpenCVInit(object):
"""Using OpenCV to initalize the video_reader.
Required keys are "filename", added or modified keys are "new_path",
"video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
random_string = get_random_string()
thread_id = get_thread_id()
self.tmp_folder = osp.join(get_shm_dir(),
f'{random_string}_{thread_id}')
os.mkdir(self.tmp_folder)
def __call__(self, results):
"""Perform the OpenCV initiation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
# reuse the same temporary file for all downloads made from a given thread
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __del__(self):
shutil.rmtree(self.tmp_folder)
@PIPELINES.register_module()
class OpenCVDecode(object):
"""Using OpenCV to decode the video.
Required keys are "video_reader", "filename" and "frame_inds", added or
modified keys are "imgs", "img_shape" and "original_shape".
"""
def __init__(self):
pass
def __call__(self, results):
"""Perform the OpenCV loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
# last frame may be None in OpenCV
while isinstance(cur_frame, type(None)):
frame_ind -= 1
cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class RawFrameDecode(object):
"""Load and decode frames with given indices.
Required keys are "frame_dir", "filename_tmpl" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
for frame_idx in results['frame_inds']:
frame_idx += offset
if modality == 'RGB':
if 'frame_list' in results:
filepath = osp.join(directory,
results['frame_list'][frame_idx])
else:
filepath = osp.join(directory,
filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.extend([x_frame, y_frame])
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
if 'seg_map' in results:
seg_map = mmcv.imfrombytes(
self.file_client.get(results['seg_map']),
flag='unchanged',
backend='pillow')
results['ref_seg_map'] = seg_map
assert seg_map.shape == results['img_shape']
if 'pose_coord' in results:
pose_coord = results['pose_coord']
num_poses = pose_coord.shape[1]
height, width = imgs[0].shape[:2]
pose_map = np.zeros((height, width, num_poses), dtype=float)
sigma = results['sigma']
for j in range(num_poses):
if sigma > 0:
draw_label_map(pose_map[:, :, j], pose_coord[:, j], sigma)
else:
tx = int(pose_coord[0, j])
ty = int(pose_coord[1, j])
if 0 <= tx < width and 0 <= ty < height:
pose_map[ty, tx, j] = 1.0
results['ref_seg_map'] = pose_map
return results
def draw_label_map(img, pt, sigma):
# Draw a 2D gaussian
# Check that any part of the gaussian is in-bounds
ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or br[0] < 0
or br[1] < 0):
# If not, just return the image as is
return img
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return img
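# Example (illustrative): draw_label_map(np.zeros((64, 64)), (32, 32), sigma=2)
# writes a 13x13 (size = 6*sigma + 1) unnormalized Gaussian with peak value 1.0
# centred at pixel (32, 32).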
@PIPELINES.register_module()
class RawImageDecode(object):
"""Load and decode frames with given indices.
Required keys are "frame_dir", "filename_tmpl" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
# This loader handles a single still image, so every frame index must be 0 and no offset is allowed
assert np.all(results['frame_inds'] == 0)
assert offset == 0
filename = results['filename']
for frame_idx in results['frame_inds']:
frame_idx += offset
filepath = osp.join(filename)
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class FrameSelector(RawFrameDecode):
"""Deprecated class for ``RawFrameDecode``."""
def __init__(self, *args, **kwargs):
warnings.warn('"FrameSelector" is deprecated, please switch to'
'"RawFrameDecode"')
super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class LoadLocalizationFeature(object):
"""Load Video features for localizer with given video_name list.
Required keys are "video_name" and "data_prefix",
added or modified keys are "raw_feature".
Args:
raw_feature_ext (str): Raw feature file extension. Default: '.csv'.
"""
def __init__(self, raw_feature_ext='.csv'):
valid_raw_feature_ext = ('.csv', )
if raw_feature_ext not in valid_raw_feature_ext:
raise NotImplementedError
self.raw_feature_ext = raw_feature_ext
def __call__(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
data_prefix = results['data_prefix']
data_path = osp.join(data_prefix, video_name + self.raw_feature_ext)
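# The CSV is assumed to contain a header row followed by one row per snippet;
# transposing below yields an array of shape (feature_dim, num_snippets).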
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results
@PIPELINES.register_module()
class GenerateLocalizationLabels(object):
"""Load video label for localizer with given video_name list.
Required keys are "duration_frame", "duration_second", "feature_frame",
"annotations", added or modified keys are "gt_bbox".
"""
def __call__(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
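# Each annotated segment is normalised by the feature-corrected duration and
# clamped to [0, 1], so gt_bbox stores relative start/end times.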
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results
@PIPELINES.register_module()
class LoadProposals(object):
"""Loading proposals with given proposal results.
Required keys are "video_name"
added or modified keys are 'bsp_feature', 'tmin', 'tmax',
'tmin_score', 'tmax_score' and 'reference_temporal_iou'.
Args:
top_k (int): The top k proposals to be loaded.
pgm_proposals_dir (str): Directory to load proposals.
pgm_features_dir (str): Directory to load proposal features.
proposal_ext (str): Proposal file extension. Default: '.csv'.
feature_ext (str): Feature file extension. Default: '.npy'.
"""
def __init__(self,
top_k,
pgm_proposals_dir,
pgm_features_dir,
proposal_ext='.csv',
feature_ext='.npy'):
self.top_k = top_k
self.pgm_proposals_dir = pgm_proposals_dir
self.pgm_features_dir = pgm_features_dir
valid_proposal_ext = ('.csv', )
if proposal_ext not in valid_proposal_ext:
raise NotImplementedError
self.proposal_ext = proposal_ext
valid_feature_ext = ('.npy', )
if feature_ext not in valid_feature_ext:
raise NotImplementedError
self.feature_ext = feature_ext
def __call__(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
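# Assumed CSV column layout: 0=tmin, 1=tmax, 2=tmin_score, 3=tmax_score,
# 5=reference_temporal_iou; column 4 (unused here) is presumably an overall
# proposal score.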
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results
| [
"[email protected]"
] | |
df7913dd36c7238962153d6e680c9a6e7ad9d375 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/13164201.py | 05dfc374b8b17fa012a7aa7f0a1397e0cc3ab6c7 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13164201.py generated: Fri, 27 Mar 2015 15:48:02
#
# Event Type: 13164201
#
# ASCII decay Descriptor: {[[B_s0]nos -> (D*(2007)~0 -> (D~0 -> K+ pi-) gamma ) (phi(1020) -> K+ K-) ]cc, [[B_s0]os -> (D*(2007)0 -> (D0 -> K- pi+) gamma ) (phi(1020) -> K- K+) ]cc}
#
from Configurables import Generation
Generation().EventType = 13164201
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_Dst0phi,D0gamma,Kpi=DecProdCut,HELAMP001.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13164201
| [
"[email protected]"
] | |
32aae12aa67d9697a2cbea89e1ab6142f273cd3b | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/healthcareapis/azure-mgmt-healthcareapis/azure/mgmt/healthcareapis/models/__init__.py | cb27d5affb6e885dc7d62df53c2daa9ddf2efef1 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 2,970 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import CheckNameAvailabilityParameters
from ._models_py3 import ErrorDetails, ErrorDetailsException
from ._models_py3 import ErrorDetailsInternal
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationResultsDescription
from ._models_py3 import Resource
from ._models_py3 import ServiceAccessPolicyEntry
from ._models_py3 import ServiceAuthenticationConfigurationInfo
from ._models_py3 import ServiceCorsConfigurationInfo
from ._models_py3 import ServiceCosmosDbConfigurationInfo
from ._models_py3 import ServicesDescription
from ._models_py3 import ServicesNameAvailabilityInfo
from ._models_py3 import ServicesPatchDescription
from ._models_py3 import ServicesProperties
except (SyntaxError, ImportError):
from ._models import CheckNameAvailabilityParameters
from ._models import ErrorDetails, ErrorDetailsException
from ._models import ErrorDetailsInternal
from ._models import Operation
from ._models import OperationDisplay
from ._models import OperationResultsDescription
from ._models import Resource
from ._models import ServiceAccessPolicyEntry
from ._models import ServiceAuthenticationConfigurationInfo
from ._models import ServiceCorsConfigurationInfo
from ._models import ServiceCosmosDbConfigurationInfo
from ._models import ServicesDescription
from ._models import ServicesNameAvailabilityInfo
from ._models import ServicesPatchDescription
from ._models import ServicesProperties
from ._paged_models import OperationPaged
from ._paged_models import ServicesDescriptionPaged
from ._healthcare_apis_management_client_enums import (
ProvisioningState,
Kind,
ServiceNameUnavailabilityReason,
OperationResultStatus,
)
__all__ = [
'CheckNameAvailabilityParameters',
'ErrorDetails', 'ErrorDetailsException',
'ErrorDetailsInternal',
'Operation',
'OperationDisplay',
'OperationResultsDescription',
'Resource',
'ServiceAccessPolicyEntry',
'ServiceAuthenticationConfigurationInfo',
'ServiceCorsConfigurationInfo',
'ServiceCosmosDbConfigurationInfo',
'ServicesDescription',
'ServicesNameAvailabilityInfo',
'ServicesPatchDescription',
'ServicesProperties',
'ServicesDescriptionPaged',
'OperationPaged',
'ProvisioningState',
'Kind',
'ServiceNameUnavailabilityReason',
'OperationResultStatus',
]
| [
"[email protected]"
] | |
935577a52f81fd6a39af6a8ab69bbb45ab1ed8b6 | 4809471274d6e136ac66d1998de5acb185d1164e | /pypureclient/flasharray/FA_2_5/models/software_installation_step.py | 4d482e18e00abb8dc317c2cd8d70662c91a5fdfc | [
"BSD-2-Clause"
] | permissive | astrojuanlu/py-pure-client | 053fef697ad03b37ba7ae21a0bbb466abf978827 | 6fa605079950765c316eb21c3924e8329d5e3e8a | refs/heads/master | 2023-06-05T20:23:36.946023 | 2021-06-28T23:44:24 | 2021-06-28T23:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,629 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class SoftwareInstallationStep(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'start_time': 'int',
'end_time': 'int',
'checks': 'list[SoftwareInstallationStepsChecks]',
'description': 'str',
'details': 'str',
'hop_version': 'str',
'installation': 'Reference',
'status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'start_time': 'start_time',
'end_time': 'end_time',
'checks': 'checks',
'description': 'description',
'details': 'details',
'hop_version': 'hop_version',
'installation': 'installation',
'status': 'status'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
start_time=None, # type: int
end_time=None, # type: int
checks=None, # type: List[models.SoftwareInstallationStepsChecks]
description=None, # type: str
details=None, # type: str
hop_version=None, # type: str
installation=None, # type: models.Reference
status=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified.
name (str): Name of the resource. The name cannot be modified.
start_time (int): Start time in milliseconds since the UNIX epoch.
end_time (int): End time in milliseconds since the UNIX epoch.
checks (list[SoftwareInstallationStepsChecks]): A list of checks in this upgrade step.
description (str): Detailed description of the step.
details (str): Detailed result of the step used to diagnose step failures.
hop_version (str): The version to which the current hop is upgrading.
installation (Reference): Referenced `software-installation` to which the step belongs.
status (str): Status of the step. Valid values are `running` and `finished`. A status of `running` indicates that the step has not finished. A status of `finished` indicates that the check has finished.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
if checks is not None:
self.checks = checks
if description is not None:
self.description = description
if details is not None:
self.details = details
if hop_version is not None:
self.hop_version = hop_version
if installation is not None:
self.installation = installation
if status is not None:
self.status = status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareInstallationStep`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SoftwareInstallationStep, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SoftwareInstallationStep):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
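# Minimal usage sketch (assumed, not part of the generated client):
# step = SoftwareInstallationStep(name='upgrade-check', status='finished')
# print(step.to_dict())  # -> {'name': 'upgrade-check', 'status': 'finished'}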
| [
"[email protected]"
] | |
f31541e6d2ecf93b94e193eee325b50654e6e8d5 | d2bc6792983724b22175c9d42a5b5a0fa174d576 | /Trakttv.bundle/Contents/Code/pts/session_manager.py | d335e61d9ad5ddf1cfb39c79ce1629805df7ee6b | [] | no_license | frentrop/Plex-Trakt-Scrobbler | f8e70bc4d1cf82545f675447bd0237a6436f41f5 | 70a59f62eb3812f9dba36a45697a4123b8c89dd9 | refs/heads/master | 2021-01-17T22:13:52.603253 | 2015-02-18T11:12:16 | 2015-02-18T11:12:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | from core.helpers import total_seconds
from core.logger import Logger
from data.watch_session import WatchSession
from pts.scrobbler import ScrobblerMethod
from datetime import datetime
from threading import Thread
import traceback
import time
log = Logger('pts.session_manager')
class SessionManager(Thread):
def __init__(self):
self.active = True
super(SessionManager, self).__init__()
def run(self):
while self.active:
try:
self.check_sessions()
except Exception, ex:
log.error('Exception raised in session manager: %s', ex, exc_info=True)
time.sleep(5)
def check_sessions(self):
sessions = WatchSession.all()
if not len(sessions):
return
for key, ws in sessions:
self.check_paused(ws)
def check_paused(self, ws):
if not ws or ws.cur_state != 'paused' or not ws.paused_since:
return
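# `Datetime` below is assumed to be the Plex plugin framework's built-in helper
# (it is not the stdlib `datetime` imported above).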
if ws.active and Datetime.Now() > ws.paused_since + Datetime.Delta(seconds=15):
log.debug("%s paused for 15s, watching status cancelled" % ws.title)
ws.active = False
ws.save()
if not self.send_action(ws, 'pause'):
log.info('Failed to send "pause" action for watch session')
def start(self):
# Cleanup sessions
self.cleanup()
# Start thread
super(SessionManager, self).start()
def stop(self):
self.active = False
@staticmethod
def send_action(ws, action):
if not ws.type:
return False
if ScrobblerMethod.handle_action(ws, action):
return False
return True
@staticmethod
def cleanup():
log.debug('Cleaning up stale or invalid sessions')
sessions = WatchSession.all()
if not len(sessions):
return
for key, ws in sessions:
delete = False
# Destroy invalid sessions
if ws is None:
delete = True
elif not ws.last_updated or type(ws.last_updated) is not datetime:
delete = True
elif total_seconds(datetime.now() - ws.last_updated) / 60 / 60 > 24:
# Destroy sessions last updated over 24 hours ago
log.debug('Session %s was last updated over 24 hours ago, queued for deletion', key)
delete = True
# Delete session or flag for update
if delete:
log.info('Session %s looks stale or invalid, deleting it now', key)
WatchSession.delete(key)
elif not ws.update_required:
log.info('Queueing session %s for update', key)
ws.update_required = True
ws.save()
log.debug('Finished cleaning up')
| [
"[email protected]"
] | |
d07ea0e9e3e5000189867d9bf01d01e77c958188 | efd471380d976614667e56c92f0aed671371fc63 | /All Programs/Day 2 - Operators.py | 7e26ea13a61812b56bc8a246512592504dfe4556 | [] | no_license | anshumanairy/Hacker-Rank | 39af46e76182d34637340d1755aff4afd7820083 | 6fef4c6a415422d9379232932358e4ee7430a6af | refs/heads/master | 2021-07-04T07:41:37.769152 | 2020-10-12T05:49:24 | 2020-10-12T05:49:24 | 181,359,750 | 2 | 2 | null | 2020-10-12T05:49:25 | 2019-04-14T19:38:18 | Python | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
def func():
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
total_cost=meal_cost+(meal_cost*tip_percent/100)+(meal_cost*tax_percent/100)
print(round(total_cost))
func()
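# Worked example (illustrative): meal_cost=12.00, tip_percent=20, tax_percent=8
# gives 12.00 + 2.40 + 0.96 = 15.36, which round() prints as 15.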
# In[ ]:
| [
"[email protected]"
] | |
e48f1cd2379953a3e76ed6637485ba7475088ac1 | e90a772733e73e45b4cdbb5f240ef3b4a9e71de1 | /251. Flatten 2D Vector.py | bda19790c5d3ad2a9bc44c05e8ad35702b52bffd | [] | no_license | jiewu-stanford/leetcode | 102829fcbcace17909e4de49c01c3d705b6e6e3a | cbd47f713d3307f900daf55c8f27301c70542fc4 | refs/heads/master | 2022-05-28T18:25:00.885047 | 2022-05-18T05:16:22 | 2022-05-18T05:16:22 | 214,486,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | '''
Title : 251. Flatten 2D Vector ($$$)
Problem : https://leetcode.com/problems/flatten-2d-vector/
: https://www.lintcode.com/problem/flatten-2d-vector/description
'''
''' Reference: https://www.cnblogs.com/lightwindy/p/8577871.html '''
class Vector2D(object):
def __init__(self, vec2d):
self.row, self.col, self.vec2d = 0, 0, vec2d
def next(self):
self.col += 1
return self.vec2d[self.row][self.col-1]
def hasNext(self):
while self.row < len(self.vec2d) and self.col == len(self.vec2d[self.row]):
self.row, self.col = self.row + 1, 0
return self.row < len(self.vec2d)
# Your Vector2D object will be instantiated and called as such:
# i, v = Vector2D(vec2d), []
# while i.hasNext(): v.append(i.next()) | [
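# e.g. (illustrative) vec2d = [[1, 2], [3], []] yields 1, 2, 3; hasNext() skips
# the trailing empty row by advancing `row` until a column remains.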
"[email protected]"
] | |
b07be83a3998ff9cfe606edc6c01aa9efbd148ac | a904e99110721719d9ca493fdb91679d09577b8d | /month01/all_code/day05/homework/exercise05.py | 0f12f974bdd279f020581ab649d02e68d2cbf968 | [
"Apache-2.0"
] | permissive | chaofan-zheng/tedu-python-demo | 7c7c64a355e5380d1f8b6464affeddfde0d27be7 | abe983ddc52690f4726cf42cc6390cba815026d8 | refs/heads/main | 2023-03-12T05:17:34.596664 | 2021-02-27T08:33:31 | 2021-02-27T08:33:31 | 323,350,480 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
Exercise: from the text below, extract variables and print the information using string formatting.
Hubei confirmed 67802 cases, cured 63326, cure rate 0.99
70 seconds is 01 minutes and 10 seconds
"""
region = "湖北"
confirmed = 67802
cure = 63326
cure_rate = 0.9912345
# print("%s确诊%d人,治愈%d人,治愈率%.2f" %
# (region, confirmed, cure, cure_rate))
print(f"{region}确诊{confirmed}人,治愈{cure}人,治愈率{cure_rate:.2f}")
total_second = 70
# print("%d秒是%.2d分零%.2d秒" %
# (total_second, total_second // 60, total_second % 60))
print(f"{total_second}秒是{total_second // 60:02}分零{total_second % 60:02}秒")
| [
"[email protected]"
] | |
08d43bcd8b9340063e0076a14ee544d6aa0c45fc | a29c6e83ae4f9010941d15c8fd4cfc67680bb054 | /keras/keras43_boston_3_lstm.py | cfebcb0b1b796a4fd2b370ae022e7dd61d10d643 | [] | no_license | ym0179/bit_seoul | f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8 | 14d1fb2752312790c39898fc53a45c1cf427a4d1 | refs/heads/master | 2023-02-27T19:52:23.577540 | 2021-02-08T00:30:16 | 2021-02-08T00:30:16 | 311,265,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | #Day7
#2020-11-17
#Boston house price prediction: data published in 1978 summarizing the factors that affect house prices in the Boston, USA area
from sklearn.datasets import load_boston
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
dataset = load_boston()
x = dataset.data
y = dataset.target
# print(x)
# print(x.shape, y.shape) #(506, 13) (506,)
#1. Preprocessing
#train-test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8)
x_train ,x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.8)
#scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train) # fit only on the training data
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
#reshape
x_train = x_train.reshape(x_train.shape[0],13,1)
x_val = x_val.reshape(x_val.shape[0],13,1)
x_test = x_test.reshape(x_test.shape[0],13,1)
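# The LSTM expects 3-D input (samples, timesteps, features); the reshape above
# treats each of the 13 features as one timestep holding a single value.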
x_pred = x_test[:10]
y_pred = y_test[:10]
#2. Modeling
#input shape
#DNN - 1-D input, RNN - 2-D, LSTM - 2-D
model = Sequential()
#(rows, columns, how many values to slice at a time) -> specifies that the LSTM processes the sequence one value at a time
model.add(LSTM(32, activation='relu',input_shape=(13,1)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
# model.summary()
#3. Compile, train
model.compile(loss="mse", optimizer="adam", metrics=["mae"])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss',patience=10,mode='auto')
model.fit(x_train,y_train,epochs=300,batch_size=1,verbose=2,callbacks=[es],
validation_data=(x_val,y_val))
#4. Evaluate
loss,mae = model.evaluate(x_test,y_test,batch_size=1)
print("loss : ",loss)
print("mae : ",mae)
#5. Predict
result = model.predict(x_pred)
print("예측값 : ", result.T.reshape(10,)) #보기 쉽게
print("실제값 : ", y_pred)
y_predicted = model.predict(x_test) # x_pred only has 10 samples, so compute RMSE and R2 with x_test
#RMSE
#R2
import numpy as np
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predicted):
return np.sqrt(mean_squared_error(y_test,y_predicted))
print("RMSE : ", RMSE(y_test, y_predicted))
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predicted)
print("R2 : ",r2) # max 값: 1
'''
loss : 12.263466835021973
mae : 2.7167487144470215
예측값 : [25.90948 6.2764387 20.263472 17.902828 13.495611 26.259878
19.45948 22.261282 23.709982 23.103811 ]
실제값 : [23.1 10.4 17.4 20.5 13. 20.5 21.8 21.2 21.8 23.1]
RMSE : 3.5019234178103877
R2 : 0.8028192283008149
''' | [
"[email protected]"
] | |
db05fb0ae0a739328e4c844cf59e78fe6fca7fd6 | 0cd64f3f67c6a3b130a788906da84ffc3d15396a | /Library/lib/python3.9/site-packages/sympy/physics/quantum/qft.py | 5d35d22e1f0985800bcb28b49c256ecca1930a4d | [
"MIT",
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Ryorama/codeapp | 32ef44a3e8058da9858924df211bf82f5f5018f1 | cf7f5753c6c4c3431d8209cbaacf5208c3c664fa | refs/heads/main | 2023-06-26T09:24:13.724462 | 2021-07-27T17:54:25 | 2021-07-27T17:54:25 | 388,520,626 | 0 | 0 | MIT | 2021-07-22T16:01:32 | 2021-07-22T16:01:32 | null | UTF-8 | Python | false | false | 6,199 | py | """An implementation of qubits and gates acting on them.
Todo:
* Update docstrings.
* Update tests.
* Implement apply using decompose.
* Implement represent using decompose or something smarter. For this to
work we first have to implement represent for SWAP.
* Decide if we want upper index to be inclusive in the constructor.
* Fix the printing of Rk gates in plotting.
"""
from sympy import Expr, Matrix, exp, I, pi, Integer, Symbol
from sympy.functions import sqrt
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qexpr import QuantumError, QExpr
from sympy.matrices import eye
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.gate import (
Gate, HadamardGate, SwapGate, OneQubitGate, CGate, PhaseGate, TGate, ZGate
)
__all__ = [
'QFT',
'IQFT',
'RkGate',
'Rk'
]
#-----------------------------------------------------------------------------
# Fourier stuff
#-----------------------------------------------------------------------------
class RkGate(OneQubitGate):
"""This is the R_k gate of the QTF."""
gate_name = 'Rk'
gate_name_latex = 'R'
def __new__(cls, *args):
if len(args) != 2:
raise QuantumError(
'Rk gates only take two arguments, got: %r' % args
)
# For small k, Rk gates simplify to other gates, using these
# substitutions give us familiar results for the QFT for small numbers
# of qubits.
target = args[0]
k = args[1]
if k == 1:
return ZGate(target)
elif k == 2:
return PhaseGate(target)
elif k == 3:
return TGate(target)
args = cls._eval_args(args)
inst = Expr.__new__(cls, *args)
inst.hilbert_space = cls._eval_hilbert_space(args)
return inst
@classmethod
def _eval_args(cls, args):
# Fall back to this, because Gate._eval_args assumes that args is
# all targets and can't contain duplicates.
return QExpr._eval_args(args)
@property
def k(self):
return self.label[1]
@property
def targets(self):
return self.label[:1]
@property
def gate_name_plot(self):
return r'$%s_%s$' % (self.gate_name_latex, str(self.k))
def get_target_matrix(self, format='sympy'):
if format == 'sympy':
return Matrix([[1, 0], [0, exp(Integer(2)*pi*I/(Integer(2)**self.k))]])
raise NotImplementedError(
'Invalid format for the R_k gate: %r' % format)
Rk = RkGate
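# Note: for k = 1, 2, 3 the RkGate constructor returns Z, S (phase) and T gates
# respectively, since R_k applies a relative phase of exp(2*pi*I/2**k).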
class Fourier(Gate):
"""Superclass of Quantum Fourier and Inverse Quantum Fourier Gates."""
@classmethod
def _eval_args(self, args):
if len(args) != 2:
raise QuantumError(
'QFT/IQFT only takes two arguments, got: %r' % args
)
if args[0] >= args[1]:
raise QuantumError("Start must be smaller than finish")
return Gate._eval_args(args)
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
"""
Represents the (I)QFT In the Z Basis
"""
nqubits = options.get('nqubits', 0)
if nqubits == 0:
raise QuantumError(
'The number of qubits must be given as nqubits.')
if nqubits < self.min_qubits:
raise QuantumError(
'The number of qubits %r is too small for the gate.' % nqubits
)
size = self.size
omega = self.omega
#Make a matrix that has the basic Fourier Transform Matrix
arrayFT = [[omega**(
i*j % size)/sqrt(size) for i in range(size)] for j in range(size)]
matrixFT = Matrix(arrayFT)
#Embed the FT Matrix in a higher space, if necessary
if self.label[0] != 0:
matrixFT = matrix_tensor_product(eye(2**self.label[0]), matrixFT)
if self.min_qubits < nqubits:
matrixFT = matrix_tensor_product(
matrixFT, eye(2**(nqubits - self.min_qubits)))
return matrixFT
@property
def targets(self):
return range(self.label[0], self.label[1])
@property
def min_qubits(self):
return self.label[1]
@property
def size(self):
"""Size is the size of the QFT matrix"""
return 2**(self.label[1] - self.label[0])
@property
def omega(self):
return Symbol('omega')
class QFT(Fourier):
"""The forward quantum Fourier transform."""
gate_name = 'QFT'
gate_name_latex = 'QFT'
def decompose(self):
"""Decomposes QFT into elementary gates."""
start = self.label[0]
finish = self.label[1]
circuit = 1
for level in reversed(range(start, finish)):
circuit = HadamardGate(level)*circuit
for i in range(level - start):
circuit = CGate(level - i - 1, RkGate(level, i + 2))*circuit
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
return circuit
def _apply_operator_Qubit(self, qubits, **options):
return qapply(self.decompose()*qubits)
def _eval_inverse(self):
return IQFT(*self.args)
@property
def omega(self):
return exp(2*pi*I/self.size)
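# Example (illustrative): QFT(0, 3).decompose() expands the 3-qubit transform
# into Hadamard, controlled-R_k and SWAP gates, and representing QFT(0, 3) with
# nqubits=3 gives the 8x8 Fourier matrix with omega = exp(2*pi*I/8).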
class IQFT(Fourier):
"""The inverse quantum Fourier transform."""
gate_name = 'IQFT'
gate_name_latex = '{QFT^{-1}}'
def decompose(self):
"""Decomposes IQFT into elementary gates."""
start = self.args[0]
finish = self.args[1]
circuit = 1
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
for level in range(start, finish):
for i in reversed(range(level - start)):
circuit = CGate(level - i - 1, RkGate(level, -i - 2))*circuit
circuit = HadamardGate(level)*circuit
return circuit
def _eval_inverse(self):
return QFT(*self.args)
@property
def omega(self):
return exp(-2*pi*I/self.size)
| [
"[email protected]"
] | |
fd289b356f9617c2ebaaa01d271f3e18fca8ee97 | f2a2f41641eb56a17009294ff100dc9b39cb774b | /old_session/session_1/_144/_144_binary_tree_preorder_traversal.py | 09b613446a5be0cf7e9539edd9c8b63a2327f9da | [] | no_license | YJL33/LeetCode | 0e837a419d11d44239d1a692140a1468f6a7d9bf | b4da922c4e8406c486760639b71e3ec50283ca43 | refs/heads/master | 2022-08-13T01:46:14.976758 | 2022-07-24T03:59:52 | 2022-07-24T04:11:32 | 52,939,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | """
144. Binary Tree Preorder Traversal
Given a binary tree, return the preorder traversal of its nodes' values.
For example:
Given binary tree {1,#,2,3},
1
\
2
/
3
return [1,2,3].
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
# Preorder: Root => Left => Right
def helper(root, res):
res += root.val,
if root.left != None:
helper(root.left, res)
if root.right != None:
helper(root.right, res)
return
if root is None:
return []
res = []
helper(root, res)
return res | [
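# An iterative alternative (sketch): pop nodes from a stack, record the value,
# then push the right child before the left so the left subtree is visited first.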
"[email protected]"
] | |
903b509e27fa5c700f0cf5445f78e218d2d3c40b | a16d190c16781bf4fde5960673d2897e469e0174 | /flink-ai-flow/lib/airflow/tests/providers/google/cloud/operators/test_dataflow.py | 9cb7490990e0c0d094473e6df35b8f081d054cf0 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | bgeng777/flink-ai-extended | 742a1bb80d07c090c3ecb960394422896b5899d7 | f83b5d661240c45c767002767c0cbddc847fff81 | refs/heads/master | 2023-08-15T00:32:40.260537 | 2021-07-27T04:20:53 | 2021-07-27T04:20:53 | 349,360,984 | 1 | 2 | Apache-2.0 | 2021-05-20T03:05:56 | 2021-03-19T09:03:50 | Python | UTF-8 | Python | false | false | 15,386 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from copy import deepcopy
from unittest import mock
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning,
DataflowCreateJavaJobOperator,
DataflowCreatePythonJobOperator,
DataflowStartFlexTemplateOperator,
DataflowStartSqlJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.version import version
TASK_ID = 'test-dataflow-operator'
JOB_ID = 'test-dataflow-pipeline-id'
JOB_NAME = 'test-dataflow-pipeline-name'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'gs://my-bucket/my-object.py'
PY_INTERPRETER = 'python3'
JAR_FILE = 'gs://my-bucket/example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f',
}
ADDITIONAL_OPTIONS = {'output': 'gs://test/output', 'labels': {'foo': 'bar'}}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
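# Dots and plus signs in the Airflow version are replaced with dashes here,
# presumably because Dataflow label values may not contain those characters.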
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION},
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.providers.google.cloud.operators.dataflow.{}'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subsription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_LOCATION = 'custom-location'
TEST_PROJECT = "test-project"
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB_ID = 'test-job-id'
class TestDataflowPythonOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreatePythonJobOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.py_interpreter, PY_INTERPRETER)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options, DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options, EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION},
}
gcs_provide_file.assert_called_once_with(object_url=PY_FILE)
start_python_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
dataflow=mock.ANY,
py_options=PY_OPTIONS,
py_interpreter=PY_INTERPRETER,
py_requirements=None,
py_system_site_packages=False,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class TestDataflowJavaOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreateJavaJobOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options, DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options, EXPECTED_ADDITIONAL_OPTIONS)
self.assertEqual(self.dataflow.check_if_running, CheckJobRunning.WaitForRun)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = CheckJobRunning.IgnoreJob
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=None,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = True
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_not_called()
start_java_hook.assert_not_called()
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_not_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow with option to check if job is running
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=None,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
)
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_multiple_job_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow with option to check multiple jobs
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.multiple_jobs = True
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=True,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
)
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION
)
class TestDataflowTemplateOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplatedJobStartOperator(
task_id=TASK_ID,
template=TEMPLATE,
job_name=JOB_NAME,
parameters=PARAMETERS,
options=DEFAULT_OPTIONS_TEMPLATE,
dataflow_default_options={"EXTRA_OPTION": "TEST_A"},
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
environment={"maxWorkers": 2},
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_exec(self, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f',
'EXTRA_OPTION': "TEST_A",
}
start_template_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
parameters=PARAMETERS,
dataflow_template=TEMPLATE,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
environment={'maxWorkers': 2},
)
class TestDataflowStartFlexTemplateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_execute(self, mock_dataflow):
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
body={"launchParameter": TEST_FLEX_PARAMETERS},
do_xcom_push=True,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
)
start_flex_template.execute(mock.MagicMock())
mock_dataflow.return_value.start_flex_template.assert_called_once_with(
body={"launchParameter": TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_id_callback=mock.ANY,
)
def test_on_kill(self):
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
body={"launchParameter": TEST_FLEX_PARAMETERS},
do_xcom_push=True,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
)
start_flex_template.hook = mock.MagicMock()
start_flex_template.job_id = JOB_ID
start_flex_template.on_kill()
start_flex_template.hook.cancel_job.assert_called_once_with(
job_id='test-dataflow-pipeline-id', project_id=TEST_PROJECT
)
class TestDataflowSqlOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_execute(self, mock_hook):
start_sql = DataflowStartSqlJobOperator(
task_id="start_sql_query",
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=deepcopy(TEST_SQL_OPTIONS),
location=TEST_LOCATION,
do_xcom_push=True,
)
start_sql.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id='google_cloud_default', delegate_to=None, drain_pipeline=False
)
mock_hook.return_value.start_sql_job.assert_called_once_with(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=None,
on_new_job_id_callback=mock.ANY,
)
start_sql.job_id = TEST_SQL_JOB_ID
start_sql.on_kill()
mock_hook.return_value.cancel_job.assert_called_once_with(job_id='test-job-id', project_id=None)
| [
"[email protected]"
] | |
b843458b7624d7a008fa6052f1a9a98ce728076f | 0396bc649c5b2ddb21a6b629e3daf1501dafd13f | /evalml/pipelines/components/ensemble/stacked_ensemble_base.py | 025bc52dafc00ab673641bf89fa6d07c059915c1 | [
"BSD-3-Clause"
] | permissive | Sandy4321/evalml | 3324fe6501091c51b67b4a573214ad4c6103c5a5 | 32f9be24d9d8479cf1a4d7a261c17fde213c50d1 | refs/heads/main | 2023-02-09T04:38:53.077488 | 2021-01-01T03:59:18 | 2021-01-01T03:59:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | from evalml.exceptions import EnsembleMissingPipelinesError
from evalml.model_family import ModelFamily
from evalml.pipelines.components import Estimator
from evalml.pipelines.components.utils import scikit_learn_wrapped_estimator
from evalml.utils import classproperty
_nonstackable_model_families = [ModelFamily.BASELINE, ModelFamily.NONE]
class StackedEnsembleBase(Estimator):
"""Stacked Ensemble Base Class."""
model_family = ModelFamily.ENSEMBLE
_stacking_estimator_class = None
_default_final_estimator = None
_default_cv = None
def __init__(self, input_pipelines=None, final_estimator=None, cv=None, n_jobs=None, random_state=0, **kwargs):
"""Stacked ensemble base class.
Arguments:
input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use as the base estimators.
This must not be None or an empty list or else EnsembleMissingPipelinesError will be raised.
final_estimator (Estimator or subclass): The estimator used to combine the base estimators.
cv (int, cross-validation generator or an iterable): Determines the cross-validation splitting strategy used to train final_estimator.
For int/None inputs, if the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used. In all other cases, KFold is used.
Possible inputs for cv are:
- None: 5-fold cross validation
- int: the number of folds in a (Stratified) KFold
                - A scikit-learn cross-validation generator object
- An iterable yielding (train, test) splits
            n_jobs (int or None): Integer describing the level of parallelism used for pipelines.
                None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
                Defaults to None.
                - Note: multiprocessing errors may be thrown for values of `n_jobs != 1`. If this happens, please use `n_jobs = 1`.
random_state (int, np.random.RandomState): seed for the random number generator
"""
if not input_pipelines:
raise EnsembleMissingPipelinesError("`input_pipelines` must not be None or an empty list.")
if [pipeline for pipeline in input_pipelines if pipeline.model_family in _nonstackable_model_families]:
raise ValueError("Pipelines with any of the following model families cannot be used as base pipelines: {}".format(_nonstackable_model_families))
parameters = {
"input_pipelines": input_pipelines,
"final_estimator": final_estimator,
"cv": cv,
"n_jobs": n_jobs
}
parameters.update(kwargs)
if len(set([pipeline.problem_type for pipeline in input_pipelines])) > 1:
raise ValueError("All pipelines must have the same problem type.")
cv = cv or self._default_cv(n_splits=3, random_state=random_state)
estimators = [scikit_learn_wrapped_estimator(pipeline) for pipeline in input_pipelines]
final_estimator = scikit_learn_wrapped_estimator(final_estimator or self._default_final_estimator())
sklearn_parameters = {
"estimators": [(f"({idx})", estimator) for idx, estimator in enumerate(estimators)],
"final_estimator": final_estimator,
"cv": cv,
"n_jobs": n_jobs
}
sklearn_parameters.update(kwargs)
super().__init__(parameters=parameters,
component_obj=self._stacking_estimator_class(**sklearn_parameters),
random_state=random_state)
@property
def feature_importance(self):
"""Not implemented for StackedEnsembleClassifier and StackedEnsembleRegressor"""
raise NotImplementedError("feature_importance is not implemented for StackedEnsembleClassifier and StackedEnsembleRegressor")
@classproperty
def default_parameters(cls):
"""Returns the default parameters for stacked ensemble classes.
Returns:
dict: default parameters for this component.
"""
return {
'final_estimator': None,
'cv': None,
'n_jobs': 1,
}
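# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, commented out): a minimal concrete
# subclass showing how the three class attributes above are meant to be wired
# to scikit-learn's stacking implementation. The subclass name is hypothetical;
# evalml ships its own StackedEnsembleClassifier / StackedEnsembleRegressor.
#
# from sklearn.ensemble import StackingRegressor
# from sklearn.linear_model import LinearRegression
# from sklearn.model_selection import KFold
#
# class ExampleStackedEnsembleRegressor(StackedEnsembleBase):
#     """Regression stacked ensemble sketch."""
#     _stacking_estimator_class = StackingRegressor
#     _default_final_estimator = LinearRegression
#     _default_cv = KFold
#
# ensemble = ExampleStackedEnsembleRegressor(input_pipelines=[...], n_jobs=1)
# ---------------------------------------------------------------------------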
| [
"[email protected]"
] | |
435d64183cddf28cb10b05c638226a18cfe23383 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s072464251.py | 0667e7e766e9808fb1edf62e69f58f1a05ba3c1c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | s = input()
def YYMM(s):
p = int(s[2:])
if 1 <= p <= 12:
return True
return False
def MMYY(s):
p = int(s[:2])
if 1 <= p <= 12:
return True
return False
if YYMM(s) and MMYY(s):
print('AMBIGUOUS')
elif YYMM(s):
print('YYMM')
elif MMYY(s):
print('MMYY')
else:
print('NA') | [
"[email protected]"
] | |
7915777eddf122fb1204b725cd49b39bf2fa5c7b | 45c0651d7785025f0e7a137d8abac0e66092a659 | /roles/lib_zabbix/library/zbx_user.py | c916fa96a61d1acba94daf38922e1d8ef0f4ede5 | [
"Apache-2.0"
] | permissive | pkdevbox/openshift-ansible | df3f0d75032b5dee4f962852e265437ba2656925 | 318ac6b9b65f42f032382114f35d3c9fa7f5610b | refs/heads/master | 2021-01-21T01:39:10.120698 | 2015-09-04T20:29:58 | 2015-09-04T20:29:58 | 41,973,441 | 1 | 0 | null | 2015-09-05T19:45:46 | 2015-09-05T19:45:46 | null | UTF-8 | Python | false | false | 6,779 | py | #!/usr/bin/env python
'''
ansible module for zabbix users
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix user ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because these modules all look very similar to one another.
# They intentionally share near-duplicate code: the behavior is very similar
# but differs slightly for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
    ''' Check that key exists in content and that content[key] is non-empty
    '''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_usergroups(zapi, usergroups):
''' Get usergroups
'''
ugroups = []
for ugr in usergroups:
content = zapi.get_content('usergroup',
'get',
{'search': {'name': ugr},
#'selectUsers': 'userid',
#'getRights': 'extend'
})
if content['result']:
ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
return ugroups or None
def get_passwd(passwd):
'''Determine if password is set, if not, return 'zabbix'
'''
if passwd:
return passwd
return 'zabbix'
def get_usertype(user_type):
'''
Determine zabbix user account type
'''
if not user_type:
return None
utype = 1
if 'super' in user_type:
utype = 3
elif 'admin' in user_type or user_type == 'admin':
utype = 2
return utype
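# get_usertype examples: get_usertype(None) -> None, get_usertype('user') -> 1,
# get_usertype('admin') -> 2, get_usertype('super-user') -> 3.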
def main():
'''
ansible zabbix module for users
'''
##def user(self, name, state='present', params=None):
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
login=dict(default=None, type='str'),
first_name=dict(default=None, type='str'),
last_name=dict(default=None, type='str'),
user_type=dict(default=None, type='str'),
password=dict(default=None, type='str'),
update_password=dict(default=False, type='bool'),
user_groups=dict(default=[], type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
## before we can create a user media and users with media types we need media
zbx_class_name = 'user'
idname = "userid"
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'output': 'extend',
'search': {'alias': module.params['login']},
"selectUsrgrps": 'usergrpid',
})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
params = {'alias': module.params['login'],
'passwd': get_passwd(module.params['password']),
'usrgrps': get_usergroups(zapi, module.params['user_groups']),
'name': module.params['first_name'],
'surname': module.params['last_name'],
'type': get_usertype(module.params['user_type']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('Error'):
module.exit_json(failed=True, changed=False, results=content, state='present')
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
        # Only compare/update the password when an explicit password update was requested
if not module.params['update_password']:
params.pop('passwd', None)
zab_results = content['result'][0]
for key, value in params.items():
if key == 'usrgrps':
# this must be done as a list of ordered dictionaries fails comparison
if not all([True for _ in zab_results[key][0] if _ in value[0]]):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
| [
"[email protected]"
] | |
5d851d8cfe60de655c230608e5c5bb4c09078032 | e5a044708032b853f1cdf8906da63502716fd410 | /openapi_client/models/post_auth_transaction_all_of.py | 3108af8ded3b97784e38a869b08eb2ce2181a632 | [] | no_license | GBSEcom/Python | 4b93bab80476051fc99f379f018ac9fa109a8a6a | 5fa37dba8d0c3853686fdc726f863743376060c9 | refs/heads/master | 2021-12-04T12:55:29.605843 | 2021-11-19T22:01:03 | 2021-11-19T22:01:03 | 136,058,345 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,222 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.5.0.20211029.001
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PostAuthTransactionAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'transaction_amount': 'Amount',
'transaction_origin': 'TransactionOrigin',
'split_shipment': 'SplitShipment',
'soft_descriptor': 'SoftDescriptor'
}
attribute_map = {
'transaction_amount': 'transactionAmount',
'transaction_origin': 'transactionOrigin',
'split_shipment': 'splitShipment',
'soft_descriptor': 'softDescriptor'
}
def __init__(self, transaction_amount=None, transaction_origin=None, split_shipment=None, soft_descriptor=None): # noqa: E501
"""PostAuthTransactionAllOf - a model defined in OpenAPI""" # noqa: E501
self._transaction_amount = None
self._transaction_origin = None
self._split_shipment = None
self._soft_descriptor = None
self.discriminator = None
self.transaction_amount = transaction_amount
if transaction_origin is not None:
self.transaction_origin = transaction_origin
if split_shipment is not None:
self.split_shipment = split_shipment
if soft_descriptor is not None:
self.soft_descriptor = soft_descriptor
@property
def transaction_amount(self):
"""Gets the transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:return: The transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:rtype: Amount
"""
return self._transaction_amount
@transaction_amount.setter
def transaction_amount(self, transaction_amount):
"""Sets the transaction_amount of this PostAuthTransactionAllOf.
:param transaction_amount: The transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:type: Amount
"""
if transaction_amount is None:
raise ValueError("Invalid value for `transaction_amount`, must not be `None`") # noqa: E501
self._transaction_amount = transaction_amount
@property
def transaction_origin(self):
"""Gets the transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:return: The transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:rtype: TransactionOrigin
"""
return self._transaction_origin
@transaction_origin.setter
def transaction_origin(self, transaction_origin):
"""Sets the transaction_origin of this PostAuthTransactionAllOf.
:param transaction_origin: The transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:type: TransactionOrigin
"""
self._transaction_origin = transaction_origin
@property
def split_shipment(self):
"""Gets the split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:return: The split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:rtype: SplitShipment
"""
return self._split_shipment
@split_shipment.setter
def split_shipment(self, split_shipment):
"""Sets the split_shipment of this PostAuthTransactionAllOf.
:param split_shipment: The split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:type: SplitShipment
"""
self._split_shipment = split_shipment
@property
def soft_descriptor(self):
"""Gets the soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:return: The soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:rtype: SoftDescriptor
"""
return self._soft_descriptor
@soft_descriptor.setter
def soft_descriptor(self, soft_descriptor):
"""Sets the soft_descriptor of this PostAuthTransactionAllOf.
:param soft_descriptor: The soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:type: SoftDescriptor
"""
self._soft_descriptor = soft_descriptor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PostAuthTransactionAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
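# Hedged usage sketch (illustrative, commented out): `Amount` is the sibling
# generated model referenced in openapi_types above; its constructor arguments
# are assumed here and the field values are placeholders.
# from openapi_client.models.amount import Amount
# txn = PostAuthTransactionAllOf(transaction_amount=Amount(total="12.99", currency="USD"))
# print(txn.to_dict())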
| [
"[email protected]"
] | |
002a0a8ed95336f133ab86237afdd69b34f56e73 | 1da61f69428c4318e6bb43b96b9a72e65d6b1a59 | /arche_papergirl/models/tests/test_newsletter.py | 5b8dc42452b1611b1eb3e6d89bd5e539e51fba31 | [] | no_license | ArcheProject/arche_papergirl | ea16e453af248ca1ab571297559d8ebd8b6770b5 | e69a4c3ddd0c7e0d27f45f354f4c96807509de1a | refs/heads/master | 2020-04-10T01:44:21.189511 | 2018-07-13T12:33:42 | 2018-07-13T12:33:42 | 68,215,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from pyramid import testing
from zope.interface.verify import verifyClass, verifyObject
from arche_papergirl.exceptions import AlreadyInQueueError
from arche_papergirl.interfaces import INewsletter
class NewsletterTests(TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
@property
def _cut(self):
from arche_papergirl.models.newsletter import Newsletter
return Newsletter
def test_verify_class(self):
self.failUnless(verifyClass(INewsletter, self._cut))
def test_verify_obj(self):
self.failUnless(verifyObject(INewsletter, self._cut()))
def test_add_queue(self):
obj = self._cut()
obj.add_queue('subscriber_uid', 'list_uid')
self.assertEqual(obj._queue[1], ('subscriber_uid', 'list_uid'))
self.assertEqual(obj._uid_to_status['subscriber_uid'][1][0:2], (1, 'list_uid'))
self.assertRaises(AlreadyInQueueError, obj.add_queue, 'subscriber_uid', 'list_uid')
def test_queue_len(self):
obj = self._cut()
self.assertEqual(obj.queue_len, 0)
obj.add_queue('subscriber_uid', 'list_uid')
self.assertEqual(obj.queue_len, 1)
def test_pop_next(self):
obj = self._cut()
obj.add_queue('subscriber_uid1', 'list_uid')
obj.add_queue('subscriber_uid2', 'list_uid')
obj.add_queue('subscriber_uid3', 'list_uid')
self.assertEqual(obj.pop_next(), ('subscriber_uid1', 'list_uid'))
self.assertEqual(obj.get_uid_status('subscriber_uid1')[0:2], (0, 'list_uid'))
def test_pop_next_empty(self):
obj = self._cut()
self.assertEqual(obj.pop_next(), (None, None, None))
| [
"[email protected]"
] | |
2a63aedc0143cf8e12b66793a11b1d887f1f6c7c | 458c7d8a560658fcc61a629bc6397bf515717b61 | /catkin_ws/build/realsense/realsense2_description/catkin_generated/pkg.installspace.context.pc.py | 023f1f42f52ebac06ec7089fefff2d696eced89d | [] | no_license | aboughatane/Tb3_with_OM | 52e6ee855e4b2b773289c4c9ea9684b08206aa24 | 8ee25ff9a6ce2ad770471baf1f51710e6848ebf0 | refs/heads/main | 2023-03-15T15:19:31.296028 | 2020-12-20T23:35:55 | 2020-12-20T23:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "realsense2_description"
PROJECT_SPACE_DIR = "/home/diakhaby/catkin_ws/install"
PROJECT_VERSION = "2.2.20"
| [
"[email protected]"
] | |
1fd8eaaf0397d32379f65f494be6bf3ed513ef53 | 828115da62a687e1e9ea96bd5072f8b148e873a3 | /Segmenter.py | a4368dcd77129a6ead937c22f4282ef74099b30a | [] | no_license | VitalyRomanov/scidoc | cafc86f0f83c1ebd7ce17c8420cb158ab0844b89 | 2029abfff4eee8b919cc9bca7251d2edc7fd005f | refs/heads/master | 2021-02-16T11:44:02.495918 | 2020-03-05T07:33:08 | 2020-03-05T07:33:08 | 245,001,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from LanguageTools.nltk_wrapper import NltkWrapper
from nltk.classify.textcat import TextCat
class Segmenter:
def __init__(self):
self.tc = TextCat()
self.nlp_en = NltkWrapper("en")
self.nlp_ru = NltkWrapper("ru")
def __call__(self, full_text, segment_len=5, segment_overlap=2):
full_text = " ".join(full_text.split("\n"))
lang_guess = self.tc.guess_language(full_text[:200])
if lang_guess == "eng":
nlp = self.nlp_en
elif lang_guess == "rus":
nlp = self.nlp_ru
else:
nlp = None
if nlp is None:
return iter([])
sentences = nlp(full_text, tagger=False)
for ind in range(0, len(sentences) - segment_overlap, segment_len - segment_overlap):
segment_id = f"{ind}/{len(sentences)}_{segment_len}"
yield segment_id, sentences[ind:ind + segment_len]
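# Hedged usage sketch (illustrative, commented out; assumes the LanguageTools
# wrapper and the NLTK data required by TextCat are available):
# seg = Segmenter()
# for segment_id, sentences in seg("First sentence. Second one. Third. Fourth.",
#                                  segment_len=3, segment_overlap=1):
#     print(segment_id, sentences)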
| [
"[email protected]"
] | |
87d56f0c0931ae0db0ef2c5b9d39c0a2dd09901b | cb2e2d84d970894bb6d1b414e91551118fcfc209 | /src/hal_configurator/ui/gen/testingflow.py | 11071babd0b3f997183097c0394d1f6135f98d6c | [] | no_license | codechem/hal_automator | 0cb54f1d086e831208533f9b934806045700e1e8 | e9404f33ee34a9068293daff33434d2c80d23865 | refs/heads/master | 2021-01-13T13:58:58.552727 | 2016-06-21T17:26:34 | 2016-06-21T17:26:34 | 72,894,281 | 1 | 0 | null | 2016-11-05T00:01:32 | 2016-11-05T00:01:32 | null | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/halicea/projects/hal_automator/utils/qtUi/testingflow.ui'
#
# Created: Sun Nov 1 19:09:21 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(400, 300)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
| [
"[email protected]"
] | |
9d4663d936f5bd08c8a3aee4b917a9858f6eace8 | 170864b6ec66be48138f231fe8ac3381481b8c9d | /python/BOJ_2563.py | ece6f7af4088ada9440550e4b7600777dadef860 | [] | no_license | hyesungoh/AA_Algorithm | 5da3d8312d035d324dfaa31eef73f01a238231f3 | d68f52eaa29cfc4656a8b5623359166779ded06e | refs/heads/master | 2023-06-09T14:49:01.402456 | 2021-06-28T10:10:09 | 2021-06-28T10:10:09 | 272,701,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import sys
input = sys.stdin.readline
n = int(input())
paper = [[0 for _ in range(100)] for _ in range(100)]
ans = 0
for _ in range(n):
y, x = map(int, input().split())
for i in range(y, y+10):
for j in range(x, x+10):
if not paper[i][j]:
paper[i][j] = 1
ans += 1
print(ans)
| [
"[email protected]"
] | |
03097fb708d399c95e2aaad8f59df7478613eea5 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_258/ch81_2019_06_05_12_38_09_879499.py | cab21705c88ed0161cb1240956590b1c9a7fc3c7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | def interseccao_valores(dic1, dic2):
    """Return the values present in both dicts, e.g. ({'a': 1}, {'b': 1, 'c': 2}) -> [1]."""
    lista_dic1 = []
lista_dic2 = []
for e in dic1.values():
lista_dic1.append(e)
for k in dic2.values():
lista_dic2.append(k)
lista_interseccao = []
for m in range(0, len(lista_dic1)):
for n in range(0, len(lista_dic2)):
if lista_dic1[m] == lista_dic2[n]:
lista_interseccao.append(lista_dic1[m])
return lista_interseccao | [
"[email protected]"
] | |
b84e96d2e5c7ab1fd2060b7a26ec821333dca8bc | 767c3ca94095db80c7c23577a93f85cf27dd0579 | /testcases/test_register.py | e83ce01d24f95e007513e92b9fb8c763ef8cdabe | [] | no_license | zhang0123456789/interface_test | 2e95c532c0fc5662f8ecba6e74c905b8374c2034 | d77b4e3c6deace6ae0a87893d62a5fa40cdca462 | refs/heads/master | 2022-12-12T13:45:24.194832 | 2019-01-05T01:31:29 | 2019-01-05T01:31:29 | 164,169,447 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | #!/usr/bin/env python
# -*- coding:utf-8-*-
#@author:蜜蜜
#@file: test_register.py
#@time: 2018/12/31
#@email:[email protected]
import json
import unittest
from ddt import ddt, data
from common import contants
from common.do_execl import DoExecl
from common.mysql_util import MysqlUtil
from common.request import Request
from common.logger2 import MyLog
do_excel = DoExecl(contants.cases_file)
cases = do_excel.get_cases('register')
@ddt
class TestRegister(unittest.TestCase):
@classmethod
def setUpClass(cls):
global mysql
mysql = MysqlUtil()
sql = 'select mobilephone from future.member where ' \
' mobilephone != ""order by mobilephone desc limit 1 '
global max_phone
max_phone = mysql.fetch_one(sql)['mobilephone']
# def setUp(self):
    #     # look up the current largest mobile phone number
# self.mysql = MysqlUtil() #
# self.sql = 'select mobilephone from future.member where ' \
# ' mobilephone != "" order by mobilephone desc limit 1 '
#
# self.max_phone = self.mysql.fetch_one(self.sql)['mobilephone']
@data(*cases)
def test_register(self, case):
        data = json.loads(case.data)  # deserialize the JSON string into a dict
        if data['mobilephone'] == '${register}':  # check whether this field needs to be parameterized
            data['mobilephone'] = int(max_phone) + 1  # take the largest phone number in the database and add 1
MyLog.info('测试用例名称:{0}'.format(case.title))
MyLog.info('测试用例数据:{0}'.format(case.data))
MyLog.error('测试用例数据error')
        resp = Request(method=case.method, url=case.url, data=data)  # call the interface through the wrapped Request class
        MyLog.debug('status_code:{0}'.format(resp.get_status_code()))
        resp_dict = resp.get_json()  # get the JSON response as a dict
self.assertEqual(case.expected, resp.get_text())
        if resp_dict['code'] == 20110:  # successful registration: verify the database now has this record
            expected = int(max_phone) + 1
            sql = 'select * from future.member where mobilephone = "{0}"'.format(expected)
            member = mysql.fetch_one(sql)
            if member is not None:  # a successful registration should not return None here
                self.assertEqual(expected, member['mobilephone'])
            else:  # None means registration reported success but no row was inserted into the database
                MyLog.error('注册失败')
                raise AssertionError
        # else: for a failed registration, verify the database does not contain the record (still to be written)
# def tearDown(self):
# self.mysql.close()
@classmethod
def tearDownClass(cls):
mysql.close()
| [
"[email protected]"
] | |
eb06ee00f22ecf3f7526bbf89d4810daa1b97b13 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/delegatednetwork/get_delegated_subnet_service_details.py | 1cea4ab12edb53966c5bbea1377cd526ee76b4bb | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 6,281 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDelegatedSubnetServiceDetailsResult',
'AwaitableGetDelegatedSubnetServiceDetailsResult',
'get_delegated_subnet_service_details',
]
@pulumi.output_type
class GetDelegatedSubnetServiceDetailsResult:
"""
Represents an instance of a orchestrator.
"""
def __init__(__self__, controller_details=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, subnet_details=None, tags=None, type=None):
if controller_details and not isinstance(controller_details, dict):
raise TypeError("Expected argument 'controller_details' to be a dict")
pulumi.set(__self__, "controller_details", controller_details)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if subnet_details and not isinstance(subnet_details, dict):
raise TypeError("Expected argument 'subnet_details' to be a dict")
pulumi.set(__self__, "subnet_details", subnet_details)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="controllerDetails")
def controller_details(self) -> Optional['outputs.ControllerDetailsResponse']:
"""
Properties of the controller.
"""
return pulumi.get(self, "controller_details")
@property
@pulumi.getter
def id(self) -> str:
"""
An identifier that represents the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current state of dnc delegated subnet resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
Resource guid.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="subnetDetails")
def subnet_details(self) -> Optional['outputs.SubnetDetailsResponse']:
"""
subnet details
"""
return pulumi.get(self, "subnet_details")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDelegatedSubnetServiceDetailsResult(GetDelegatedSubnetServiceDetailsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDelegatedSubnetServiceDetailsResult(
controller_details=self.controller_details,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
subnet_details=self.subnet_details,
tags=self.tags,
type=self.type)
def get_delegated_subnet_service_details(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDelegatedSubnetServiceDetailsResult:
"""
    Represents an instance of an orchestrator.
API Version: 2021-03-15.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:delegatednetwork:getDelegatedSubnetServiceDetails', __args__, opts=opts, typ=GetDelegatedSubnetServiceDetailsResult).value
return AwaitableGetDelegatedSubnetServiceDetailsResult(
controller_details=__ret__.controller_details,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
subnet_details=__ret__.subnet_details,
tags=__ret__.tags,
type=__ret__.type)
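# Hedged usage sketch (illustrative, commented out): the resource names are
# placeholders and the import path assumes the published pulumi_azure_native
# package layout.
# import pulumi
# import pulumi_azure_native as azure_native
# details = azure_native.delegatednetwork.get_delegated_subnet_service_details(
#     resource_group_name="example-rg",
#     resource_name="example-delegated-subnet")
# pulumi.export("provisioningState", details.provisioning_state)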
| [
"[email protected]"
] | |
1d205dda89d185af991d054d0eca492e10d11142 | 5c94e032b2d43ac347f6383d0a8f0c03ec3a0485 | /Alesis_V/__init__.py | a0c19a33939f7a8259468648ba791b0f6717a22d | [] | no_license | Elton47/Ableton-MRS-10.1.13 | 997f99a51157bd2a2bd1d2dc303e76b45b1eb93d | 54bb64ba5e6be52dd6b9f87678ee3462cc224c8a | refs/heads/master | 2022-07-04T01:35:27.447979 | 2020-05-14T19:02:09 | 2020-05-14T19:02:09 | 263,990,585 | 0 | 0 | null | 2020-05-14T18:12:04 | 2020-05-14T18:12:03 | null | UTF-8 | Python | false | false | 966 | py | # uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Alesis_V/__init__.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
from .Alesis_V import Alesis_V
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=5042, product_ids=[
134, 135, 136], model_name=[
'V25', 'V49', 'V61']),
PORTS_KEY: [
inport(props=[NOTES_CC, SCRIPT, REMOTE]), outport(props=[SCRIPT])]}
def create_instance(c_instance):
return Alesis_V(c_instance) | [
"[email protected]"
] | |
d3e388b971965bb7667505ef54d6f50b4e5d98c5 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ton_425/sdB_ton_425_coadd.py | f3713f8fcfe32e4dcaaa4e8452709e8086361d7c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[142.527417,31.716667], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ton_425/sdB_ton_425_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ton_425/sdB_ton_425_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
9b12d02cc2a2150cdd9759b246560d369ebebc9a | 8fc999f5262b5a2dadc830f1cc345f51b6dde862 | /samples/conceptual_samples/exceptional_handling/exceptional_handling.py | c0f88a8c6cdb9961b2c83bc310c86e153c0ed4de | [] | no_license | pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate | 5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a | a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae | refs/heads/master | 2022-04-09T20:33:28.527653 | 2020-03-27T06:35:50 | 2020-03-27T06:35:50 | 250,226,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # this is a sample python script program which is created to demonstrate the exceptional handling concept in the python
def get_number():
"the function returns a float number"
number=float(input("enter a float number:\n"))
    return number
while True:
try:
print get_number()
break
except:
print"\nYou have entered a wrong value."
print"\nPlease enter a value that is integer or a float value"
else:
print"there is a error over here, better be carefully about executing it..!!"
# this is the end of the program file. happy coding..!! | [
"[email protected]"
] | |
0dd328e28c261b6378ae5bb07c13860fccdbabd7 | b6a84594f8c29d968014faaddd49abeb7537a5fc | /python/443.string-compression.py | 3159cf2b852f7a4b099661f5428f9a01ceb3108e | [] | no_license | nickyfoto/lc | 8a6af3df114e693e265d0ede03f4d4e1283e010e | 3633b4df3e24968057c7d684689b931c5a8032d3 | refs/heads/master | 2020-09-16T19:23:07.765917 | 2020-06-07T17:18:06 | 2020-06-07T17:18:06 | 223,866,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | #
# @lc app=leetcode id=443 lang=python3
#
# [443] String Compression
#
# https://leetcode.com/problems/string-compression/description/
#
# algorithms
# Easy (37.79%)
# Total Accepted: 56.7K
# Total Submissions: 149.2K
# Testcase Example: '["a","a","b","b","c","c","c"]'
#
# Given an array of characters, compress it in-place.
#
# The length after compression must always be smaller than or equal to the
# original array.
#
# Every element of the array should be a character (not int) of length 1.
#
# After you are done modifying the input array in-place, return the new length
# of the array.
#
#
# Follow up:
# Could you solve it using only O(1) extra space?
#
#
# Example 1:
#
#
# Input:
# ["a","a","b","b","c","c","c"]
#
# Output:
# Return 6, and the first 6 characters of the input array should be:
# ["a","2","b","2","c","3"]
#
# Explanation:
# "aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by
# "c3".
#
#
#
#
# Example 2:
#
#
# Input:
# ["a"]
#
# Output:
# Return 1, and the first 1 characters of the input array should be: ["a"]
#
# Explanation:
# Nothing is replaced.
#
#
#
#
# Example 3:
#
#
# Input:
# ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
#
# Output:
# Return 4, and the first 4 characters of the input array should be:
# ["a","b","1","2"].
#
# Explanation:
# Since the character "a" does not repeat, it is not compressed. "bbbbbbbbbbbb"
# is replaced by "b12".
# Notice each digit has it's own entry in the array.
#
#
#
#
# Note:
#
#
# All characters have an ASCII value in [35, 126].
# 1 <= len(chars) <= 1000.
#
#
#
class Solution:
# def compress(self, chars: List[str]) -> int:
def compress(self, chars):
# n = len(chars)
i = 0
current = chars[i]
# res = []
while i < len(chars):
count = 1
while i < len(chars) - 1 and chars[i+1] == current:
count += 1
# i += 1
chars.pop(i+1)
if count > 1:
l = list(str(count))
while l:
chars.insert(i+1, l.pop(0))
i += 1
# res.extend([current, str(count)])
# else:
# res.append(current)
# print(current, count, 'i=', i, chars)
if i < len(chars) - 1:
current = chars[i+1]
# count = 1
i += 1
# chars = list("".join(res))
# print(chars)
return len(chars)
# s = Solution()
# chars = ["a","a","b","b","c","c","c"]
# print(s.compress(chars))
# chars = ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
# print(s.compress(chars))
# chars = ['a']
# print(s.compress(chars))
| [
"[email protected]"
] | |
9aeca146b15c38122ca0078e969386954136da3f | 2cb6294bc2a92b082edb34110937902bf5227303 | /6/CalThreeKingdomV1.py | b218d853adc19d8edb2ecf544703849e77f8dbe7 | [] | no_license | arnoqlk/icourse163-Python | f1e08a43a833f278c64fa79d57e0b6a261895b0b | 2766e500151fc7990617a9e3f9df3af5e259f960 | refs/heads/master | 2020-04-15T22:45:38.695397 | 2019-01-11T03:13:28 | 2019-01-11T03:13:28 | 165,085,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # 三国演义人物出场统计排序
import jieba
txt = open(r"D:\PyCode\6\threekingdoms.txt", "r", encoding="utf-8").read()
# words that jieba's segmentation wrongly treats as person names (excluded from the count)
excludes = {"将军","却说","荆州","二人","不可","不能","如此"}
words = jieba.lcut(txt)
counts = {}
for word in words:
if len(word) == 1:
continue
elif word == "诸葛亮" or word == "孔明曰":
rword = "孔明"
elif word == "关公" or word == "云长":
rword = "关羽"
elif word == "玄德" or word == "玄德曰":
rword = "刘备"
elif word == "孟德" or word == "丞相":
rword = "曹操"
else:
rword = word
counts[rword] = counts.get(rword, 0) + 1
for word in excludes:
del counts[word]
items = list(counts.items())
items.sort(key=lambda x:x[1], reverse=True)
for i in range(10):
word, count = items[i]
print("{0:<10}{1:>5}".format(word, count))
| [
"[email protected]"
] | |
24763cae17d6a3c622a7b72ed61c651427d049f0 | 37433c8f7ec4ff9fded3c7bcc9403e2293436552 | /blog/__init__.py | 769f9fefb0af3e91d49a5562259c1b41b59d6e38 | [] | no_license | FreeGodCode/TYCarry_Blog | 4420d896e735789ac9104568e7bf59a85b796373 | 9be47be8ff1e33980f237227786bc9d472155114 | refs/heads/master | 2023-03-22T09:36:35.476398 | 2021-03-17T07:29:44 | 2021-03-17T07:29:44 | 314,501,947 | 1 | 0 | null | 2021-03-17T07:29:45 | 2020-11-20T09:10:48 | Python | UTF-8 | Python | false | false | 60 | py | # import pymysql as pymysql
#
# pymysql.install_as_MySQLdb() | [
"[email protected]"
] | |
bdd48947c531b535b04e28f94a17564d959c22fe | fa2ffc5487bef8240a1a5c7cfb893c234df21ee0 | /modelformproject/modelformproject/settings.py | c9a0a0c311cb15f9ef257ce997acb17937e0fc42 | [] | no_license | sandipdeshmukh77/django-practice-projects | cfd4d8f29aa74832ed3dc5501a651cba3f201251 | 78f9bd9f0fac4aaeecce4a94e88c6880e004b873 | refs/heads/main | 2023-02-11T05:59:28.029867 | 2020-12-29T22:12:52 | 2020-12-29T22:12:52 | 325,446,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | """
Django settings for modelformproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATES_DIR=os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e4@wu))+2^3^8xpw^)dag3fsx*jwv)7bcq$+5pyoev(tp*kto!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelformproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelformproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[STATIC_DIR,]
| [
"[email protected]"
] | |
b53c0419d05dd7cd3d70cc10ab5ff7381ba63d1d | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/applications/network/wengophone/actions.py | ce51c4bbb7335818be785452d31ef95f3c286ab3 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import scons
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def build():
scons.make("mode=release-symbols \
enable-shared-portaudio=no enable-shared-webcam=no \
enable-shared-wengocurl=no enable-shared-phapi=no \
softphone-runtime softphone")
def install():
scons.install("prefix=%s/usr mode=release-symbols softphone-install" % get.installDIR())
pisitools.dosed("%s/usr/bin/wengophone" % get.installDIR(), get.installDIR(), "")
shelltools.chmod("%s/usr/bin/wengophone" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "wengophone.png")
pisitools.insinto("/usr/share/applications", "wengophone.desktop")
pisitools.dodoc("COPYING", "TODO", "README*")
| [
"[email protected]"
] | |
d42b0dfc0e08ccb4a075e3b8c1c12a1368c26efc | b149a744da7b512d9ec2bfc3c0d855638d23d7fb | /docs/conf.py | d24946fe260123e404a39a2deb04ff9c0a4c97b0 | [
"Apache-2.0"
] | permissive | codeprimate123/imaps | 250c8f7c6b71fd1725c676a70b2f3b171a1e75e2 | 241bcd586013c43db8aa4dcb2d42058ac9d142f8 | refs/heads/master | 2020-09-01T13:58:44.124344 | 2019-12-09T16:48:05 | 2019-12-09T16:48:05 | 218,973,850 | 0 | 0 | Apache-2.0 | 2019-11-01T11:36:12 | 2019-11-01T11:36:12 | null | UTF-8 | Python | false | false | 1,890 | py | import imp
import os
import shlex
import sys
import sphinx_rtd_theme
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# Get package metadata from 'imaps/__about__.py' file
about = {}
with open(os.path.join(base_dir, 'imaps', '__about__.py')) as f:
exec(f.read(), about)
# -- General configuration ------------------------------------------------
# The extension modules to enable.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
]
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = about['__summary__']
version = about['__version__']
release = version
author = about['__author__']
copyright = about['__copyright__']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'imapsdoc'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"[email protected]"
] | |
a7f4b5315497455f122da51f24d9c8695537822c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02722/s256113021.py | df12675f2b68c6a34a4ddfe3d05d28fbbdf84c3c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import sys, math
from itertools import permutations, combinations
from collections import defaultdict, Counter, deque
from math import factorial#, gcd
from bisect import bisect_left #bisect_left(list, value)
sys.setrecursionlimit(10**7)
enu = enumerate
MOD = 10**9+7
def input(): return sys.stdin.readline()[:-1]
def pri(x): print('\n'.join(map(str, x)))
def prime_decomposition(n):
i = 2
table = []
while i*i <= n:
while n%i == 0:
n //= i
table.append(i)
i += 1
if n > 1:
table.append(n)
return table
def prime_decomposition2(n):
i = 2
table = defaultdict(int)
while i*i <= n:
while n%i == 0:
n //= i
table[i] += 1
i += 1
if n > 1:
table[n] += 1
return table
def make_divisor(n):
divisors = []
for i in range(1, int(n**0.5)+1):
if n%i == 0:
divisors.append(i)
if i != n//i:
divisors.append(n//i)
return divisors
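# For example: prime_decomposition(12) -> [2, 2, 3],
# prime_decomposition2(12) -> {2: 2, 3: 1} (as a defaultdict), and
# make_divisor(12) -> [1, 12, 2, 6, 3, 4] (divisors in discovery order).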
N = int(input())
list_pd1 = make_divisor(N)
list_pd1.sort()
dict_pd2 = prime_decomposition2(N-1)
#print(N, ':', list_pd1)
#print(N-1, ':', dict_pd2)
cnt = 1
# the N-1 side: every divisor of N-1 greater than 1 is a valid K, so count them from the prime factorization
for val in dict_pd2.values():
cnt *= (val+1)
cnt -= 1
#print(cnt)
for k in list_pd1[1:]:
#print('k:', k)
sN = N
while sN >= k:
if sN%k==0:
sN //= k
else:
sN %= k
if sN == 1:
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
f968c6c408331121b36ed5279da8245194e2aa68 | 9a3674c63ecc8a8e11f69588b8878ea4643c93ca | /backend/chat_time_21849/wsgi.py | 5cef89a70bc02457b55f9cf177243fe5a8711c2b | [] | no_license | crowdbotics-apps/chat-time-21849 | 3bf6e7e984b5b76ca7f7e27f0975b80f10e81bc1 | a581a39cde51273163b54f5db3d91a76ac76a421 | refs/heads/master | 2023-01-01T17:07:02.611073 | 2020-10-22T23:50:54 | 2020-10-22T23:50:54 | 306,481,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for chat_time_21849 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chat_time_21849.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
cda4b8d0aa3dedeaa0fafc9a73d659b37f98b784 | ca4e9da419a3fb3a59682dbcb668ac1c08c20dc7 | /hw/ip/otbn/util/yaml_to_doc.py | 757ab4c6cb1e9e77a115efebe8d7c943f364bfdd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ckdur/opentitan | fdf0888605e0d0e08e969c383ea19775f6271afd | e26a00ba788992c16ac362b76a857a48d39b24bc | refs/heads/master | 2022-12-11T20:48:21.446346 | 2020-09-01T23:11:45 | 2020-09-03T02:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,729 | py | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Generate Markdown documentation for the instructions in insns.yml'''
import argparse
import sys
from typing import Dict, List
from shared.bool_literal import BoolLiteral
from shared.encoding import Encoding
from shared.insn_yaml import Insn, InsnsFile, load_file
from shared.operand import Operand
def render_operand_row(operand: Operand) -> str:
'''Generate the single row of a markdown table for an operand'''
# This is in <tr><td> form, but we want to embed arbitrary markup (and
# don't want to have to faff around with < encodings. So we have to
# include a blank line above and below. This makes (at least) Github
# flavoured markdown switch back to "markdown mode" for the contents.
parts = []
parts.append('<tr><td>\n\n')
parts.append('`<{}>`'.format(operand.name))
parts.append('\n\n</td><td>')
# The "description" cell contains any documentation supplied in the file,
# and then any extra documentation that's implied by the type of the
# operand.
if operand.doc is not None:
parts.append('\n\n')
parts.append(operand.doc)
if operand.op_type is not None:
ot_doc = operand.op_type.markdown_doc()
if ot_doc is not None:
parts.append('\n\n')
parts.append(ot_doc)
parts.append('\n\n</td></tr>')
return ''.join(parts)
def render_operand_table(insn: Insn) -> str:
'''Generate the operand table for an instruction'''
# We have to generate this in <tr><td> form because we want to put
# block-level elements into the table cells (and markdown tables only
# support inline elements).
parts = []
parts.append('<table><thead>'
'<tr><th>Assembly symbol</th><th>Description</th></tr>'
'</thead>'
'<tbody>')
for operand in insn.operands:
parts.append(render_operand_row(operand))
parts.append('</tbody></table>\n\n')
return ''.join(parts)
def render_encoding(mnemonic: str,
name_to_operand: Dict[str, Operand],
encoding: Encoding) -> str:
'''Generate a table displaying an instruction encoding'''
parts = []
parts.append('<table style="font-size: 75%">')
parts.append('<tr>')
parts.append('<td></td>')
for bit in range(31, -1, -1):
parts.append('<td>{}</td>'.format(bit))
parts.append('</tr>')
# Build dictionary of bit ranges, keyed by the msb and with value a pair
# (width, desc) where width is the width of the range in bits and desc is a
# string describing what is stored in the range.
by_msb = {}
for field_name, field in encoding.fields.items():
scheme_field = field.scheme_field
# If this field is a literal value, explode it into single bits. To do
# so, we walk the ranges and match up with ranges in the value.
if isinstance(field.value, BoolLiteral):
assert field.value.width > 0
assert field.value.width == scheme_field.bits.width
bits_seen = 0
for msb, lsb in scheme_field.bits.ranges:
val_msb = scheme_field.bits.width - 1 - bits_seen
val_lsb = val_msb - msb + lsb
bits_seen += msb - lsb + 1
for idx in range(0, msb - lsb + 1):
desc = field.value.char_for_bit(val_lsb + idx)
by_msb[lsb + idx] = (1, '' if desc == 'x' else desc)
continue
# Otherwise this field's value is an operand name
assert isinstance(field.value, str)
operand_name = field.value
# Figure out whether there's any shifting going on.
shift = name_to_operand[operand_name].op_type.get_shift()
# If there is only one range (and no shifting), that's easy.
if len(scheme_field.bits.ranges) == 1 and shift == 0:
msb, lsb = scheme_field.bits.ranges[0]
by_msb[msb] = (msb - lsb + 1, operand_name)
continue
# Otherwise, we have to split up the operand into things like "foo[8:5]"
bits_seen = 0
for msb, lsb in scheme_field.bits.ranges:
val_msb = shift + scheme_field.bits.width - 1 - bits_seen
val_lsb = val_msb - msb + lsb
bits_seen += msb - lsb + 1
if msb == lsb:
desc = '{}[{}]'.format(operand_name, val_msb)
else:
desc = '{}[{}:{}]'.format(operand_name, val_msb, val_lsb)
by_msb[msb] = (msb - lsb + 1, desc)
parts.append('<tr>')
parts.append('<td>{}</td>'.format(mnemonic.upper()))
# Now run down the ranges in descending order of msb to get the table cells
next_bit = 31
for msb in sorted(by_msb.keys(), reverse=True):
# Sanity check to make sure we have a dense table
assert msb == next_bit
width, desc = by_msb[msb]
next_bit = msb - width
parts.append('<td colspan="{}">{}</td>'.format(width, desc))
assert next_bit == -1
parts.append('</tr>')
parts.append('</table>\n\n')
return ''.join(parts)
def render_literal_pseudo_op(rewrite: List[str]) -> str:
'''Generate documentation with expansion of a pseudo op'''
parts = []
parts.append('This instruction is a pseudo-operation and expands to the '
'following instruction sequence:\n```\n')
for line in rewrite:
parts.append(line)
parts.append('\n')
parts.append('```\n\n')
return ''.join(parts)
def render_insn(insn: Insn, heading_level: int) -> str:
'''Generate the documentation for an instruction
heading_level is the current Markdown heading level. It should be greater
than zero. For example, if it is 3, then the instruction will be introduced
with "### <insn_name>".
'''
assert heading_level > 0
parts = []
# Heading, based on mnemonic (upper-cased)
parts.append('{} {}\n'.format('#' * heading_level,
insn.mnemonic.upper()))
# If there's a note, render it as a callout
if insn.note is not None:
parts.append('<div class="bd-callout bd-callout-warning">'
'<h5>Note</h5>\n\n')
parts.append(insn.note)
parts.append('\n\n</div>\n\n')
# Optional synopsis: some bold-face text expanding the mnemonic to
# something more understandable.
if insn.synopsis is not None:
parts.append('**{}.**\n'.format(insn.synopsis))
# Optional documentation (using existing markdown formatting). Add a blank
# line afterwards to separate from the syntax and operand table.
if insn.doc is not None:
parts.append(insn.doc + '\n\n')
# Syntax example: either given explicitly or figured out from operands
parts.append("```\n")
parts.append(insn.mnemonic.upper() + ('' if insn.glued_ops else ' '))
parts.append(insn.syntax.render_doc())
parts.append("\n```\n\n")
# If this came from the RV32I instruction set, say so.
if insn.rv32i:
parts.append('This instruction is defined in the RV32I instruction set.\n\n')
# Show any trailing documentation (stuff that should come after the syntax
# example but before the operand table).
if insn.trailing_doc is not None:
parts.append('\n')
parts.append(insn.trailing_doc)
parts.append('\n\n')
# Show the operand table if at least one operand has an associated
# description.
if any(op.doc is not None for op in insn.operands):
parts.append(render_operand_table(insn))
# Show encoding if we have one
if insn.encoding is not None:
parts.append(render_encoding(insn.mnemonic,
insn.name_to_operand,
insn.encoding))
# If this is a pseudo-op with a literal translation, show it
if insn.literal_pseudo_op is not None:
parts.append(render_literal_pseudo_op(insn.literal_pseudo_op))
# Show decode pseudo-code if given
if insn.decode is not None:
parts.append('{} Decode\n\n'
'```python3\n'
'{}\n'
'```\n\n'
.format('#' * (heading_level + 1),
insn.decode))
# Show operation pseudo-code if given
if insn.operation is not None:
parts.append('{} Operation\n\n'
'```python3\n'
'{}\n'
'```\n\n'
.format('#' * (heading_level + 1),
insn.operation))
return ''.join(parts)
def render_insns(insns: InsnsFile, heading_level: int) -> str:
'''Render documentation for all instructions'''
parts = []
for group, group_insns in insns.grouped_insns():
parts.append('{} {}\n\n'.format('#' * heading_level, group.title))
parts.append(group.doc)
parts.append('\n\n')
if not group_insns:
parts.append('No instructions in group.\n\n')
continue
for insn in group_insns:
parts.append(render_insn(insn, heading_level + 1))
return ''.join(parts)
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('yaml_file')
args = parser.parse_args()
try:
insns = load_file(args.yaml_file)
except RuntimeError as err:
sys.stderr.write('{}\n'.format(err))
return 1
print(render_insns(insns, 2))
return 0
if __name__ == '__main__':
sys.exit(main())
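# Example invocation (the script and file names below are only illustrative, not
# taken from the surrounding project):
#
#     python yaml_to_doc.py insns.yml > isa.md
#
# which writes the generated Markdown for every instruction group to isa.md.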
| [
"[email protected]"
] | |
d91558179efd30f1c3e5e855ef97de00d8721ad3 | 58cfad962e57b935e7782bb214a2008d689751d6 | /xero_python/payrolluk/models/employee_leave_type_object.py | 352b44d1e9d4189a25aaae9039a94a68a273e287 | [
"MIT"
] | permissive | XeroAPI/xero-python | ce43c060c216a42efd5f47159987468deb0e4622 | 07efa3bfc87a3bd08ba217dd2b642f6a3515ddff | refs/heads/master | 2023-07-21T04:01:27.461727 | 2023-07-11T02:35:44 | 2023-07-11T02:35:44 | 240,158,613 | 109 | 42 | MIT | 2023-07-11T02:35:45 | 2020-02-13T02:17:05 | Python | UTF-8 | Python | false | false | 3,178 | py | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EmployeeLeaveTypeObject(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"pagination": "Pagination",
"problem": "Problem",
"leave_type": "EmployeeLeaveType",
}
attribute_map = {
"pagination": "pagination",
"problem": "problem",
"leave_type": "leaveType",
}
def __init__(self, pagination=None, problem=None, leave_type=None): # noqa: E501
"""EmployeeLeaveTypeObject - a model defined in OpenAPI""" # noqa: E501
self._pagination = None
self._problem = None
self._leave_type = None
self.discriminator = None
if pagination is not None:
self.pagination = pagination
if problem is not None:
self.problem = problem
if leave_type is not None:
self.leave_type = leave_type
@property
def pagination(self):
"""Gets the pagination of this EmployeeLeaveTypeObject. # noqa: E501
:return: The pagination of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this EmployeeLeaveTypeObject.
:param pagination: The pagination of this EmployeeLeaveTypeObject. # noqa: E501
:type: Pagination
"""
self._pagination = pagination
@property
def problem(self):
"""Gets the problem of this EmployeeLeaveTypeObject. # noqa: E501
:return: The problem of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: Problem
"""
return self._problem
@problem.setter
def problem(self, problem):
"""Sets the problem of this EmployeeLeaveTypeObject.
:param problem: The problem of this EmployeeLeaveTypeObject. # noqa: E501
:type: Problem
"""
self._problem = problem
@property
def leave_type(self):
"""Gets the leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:return: The leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: EmployeeLeaveType
"""
return self._leave_type
@leave_type.setter
def leave_type(self, leave_type):
"""Sets the leave_type of this EmployeeLeaveTypeObject.
:param leave_type: The leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:type: EmployeeLeaveType
"""
self._leave_type = leave_type
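# Minimal usage sketch (illustrative only; the Pagination and EmployeeLeaveType
# instances are assumed to come from the other generated models in this package):
#
#     obj = EmployeeLeaveTypeObject(leave_type=some_leave_type)
#     obj.pagination = some_pagination
#     print(obj.leave_type)
#
# Each attribute simply round-trips through the property setters defined above.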
| [
"[email protected]"
] | |
0ccc0e47066590b574b24615b6b772781536b4e1 | bfdde1d758e9b366f0eee27599e56322340e93e0 | /version1/feature4StructuralVAr/f4StrucVarTesting/Bayesian_Classifier.py | 3c1caaed13808eaf1002f0a67a910966d26125de | [] | no_license | A-Infinite/Sarcasm-Detection | cb22db5c38705a5d1c3ed10bbdbf1ba4d9a16227 | fd42ece85a9412b0e6b377874c82fe9544a19701 | refs/heads/master | 2020-03-08T14:13:02.085085 | 2018-06-05T07:39:03 | 2018-06-05T07:39:03 | 128,179,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | import numpy as np
import csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.naive_bayes import GaussianNB
x = []
y = []
with open('feature4StrucVar.csv') as csvfile:
reader = csv.reader(csvfile, delimiter = ' ')
for row in reader:
x.append(row[0: (len(row))])
for i in x:
i[0] = i[0].split(',')
y.append(i[0][-1])
del i[0][-1]
X = []
for i in x:
X.append(i[0])
Y = []
for i in y:
Y.append(i)
#print(str(x[0]) + "\n")
#print(str(x[0]) + " " + str(y[4000]) + "\n")
#X = np.asarray(X)
#Y = np.asarray(Y)
x = []
y = []
for i in X:
temp = []
for j in i:
temp.append(float(j))
x.append(temp)
for i in Y:
temp = []
for j in i:
temp.append(float(j))
y.append(temp)
#print(y[0])
x = np.asarray(x)
y = np.asarray(y)
#print(x[0])
#Naive Bayes Classifier
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 42)
clfnb = GaussianNB()
clfnb.fit(x_train, y_train)
print("Naive Bayes classifier : ")
print(clfnb.score(x_test, y_test))
print("\n")
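# Rough follow-up sketch (illustrative, not part of the original experiment): the
# fitted model can also be inspected prediction by prediction on the held-out split.
#
#     predictions = clfnb.predict(x_test)
#     for predicted, actual in zip(predictions[:10], y_test[:10]):
#         print(predicted, actual)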
#****************************************************************************************** | [
"[email protected]"
] | |
9a10bb495cf4fc83b00c8d9c97248edd59a5dfc9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/323/usersdata/284/89341/submittedfiles/mdc.py | d1667dac5fa29ea45712274a24489656ba05b237 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # -*- coding: utf-8 -*-
import math
def divisor(x):
    # collect every divisor of x
    divisors = []
    for n in range(1, x + 1, 1):
        if x % n == 0:
            divisors.append(n)
    return divisors
print(divisor(int(input('enter the value: '))))
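# Worked example: divisor(12) collects [1, 2, 3, 4, 6, 12], since those are exactly
# the n in 1..12 with 12 % n == 0.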
| [
"[email protected]"
] | |
74d26786f508654108a2147365f55668187dad26 | 9044b440bed2b8407ed9e04f7fb9d3d2a7593136 | /recommendation/knet/krecall/ops/openblas_top_k/test.py | 942125ff70e3d537f3ca6765d04febc83c0cce72 | [] | no_license | xuzhezhaozhao/ai | d4264f5d15cc5fa514e81adb06eb83731a0ca818 | 925cbd31ad79f8827e2c3c706f4b51910f9f85d1 | refs/heads/master | 2022-01-22T07:04:29.082590 | 2022-01-17T06:49:39 | 2022-01-17T06:49:39 | 136,691,051 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #! /usr/bin/env python
# -*- coding=utf8 -*-
import tensorflow as tf
import numpy as np
import struct
openblas_top_k_ops = tf.load_op_library('openblas_top_k_ops.so')
openblas_top_k = openblas_top_k_ops.openblas_top_k
WEIGHTS_PATH = 'weights.bin'
BIASES_PATH = 'biases.bin'
weights = np.arange(100).reshape([20, 5]).astype(float)
biases = np.array([0.1]*20)
def save_numpy_float_array(array, filename):
with open(filename, 'wb') as f:
for d in array.shape:
f.write(struct.pack('<q', d))
fl = array.flat
for v in fl:
f.write(struct.pack('<f', v))
save_numpy_float_array(weights, WEIGHTS_PATH)
save_numpy_float_array(biases, BIASES_PATH)
sess = tf.Session()
user_vector = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
values, indices = openblas_top_k(input=user_vector, k=5,
weights_path=WEIGHTS_PATH,
biases_path=BIASES_PATH)
values, indices = sess.run([values, indices])
print(values)
print(indices)
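# Hand check of the expected result (assuming the op computes weights.dot(input) +
# biases and returns the k largest entries): row i of weights sums to 25*i + 10, so
# the all-ones user vector plus the 0.1 bias gives logits 10.1, 35.1, ..., 485.1 and
# the printed top-5 should be indices [19, 18, 17, 16, 15] with values
# [485.1, 460.1, 435.1, 410.1, 385.1].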
| [
"[email protected]"
] | |
9a337d7549581b57b42242cdd52a155c5dcec46e | 55cf061ccf7cff9e02190ea8dec10a3fc5e82729 | /tutorials/3.CodeOrganization/Person.py | 8b427a49315da90747d8f4f3b9f8e0a3baab66e0 | [
"MIT"
] | permissive | Jess3Jane/pyforms | a9e491310590f510ece910beabb2ea291273cfa1 | f34816db018f05c581ede42804771faa39e78824 | refs/heads/master | 2021-04-26T23:53:08.877082 | 2018-03-05T06:36:07 | 2018-03-05T06:36:07 | 123,875,438 | 0 | 0 | MIT | 2018-03-05T06:33:19 | 2018-03-05T06:33:19 | null | UTF-8 | Python | false | false | 289 | py |
class Person(object):
def __init__(self, firstName, middleName, lastName):
self._firstName = firstName
self._middleName = middleName
self._lastName = lastName
@property
def fullName(self):
return "{0} {1} {2}".format(self._firstName, self._middleName, self._lastName) | [
"[email protected]"
] | |
d22998740bc6893a04a40937d946e100dbe6da98 | 8c1aa957a41954daac70b13f1be06df0c4046bb2 | /wagtailwebsitebuilder/home/migrations/0020_auto_20200423_0206.py | cd8b535ae009fd5d82f56238976b661409b23530 | [] | no_license | hanztura/wagtailwebsitebuilder | 6c1a2358d53877e4f70d70e5c7c6b472fabec974 | f56d1b799f9eda53b5596ed882b60df154581cc5 | refs/heads/master | 2021-05-21T08:30:16.170885 | 2020-08-29T22:35:59 | 2020-08-29T22:35:59 | 252,619,323 | 1 | 0 | null | 2021-04-16T20:26:46 | 2020-04-03T03:01:27 | Python | UTF-8 | Python | false | false | 1,538 | py | # Generated by Django 2.2.12 on 2020-04-23 02:06
from django.db import migrations
import puputextension.helpers
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0019_auto_20200422_1457'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('with_id', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('paragraph', wagtail.core.blocks.RichTextBlock())], template='home/blocks/with_id.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('table', wagtail.contrib.table_block.blocks.TableBlock(table_options={'contextMenu': ['row_above', 'row_below', '---------', 'col_left', 'col_right', '---------', 'remove_row', 'remove_col', '---------', 'undo', 'redo', '---------', 'copy', 'cut---------', 'alignment'], 'minSpareRows': 0, 'startCols': 3, 'startRows': 3})), ('code', wagtail.core.blocks.StructBlock([('language', wagtail.core.blocks.ChoiceBlock(blank=False, choices=[('bash', 'Bash/Shell'), ('java', 'Java'), ('python3', 'Python 3'), ('javascript', 'Javascript'), ('css', 'CSS'), ('html', 'HTML')], null=False)), ('caption', wagtail.core.blocks.CharBlock(blank=True, nullable=True, required=False)), ('code', puputextension.helpers.CodeTextBlock())])), ('image', wagtail.images.blocks.ImageChooserBlock())]),
),
]
| [
"[email protected]"
] | |
884c23b9dde9349f41a3614ef6a4675579014561 | 5b4c803f68e52849a1c1093aac503efc423ad132 | /UnPyc/tests/tests/CFG/2/pass/pass_while+else_try+except+else+finally_.py | 08b949ad1aa4d769c103db68c1a684ce80ac8cfa | [] | no_license | Prashant-Jonny/UnPyc | 9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c | 4b9d4ab96dfc53a0b4e06972443e1402e9dc034f | refs/heads/master | 2021-01-17T12:03:17.314248 | 2013-02-22T07:22:35 | 2013-02-22T07:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | try:
while 1:
pass
else:
pass
except:
while 1:
pass
else:
pass
else:
while 1:
pass
else:
pass
finally:
while 1:
pass
else:
pass
| [
"[email protected]"
] | |
ebaf97305f4ceba81dcd7be04a37e23fcef110dd | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/crcm5/nemo_vs_hostetler/main_for_lake_effect_snow.py | d06ec1dc7eb5bafd751cbec8f504961727056066 | [] | no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 17,504 | py |
# at 10km resolution 100km distance is approximated as 10 * dx
import os
from collections import OrderedDict
from matplotlib import cm
from matplotlib.colors import BoundaryNorm
from matplotlib.gridspec import GridSpec
from pathlib import Path
from rpn import level_kinds
from rpn.rpn import RPN
from application_properties import main_decorator
from crcm5.nemo_vs_hostetler import nemo_hl_util
from crcm5.nemo_vs_hostetler import commons
import numpy as np
import matplotlib.pyplot as plt
from crcm5.nemo_vs_hostetler.rpn_lakeice_manager import RPNLakeIceManager
from util import plot_utils
from netCDF4 import Dataset, num2date, MFDataset
img_folder = "nemo_vs_hostetler"
def get_mask_of_points_near_lakes(lake_mask, npoints_radius=10):
"""
Get the mask of points near lakes where lake effect snow is probable
:param lake_mask:
:param npoints_radius:
:return:
"""
i_list, j_list = np.where(lake_mask)
nx, ny = lake_mask.shape
the_mask = np.zeros_like(lake_mask, dtype=np.bool)
for i, j in zip(i_list, j_list):
imin = max(0, i - npoints_radius)
imax = min(nx - 1, i + npoints_radius)
jmin = max(0, j - npoints_radius)
jmax = min(ny - 1, j + npoints_radius)
the_mask[imin:imax + 1, jmin:jmax + 1] = True
the_mask[lake_mask] = False
return the_mask
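# Illustrative example (hypothetical indices): with npoints_radius=10 on the ~10 km
# grid assumed at the top of this file, a single lake cell at (50, 60) marks the
# block of cells (40..60, 50..70) as the potential lake-effect region -- roughly a
# 100 km buffer -- and lake cells themselves are then cleared from the mask.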
def get_map_ij_to_nonlocal_mask(region_of_lake_effect_snow_mask, lake_mask, npoints_radius=50):
"""
    Return the non-local vicinity mask for each point in the region of lake-effect snow
:param region_of_lake_effect_snow_mask:
:param lake_mask:
:param npoints_radius:
:return:
"""
i_arr, j_arr = np.where(region_of_lake_effect_snow_mask)
nx, ny = region_of_lake_effect_snow_mask.shape
result = {}
for i, j in zip(i_arr, j_arr):
the_mask = np.zeros_like(region_of_lake_effect_snow_mask)
imin = max(0, i - npoints_radius)
imax = min(nx - 1, i + npoints_radius)
jmin = max(0, j - npoints_radius)
jmax = min(ny - 1, j + npoints_radius)
the_mask[imin:imax + 1, jmin:jmax + 1] = True
the_mask[lake_mask | region_of_lake_effect_snow_mask] = False
result[(i, j)] = the_mask
return result
def get_wind_blows_from_lake_mask(lake_mask, lake_effect_region, u_field, v_field, dx=0.1, dy=0.1, lake_ice_frac=None, lats_rot=None):
    """
    Trace the wind downstream from every lake cell that is not ice covered and
    return the mask of cells in lake_effect_region crossed by those trajectories.
    """
if lake_ice_frac is None:
lake_ice_frac = np.zeros_like(lake_mask)
dtx = np.asarray(dx / np.abs(u_field))
if lats_rot is not None:
dtx *= np.cos(np.radians(lats_rot))
dty = np.asarray(dy / np.abs(v_field))
wind_blows_from_lake = np.zeros_like(lake_mask, dtype=np.bool)
nx, ny = lake_mask.shape
for i, j in zip(*np.where(lake_mask)):
i1 = i
j1 = j
nsteps = 0
if lake_ice_frac[i, j] > 0.7:
continue
while True:
if dtx[i1, j1] < dty[i1, j1] / 3.0:
sgn = np.sign(u_field[i1, j1])
i1 += int(sgn + 0.5 * sgn)
elif dtx[i1, j1] > dty[i1, j1] / 3.0:
sgn = np.sign(v_field[i1, j1])
j1 += int(sgn + 0.5 * sgn)
else:
i1 += int(np.sign(u_field[i1, j1]) * 1.5)
j1 += int(np.sign(v_field[i1, j1]) * 1.5)
nsteps += 1
if (i1 < 0) or (i1 >= nx) or (j1 < 0) or (j1 >= ny):
break
else:
if not (lake_effect_region[i1, j1] or lake_mask[i1, j1]):
break
else:
if wind_blows_from_lake[i1, j1]:
break
else:
wind_blows_from_lake[i1, j1] = True
return wind_blows_from_lake & lake_effect_region
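# Step-selection example (hypothetical values, ignoring the cos(latitude) factor on
# dtx): with dx = dy = 0.1 and local wind components u = 5, v = 1, dtx = 0.02 and
# dty = 0.1, so dtx < dty / 3 and the trace advances one cell in the sign of u along
# x before re-evaluating at the new location.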
@main_decorator
def main():
start_year = 1979
end_year = 1981
HL_LABEL = "CRCM5_HL"
NEMO_LABEL = "CRCM5_NEMO"
dx = 0.1
dy = 0.1
file_prefix = "pm"
PR_level = -1
PR_level_type = level_kinds.ARBITRARY
tprecip_vname = "PR"
sprecip_vname = "SN"
TT_level = 1
TT_level_type = level_kinds.HYBRID
sim_label_to_path = OrderedDict(
[(HL_LABEL, "/RESCUE/skynet3_rech1/huziy/CNRCWP/C5/2016/2-year-runs/coupled-GL+stfl_oneway/Samples"),
(NEMO_LABEL, "/HOME/huziy/skynet3_rech1/CNRCWP/C5/2016/2-year-runs/coupled-GL+stfl/Samples")]
)
# get a coord file ... (use pm* files, since they contain NEM1 variable)
# Should be NEMO_LABEL, since the hostetler case does not calculate NEM? vars
coord_file = ""
found_coord_file = False
for mdir in os.listdir(sim_label_to_path[NEMO_LABEL]):
mdir_path = os.path.join(sim_label_to_path[NEMO_LABEL], mdir)
if not os.path.isdir(mdir_path):
continue
for fn in os.listdir(mdir_path):
if fn[:2] not in ["pm", ]:
continue
if fn[-9:-1] == "0" * 8:
continue
coord_file = os.path.join(mdir_path, fn)
found_coord_file = True
if found_coord_file:
break
bmp, lons, lats = nemo_hl_util.get_basemap_obj_and_coords_from_rpn_file(path=coord_file)
xx, yy = bmp(lons, lats)
r = RPN(coord_file)
lats_rot = r.get_first_record_for_name("^^")
lons_rot = r.get_first_record_for_name(">>")
lake_mask = np.greater(commons.get_nemo_lake_mask_from_rpn(coord_file, vname="NEM1"), 0)
# Get the 100km region around the lakes
lake_effect_regions = get_mask_of_points_near_lakes(lake_mask, npoints_radius=10)
local_amplification_limit = 4 * 1e-2 / (24.0 * 3600.0)
# the radius is 500 km, i.e. 50 gridpoints
ij_to_non_local_mask = get_map_ij_to_nonlocal_mask(lake_effect_regions, lake_mask, npoints_radius=50)
# Snowfall amount criteria (>= 10 cm)
lower_snow_fall_limit = 10 * 1e-2 / (24.0 * 3600.0) # convert to M/s
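    # Unit check (arithmetic only): 10 cm/day = 0.10 m / 86400 s ~ 1.16e-6 m/s, and the
    # 4 cm/day local amplification threshold above works out to ~ 4.6e-7 m/s.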
# wind blows from lake: time limit
wind_blows_from_lake_time_limit_hours = 6.0
months_of_interest = [10, 11, 12, 1, 2, 3, 4, 5]
sim_label_to_duration_mean = {}
sim_label_to_lake_effect_sprecip_mean = {}
sim_label_to_year_to_lake_effect_snow_fall_duration = OrderedDict([(sim_label, OrderedDict()) for sim_label in sim_label_to_path])
for sim_label, samples_dir_path in sim_label_to_path.items():
# calculate the composites for the (Oct - March) period
lake_effect_snowfall_mean_duration = None # the duration is in time steps
lake_effect_mean_snowrate_m_per_s = None
snowfall_current_event = None
duration_current_event = None # the duration is in time steps
n_events = None
sn_previous = None
time_wind_blows_from_lake = None
samples_dir = Path(samples_dir_path)
snowfall_file = samples_dir.parent / "{}_snow_fall_{}-{}.nc".format(sim_label, start_year, end_year)
wind_components_file = samples_dir.parent / "rotated_wind_{}.nc".format(sim_label)
ds_wind = Dataset(str(wind_components_file))
print("Working on {} ...".format(sim_label))
lkice_manager = RPNLakeIceManager(samples_dir=samples_dir)
with Dataset(str(snowfall_file)) as ds:
time_var = ds.variables["time"]
nt = time_var.shape[0]
snowfall_var_m_per_s = ds.variables["SN"]
u_var = ds_wind.variables["UU"]
v_var = ds_wind.variables["VV"]
time_var_wind = ds_wind.variables["time"]
assert time_var_wind.shape == time_var.shape
assert time_var_wind[0] == time_var[0]
assert time_var_wind[-1] == time_var_wind[-1]
assert (u_var.shape == snowfall_var_m_per_s.shape) and (v_var.shape == snowfall_var_m_per_s.shape)
times = num2date(time_var[:], time_var.units)
dt_seconds = (times[1] - times[0]).total_seconds()
year_to_lake_effect_snow_fall_duration = sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label]
for ti, t in enumerate(times):
if t.month not in months_of_interest:
continue
if t.year > end_year or t.year < start_year:
continue
sn_current = snowfall_var_m_per_s[ti, :, :]
if t.year not in year_to_lake_effect_snow_fall_duration:
year_to_lake_effect_snow_fall_duration[t.year] = np.zeros_like(sn_current)
# initialize aggragtion fields
if lake_effect_snowfall_mean_duration is None:
lake_effect_snowfall_mean_duration = np.zeros_like(sn_current)
lake_effect_mean_snowrate_m_per_s = np.zeros_like(sn_current)
n_events = np.zeros_like(sn_current)
snowfall_current_event = np.zeros_like(sn_current)
duration_current_event = np.zeros_like(sn_current)
sn_previous = np.zeros_like(sn_current)
time_wind_blows_from_lake = np.zeros_like(sn_current)
where_lake_effect_snow = (sn_current > lower_snow_fall_limit) & lake_effect_regions & (~lake_mask)
# add a condition on the local amplification
i_arr, j_arr = np.where(where_lake_effect_snow)
for i, j in zip(i_arr, j_arr):
the_mask = ij_to_non_local_mask[(i, j)]
where_lake_effect_snow[i, j] = sn_current[the_mask].mean() < sn_current[i, j] - local_amplification_limit
# add a condition on the wind fetch from lakes and ice fraction.
wind_blows_from_lake = get_wind_blows_from_lake_mask(lake_mask, lake_effect_regions, u_var[ti, :, :], v_var[ti, :, :],
dx=dx, dy=dy,
lake_ice_frac=lkice_manager.get_lake_fraction_for_date(the_date=t),
lats_rot=lats_rot)
time_wind_blows_from_lake[wind_blows_from_lake] += dt_seconds / 3600.0
where_lake_effect_snow = where_lake_effect_snow & (time_wind_blows_from_lake >= wind_blows_from_lake_time_limit_hours)
time_wind_blows_from_lake[~wind_blows_from_lake] = 0
# update accumulators for current lake effect snowfall events
snowfall_current_event[where_lake_effect_snow] += sn_current[where_lake_effect_snow]
duration_current_event[where_lake_effect_snow] += 1.0
where_lake_effect_snow_finished = (~where_lake_effect_snow) & (sn_previous > lower_snow_fall_limit)
# recalculate mean lake effect snowfall duration and rate
                lake_effect_snowfall_mean_duration[where_lake_effect_snow_finished] = (
                    lake_effect_snowfall_mean_duration[where_lake_effect_snow_finished] *
                    n_events[where_lake_effect_snow_finished] +
                    duration_current_event[where_lake_effect_snow_finished]
                ) / (n_events[where_lake_effect_snow_finished] + 1)
                lake_effect_mean_snowrate_m_per_s[where_lake_effect_snow_finished] = (
                    lake_effect_mean_snowrate_m_per_s[where_lake_effect_snow_finished] *
                    n_events[where_lake_effect_snow_finished] +
                    snowfall_current_event[where_lake_effect_snow_finished]
                ) / (n_events[where_lake_effect_snow_finished] + 1)
                year_to_lake_effect_snow_fall_duration[t.year][where_lake_effect_snow_finished] += (
                    duration_current_event[where_lake_effect_snow_finished] * dt_seconds)
# reset the current accumulators
snowfall_current_event[where_lake_effect_snow_finished] = 0
duration_current_event[where_lake_effect_snow_finished] = 0
n_events[where_lake_effect_snow_finished] += 1
sn_previous = sn_current
if ti % 1000 == 0:
print("Done {} of {}".format(ti + 1, nt))
# normalization
lake_effect_snowfall_mean_duration *= dt_seconds / (24 * 60 * 60.0) # convert to days
lake_effect_mean_snowrate_m_per_s = np.ma.masked_where(~lake_effect_regions, lake_effect_mean_snowrate_m_per_s)
lake_effect_snowfall_mean_duration = np.ma.masked_where(~lake_effect_regions, lake_effect_snowfall_mean_duration)
for y, yearly_durations in sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label].items():
sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label][y] = np.ma.masked_where(~lake_effect_regions, yearly_durations) / (24 * 3600.0)
sim_label_to_duration_mean[sim_label] = lake_effect_snowfall_mean_duration
sim_label_to_lake_effect_sprecip_mean[sim_label] = lake_effect_mean_snowrate_m_per_s * 100 * 24 * 3600.0
# close the file with rotated wind components
ds_wind.close()
plot_utils.apply_plot_params(font_size=6, width_cm=18, height_cm=10)
fig = plt.figure()
gs = GridSpec(3, 3)
duration_clevs = 20 # np.arange(0, 1.1, 0.1)
snowrate_clevs = 20 # np.arange(0, 36, 4)
duration_clevs_diff = 20 # np.arange(-1, 1.1, 0.1)
snowrate_clevs_diff = 20 # np.arange(-10, 12, 2)
vmax_duration = None
vmax_snowrate = None
vmax_days_per_year = None
for row, sim_label in enumerate(sim_label_to_path):
if vmax_duration is None:
vmax_duration = sim_label_to_duration_mean[sim_label].max()
vmax_snowrate = sim_label_to_lake_effect_sprecip_mean[sim_label].max()
vmax_days_per_year = sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label][1980].max()
else:
vmax_duration = max(vmax_duration, sim_label_to_duration_mean[sim_label].max())
vmax_snowrate = max(vmax_snowrate, sim_label_to_lake_effect_sprecip_mean[sim_label].max())
vmax_days_per_year = max(vmax_days_per_year, sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label][1980].max())
for col, sim_label in enumerate(sim_label_to_path):
# plot the duration of lake-effect snow events
ax = fig.add_subplot(gs[0, col])
cs = bmp.pcolormesh(xx, yy, sim_label_to_duration_mean[sim_label], ax=ax, vmin=0, vmax=vmax_duration, cmap="rainbow_r")
bmp.drawcoastlines(linewidth=0.3, ax=ax)
plt.colorbar(cs, ax=ax)
ax.set_title("Duration (days)")
ax.set_xlabel("{}".format(sim_label))
# plot the mean intensity of the lake-effect snow events
ax = fig.add_subplot(gs[1, col])
cs = bmp.pcolormesh(xx, yy, sim_label_to_lake_effect_sprecip_mean[sim_label],
ax=ax, vmax=vmax_snowrate, vmin=lower_snow_fall_limit, cmap="rainbow_r")
bmp.drawcoastlines(linewidth=0.3, ax=ax)
plt.colorbar(cs, ax=ax)
ax.set_title("Snowfall rate, (cm/day)")
ax.set_xlabel("{}".format(sim_label))
# plot the mean duration of the lake effect snowfall events per year
ax = fig.add_subplot(gs[2, col])
to_plot = sim_label_to_year_to_lake_effect_snow_fall_duration[sim_label][1980]
clevs = [0, 0.1, ] + list(np.arange(0.4, 3.2, 0.4))
bn = BoundaryNorm(clevs, len(clevs))
cmap = cm.get_cmap("spectral_r", len(clevs))
cs = bmp.pcolormesh(xx, yy, to_plot, ax=ax, norm=bn, cmap=cmap)
bmp.drawcoastlines(linewidth=0.3, ax=ax)
plt.colorbar(cs, ax=ax, extend="max")
ax.set_title("# Days per year")
ax.set_xlabel("{}".format(sim_label))
# plot the difference
# plot the duration of lake-effect snow events
col = 2
cmap = cm.get_cmap("seismic", 40)
vmin = -np.max(sim_label_to_duration_mean[NEMO_LABEL] - sim_label_to_duration_mean[HL_LABEL])
ax = fig.add_subplot(gs[0, col])
cs = bmp.pcolormesh(xx, yy, sim_label_to_duration_mean[NEMO_LABEL] - sim_label_to_duration_mean[HL_LABEL], vmin=vmin, ax=ax, cmap=cmap)
plt.colorbar(cs, ax=ax)
bmp.drawcoastlines(linewidth=0.3, ax=ax)
ax.set_title("Duration (days)")
ax.set_xlabel("{} - {}".format(NEMO_LABEL, HL_LABEL))
# plot the mean intensity of the lake-effect snow events
ax = fig.add_subplot(gs[1, col])
vmin = -np.max(sim_label_to_lake_effect_sprecip_mean[NEMO_LABEL] - sim_label_to_lake_effect_sprecip_mean[HL_LABEL])
cs = bmp.pcolormesh(xx, yy, sim_label_to_lake_effect_sprecip_mean[NEMO_LABEL] - sim_label_to_lake_effect_sprecip_mean[HL_LABEL], ax=ax, vmin=vmin, cmap=cmap) # convert to cm/day
bmp.drawcoastlines(linewidth=0.3, ax=ax)
plt.colorbar(cs, ax=ax)
ax.set_title("Snowfall rate, (cm/day)")
ax.set_xlabel("{} - {}".format(NEMO_LABEL, HL_LABEL))
# plot the mean duration of the lake effect snowfall events per year
ax = fig.add_subplot(gs[2, col])
to_plot = (sim_label_to_year_to_lake_effect_snow_fall_duration[NEMO_LABEL][1980] - sim_label_to_year_to_lake_effect_snow_fall_duration[HL_LABEL][1980])
cs = bmp.pcolormesh(xx, yy, to_plot, ax=ax, vmin=-to_plot.max(), cmap="seismic")
bmp.drawcoastlines(linewidth=0.3, ax=ax)
plt.colorbar(cs, ax=ax)
ax.set_title("# Days per year")
ax.set_xlabel("{} - {}".format(NEMO_LABEL, HL_LABEL))
fig.tight_layout()
fig.savefig(os.path.join(img_folder, "lake_effect_snow_10cm_limit_and_loc_ampl_{}-{}.png".format(start_year, end_year)), dpi=commons.dpi, transparent=True, bbox_inches="tight")
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
ebb1eba75e644fcc50606ed8fd173e72a66784ad | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2788/60617/264414.py | 2b2b3b9b9de2320c65e7ff5f40cb8ce89ca0f4d8 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | def dance():
n=int(input())
boys=list(map(int, input().split(" ")))
m=int(input())
girls=list(map(int, input().split(" ")))
pairs=0
boys.sort()
girls.sort()
if n<=m:
for ele in boys:
if ele-1 in girls:
pairs+=1
girls.remove(ele-1)
elif ele in girls:
pairs+=1
girls.remove(ele)
elif ele+1 in girls:
pairs+=1
girls.remove(ele+1)
else:
for ele in girls:
if ele-1 in boys:
pairs+=1
boys.remove(ele-1)
elif ele in boys:
pairs+=1
boys.remove(ele)
elif ele+1 in boys:
pairs+=1
boys.remove(ele+1)
print(pairs)
if __name__=='__main__':
dance() | [
"[email protected]"
] | |
d458f4e83c13f6a8060121c872a13308240f3fc4 | 2fe58e7f6bfc3efdb78ca56f72a4e2a75a24c270 | /eric/eric6/Plugins/UiExtensionPlugins/Translator/ConfigurationPage/__init__.py | c589525eb78455712c21edf6336a189a21bfd13e | [] | no_license | testerclub/eric6-20.3 | 3053e0e6962060b213f5df329ee331a4893d18e6 | bba0b9f13fa3eb84938422732d751219bc3e29e2 | refs/heads/master | 2023-03-18T08:24:03.472297 | 2020-03-14T06:44:14 | 2020-03-14T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014 - 2020 Detlev Offenbach <[email protected]>
#
"""
Package implementing the Translator page of the configuration dialog.
"""
| [
"[email protected]"
] | |
ac73581c07d933e9d8e3d52a3f3a553ed7d1a77b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03680/s252443262.py | 3378adf8c331e679f71f21f1cc7f8e53ad921529 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import sys
from sys import exit
from collections import deque
from bisect import bisect_left, bisect_right, insort_left, insort_right #func(リスト,値)
from heapq import heapify, heappop, heappush
sys.setrecursionlimit(10**6)
INF = 10**20
def mint():
return map(int,input().split())
def lint():
    return list(map(int, input().split()))
N = int(input())
a = [int(input()) for _ in range(N)]
tmp = 1
for i in range(1,N+1):
tmp = a[tmp-1]
if tmp==2:
print(i)
exit()
print(-1) | [
"[email protected]"
] | |
4d83f3a47a7ed8fd3ea7a4cdeae9b47ccee67219 | 0b25dc3f9b4ef736e739aadddec33b96dd65a0c8 | /算法/__init__.py | be81ea962744f6e93efcec51c9ad969ded93ff0e | [] | no_license | ttp55/LearnPy | b123f44a74e4364771491c572705742c15eb33ff | 1530e158bde152e5c585f496dd1e5ffcffdb73bc | refs/heads/master | 2023-05-11T13:02:30.157285 | 2023-05-08T07:13:57 | 2023-05-08T07:13:57 | 196,953,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | # @Time : 2021/6/10 15:49
# @Author : WZG
# --coding:utf-8-- | [
"[email protected]"
] | |
46fe99471204a18d79b8e197d11e569f943d6c86 | 07e2e27f20531452bb070597803a52f1e4f4e4a0 | /average.py | b4d6974dfd0ee869169925f5589f308fc315d0d5 | [] | no_license | ashantisease19/lab-08-loops | de413028d7ede6aee0cf98def3aa63b4bc3ba066 | 0117f23dd1371b01bc31fdb3f8aa952bf1a28516 | refs/heads/master | 2023-04-07T08:02:27.834787 | 2021-04-11T03:16:33 | 2021-04-11T03:16:33 | 354,944,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | average = 0
sum = 0
for i in range (0,4,1):
userinput = input("Just give me a number.")
usernum = int(userinput, 10)
sum = sum + usernum
print("So you put the number " + str(usernum) + " and the current sum is " + str(sum))
average = sum / 4
print("Okay, bro, so the average is " + str(average))
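# Worked example (not produced by the script): entering 2, 4, 6 and 8 gives running
# sums 2, 6, 12 and 20, so the reported average is 20 / 4 = 5.0.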
| [
"[email protected]"
] | |
f7762269d29611eb2b9e9e3d4461c52fc55f5133 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/kernel_tests/sparse_xent_op_test.py | f4515e59d6b4845f25afeca20253efd6b0d5f902 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 14,627 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.cached_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
@test_util.run_deprecated_v1
def testInvalidLabel(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
if test.is_built_with_cuda() and test.is_gpu_available():
with self.session(use_gpu=True) as sess:
loss, backprop = (
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
features, labels))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose(
[[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
tf_backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.session(use_gpu=False) as sess:
loss, backprop = (
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
with self.assertRaisesOpError("Received a label value of"):
self.evaluate([loss, backprop])
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a hard 1, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
@test_util.run_deprecated_v1
def testLabelsPlaceholderScalar(self):
with self.session(use_gpu=True):
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.session(use_gpu=True):
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, self.evaluate(loss))
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(use_gpu=True):
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testSecondGradient(self):
images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3))
weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
axis=1)
logits = math_ops.matmul(images_placeholder, weights_with_zeros)
cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels_placeholder, logits=logits)
loss = math_ops.reduce_mean(cross_entropy)
    # Taking the second gradient should fail, since it is not
# yet supported.
with self.assertRaisesRegexp(LookupError,
"explicitly disabled"):
_ = gradients_impl.hessians(loss, [weights])
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
with self.cached_session(use_gpu=True) as sess:
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
backprop = loss.op.inputs[0].op.outputs[1]
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
@test_util.run_deprecated_v1
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
@test_util.run_deprecated_v1
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
@test_util.run_deprecated_v1
def testScalarHandling(self):
with self.session(use_gpu=False) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits, labels, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
# Warm in
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with test_util.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
| [
"[email protected]"
] | |
eb1c2d999d1c52084a21a951371b6816ed211083 | 1d1c1dce863a4e8b6c9987e9c50fa46908aa0ff6 | /pipeline/feature-extraction/seri/extraction_lbp_top_codebook_final.py | c8d81fa0b1fc3d0357cfff5d8307e440ad06ff20 | [] | no_license | I2Cvb/lemaitre-2016-apr | 4692f64b365c3e8095c96944431e8e03bc9ecc7e | 266dc93026fa70c83a34790c1bd9ae14a23492ba | refs/heads/master | 2021-01-18T19:30:26.845275 | 2016-03-19T21:11:11 | 2016-03-19T21:11:11 | 54,284,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | py | #title :extraction_codebook.py
#description :This will create a header for a python script.
#author :Guillaume Lemaitre
#date :2015/06/07
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
# Panda library
import pandas as pd
# OS library
import os
from os.path import join
# SYS library
import sys
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
# Multiprocessing library
import multiprocessing
from protoclass.extraction.codebook import *
#########################################################################
### Definition of the parallel codebook
def CBComputation(idx_test, (pat_test_norm, pat_test_dme),
filename_normal, filename_dme, nw):
pat_train_norm = np.delete(filename_normal, idx_test)
pat_train_dme = np.delete(filename_dme, idx_test)
# Open the current training data
training_data = np.concatenate((np.concatenate([get_lbp_data(f) for f in pat_train_norm],
axis=0),
np.concatenate([get_lbp_data(f) for f in pat_train_dme],
axis=0)),
axis=0)
print 'The size of the training dataset is {}'.format(training_data.shape)
# Create the codebook using the training data
num_cores = 8
cbook = [CodeBook(n_words=w, init='k-means++', n_jobs=num_cores, n_init=5)
for w in nw]
# Fit each code book for the data currently open
for idx_cb, c in enumerate(cbook):
print 'Fitting for dictionary with {} words'.format(nw[idx_cb])
c.fit(training_data)
return cbook
################################################################################################
################################################################################################
# Define the number of words
nb_words = [int(sys.argv[3])]
################################################################################################
# Read the csv file with the ground truth
gt_csv_filename = '/work/le2i/gu5306le/retinopathy/OCT/SERI/data.csv'
gt_csv = pd.read_csv(gt_csv_filename)
gt = gt_csv.values
data_filename = gt[:, 0]
# Get the good extension
radius = sys.argv[1]
data_filename = np.array([f + '_nlm_flatten_lbp_' + str(radius) + '_hist.npz' for f in data_filename])
label = gt[:, 1]
label = ((label + 1.) / 2.).astype(int)
from collections import Counter
count_gt = Counter(label)
if (count_gt[0] != count_gt[1]):
raise ValueError('Not balanced data.')
else:
# Split data into positive and negative
# TODO TACKLE USING PERMUTATION OF ELEMENTS
filename_normal = data_filename[label == 0]
filename_dme = data_filename[label == 1]
# Get the input folder where the information are located
input_folder = sys.argv[2]
# Build the data folder from the radius given
data_folder = join(input_folder, 'r_' + str(radius) + '_hist_npz')
# Open the data
### Features
get_lbp_data = lambda f: np.load(join(data_folder, f))['vol_lbp_top_hist']
# Compute a codebook for each fold
codebook_list = []
for idx_test, (pat_test_norm, pat_test_dme) in enumerate(zip(filename_normal, filename_dme)):
codebook_list.append(CBComputation(idx_test, (pat_test_norm, pat_test_dme),
filename_normal, filename_dme, nb_words))
# We have to store the final codebook
# Give the location of the random codebook previously generated
codebook_type = 'codebook_final'
codebook_path = join(data_folder, codebook_type)
codebook_filename = join(codebook_path, 'codebook.pkl')
if not os.path.exists(codebook_path):
os.makedirs(codebook_path)
from sklearn.externals import joblib
joblib.dump(codebook_list, codebook_filename)
| [
"[email protected]"
] | |
1d5ecb9898306e73daa11e2c834b4fa76e3d4b76 | 7826681647933249c8949c00238392a0128b4a18 | /cosypose/utils/resources.py | 0d0042740659a09b95b1032cea0c91c7fe8516b9 | [
"MIT"
] | permissive | imankgoyal/cosypose | b35678a32a6491bb15d645bc867f4b2e49bee6d2 | fa494447d72777f1d3bd5bd134d79e5db0526009 | refs/heads/master | 2022-12-09T11:18:23.188868 | 2020-08-31T15:34:02 | 2020-08-31T15:34:02 | 291,834,596 | 2 | 0 | MIT | 2020-08-31T22:06:12 | 2020-08-31T22:06:11 | null | UTF-8 | Python | false | false | 517 | py | import os
import psutil
from shutil import which
def is_egl_available():
    return is_gpu_available() and 'EGL_VISIBLE_DEVICES' in os.environ
def is_gpu_available():
return which('nvidia-smi') is not None
def is_slurm_available():
return which('sinfo') is not None
def get_total_memory():
current_process = psutil.Process(os.getpid())
mem = current_process.memory_info().rss
for child in current_process.children(recursive=True):
mem += child.memory_info().rss
return mem / 1e9
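# Minimal usage sketch (illustrative):
#
#     if is_egl_available():
#         print('EGL rendering should be possible')
#     print('process tree memory: {:.2f} GB'.format(get_total_memory()))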
| [
"[email protected]"
] | |
4985a0fa1bd9664fb90cd7db6fe4ebf87eb5bf80 | 7ef39106ff091002c3a22781628fe3076a0941b0 | /bindings/python/pyiree/rt/vm_test.py | ed7e66f7b3ab0b3c19c8bde23f8fab9ccec738e7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | YashRajSingh-4799/iree | 798c01b44696f1360014075f4eca275c7a4dc87f | ace19e886efe3702bfe7b17185a5daaa20808e82 | refs/heads/master | 2022-04-20T23:52:58.303107 | 2020-04-23T01:39:08 | 2020-04-23T02:47:41 | 258,150,320 | 2 | 0 | Apache-2.0 | 2020-04-23T09:09:26 | 2020-04-23T09:09:26 | null | UTF-8 | Python | false | false | 5,127 | py | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-variable
from absl.testing import absltest
import numpy as np
from pyiree import compiler
from pyiree import rt
def create_simple_static_mul_module():
ctx = compiler.Context()
input_module = ctx.parse_asm("""
func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
attributes { iree.module.export } {
%0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %0 : tensor<4xf32>
}
""")
binary = input_module.compile()
m = rt.VmModule.from_flatbuffer(binary)
return m
def create_simple_dynamic_abs_module():
ctx = compiler.Context()
# TODO(laurenzo): Compile for more backends as dynamic shapes come online.
target_backends = ["vmla"]
input_module = ctx.parse_asm("""
func @simple_mul(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32>
attributes { iree.module.export } {
%0 = "xla_hlo.abs"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
""")
binary = input_module.compile(target_backends=target_backends)
m = rt.VmModule.from_flatbuffer(binary)
return m
class VmTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
driver_names = rt.HalDriver.query()
print("DRIVER_NAMES =", driver_names)
cls.driver = rt.HalDriver.create("vmla")
cls.device = cls.driver.create_default_device()
cls.hal_module = rt.create_hal_module(cls.device)
cls.htf = rt.HostTypeFactory.get_numpy()
def test_variant_list(self):
l = rt.VmVariantList(5)
print(l)
self.assertEqual(l.size, 0)
def test_context_id(self):
instance = rt.VmInstance()
context1 = rt.VmContext(instance)
context2 = rt.VmContext(instance)
self.assertGreater(context2.context_id, context1.context_id)
def test_module_basics(self):
m = create_simple_static_mul_module()
f = m.lookup_function("simple_mul")
self.assertGreater(f.ordinal, 0)
notfound = m.lookup_function("notfound")
self.assertIs(notfound, None)
def test_dynamic_module_context(self):
instance = rt.VmInstance()
context = rt.VmContext(instance)
m = create_simple_static_mul_module()
context.register_modules([self.hal_module, m])
def test_static_module_context(self):
m = create_simple_static_mul_module()
print(m)
instance = rt.VmInstance()
print(instance)
context = rt.VmContext(instance, modules=[self.hal_module, m])
print(context)
def test_dynamic_shape_compile(self):
m = create_simple_dynamic_abs_module()
print(m)
instance = rt.VmInstance()
print(instance)
context = rt.VmContext(instance, modules=[self.hal_module, m])
print(context)
def test_synchronous_dynamic_shape_invoke_function(self):
m = create_simple_dynamic_abs_module()
instance = rt.VmInstance()
context = rt.VmContext(instance, modules=[self.hal_module, m])
f = m.lookup_function("simple_mul")
abi = context.create_function_abi(self.device, self.htf, f)
print("INVOKING:", abi)
arg0 = np.array([[-1., 2.], [3., -4.]], dtype=np.float32)
inputs = abi.raw_pack_inputs((arg0,))
print("INPUTS:", inputs)
allocated_results = abi.allocate_results(inputs, static_alloc=False)
print("ALLOCATED RESULTS:", allocated_results)
print("--- INVOKE:")
context.invoke(f, inputs, allocated_results)
print("--- DONE.")
results = abi.raw_unpack_results(allocated_results)
print("RESULTS:", results)
np.testing.assert_allclose(results[0], [[1., 2.], [3., 4.]])
def test_synchronous_invoke_function(self):
m = create_simple_static_mul_module()
instance = rt.VmInstance()
context = rt.VmContext(instance, modules=[self.hal_module, m])
f = m.lookup_function("simple_mul")
abi = context.create_function_abi(self.device, self.htf, f)
print("INVOKING:", abi)
arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
inputs = abi.raw_pack_inputs((arg0, arg1))
print("INPUTS:", inputs)
allocated_results = abi.allocate_results(inputs, static_alloc=False)
print("ALLOCATED RESULTS:", allocated_results)
print("--- INVOKE:")
context.invoke(f, inputs, allocated_results)
print("--- DONE.")
results = abi.raw_unpack_results(allocated_results)
print("RESULTS:", results)
np.testing.assert_allclose(results[0], [4., 10., 18., 28.])
if __name__ == "__main__":
absltest.main()
| [
"[email protected]"
] | |
98fd540f8660d0e5851214ffcfc28a448989e90e | 06a863150a7a3a7bfc0c341b9c3f267727606464 | /lib/gii/core/AssetUtils.py | 260e5649c1ed35c15c0ad503ee6b87826f8902a3 | [
"MIT"
] | permissive | brucelevis/gii | c843dc738a958b4a2ffe42178cff0dd04da44071 | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | refs/heads/master | 2020-10-02T00:41:02.723597 | 2016-04-08T07:44:45 | 2016-04-08T07:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import os
import os.path
import subprocess
import platform
import ctypes
##TOOL Functions
def openFileInOS(path):
sysName=platform.system()
if sysName=='Darwin':
subprocess.call(["open", path])
elif sysName == 'Windows':
os.startfile( os.path.normpath(path) )
#TODO:linux?
def showFileInBrowser(path):
sysName=platform.system()
if sysName=='Darwin':
subprocess.call(["open", "--reveal", path])
elif sysName == 'Windows':
ctypes.windll.shell32.ShellExecuteW(None, u'open', u'explorer.exe', u'/n,/select, ' + os.path.normpath(path), None, 1)
#TODO:linux? | [
"[email protected]"
] | |
5d29772916a157b070f30c565edce75aee066945 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /120/120.triangle.249953022.Runtime-Error.leetcode.py | bb813fc6a70d765796b9ff45665c1b95a1f683dc | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | class Solution(object):
def minimumTotal(self, triangle):
        for row in range(len(triangle) - 2, -1, -1):
for col in range(len(triangle[row])):
triangle[row][col] += min(triangle[row + 1][col], triangle[row + 1][col + 1])
return triangle[0][0]
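# Worked example (the classic LeetCode case, checked by hand): for
# [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]] the bottom-up pass folds the rows into
# [7, 6, 10], then [9, 10], then [11], so minimumTotal returns 11.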
| [
"[email protected]"
] | |
b8787456ced7e03774305dc6e4fff1a28b3daa6d | 316a07bd7ab47d447606d341c5d221d8318f65b9 | /horizon/horizon/dashboards/settings/ec2/urls.py | 46367bbe0d21ef5617312058a720009d0f481983 | [] | no_license | kumarcv/openstack-nf | 791d16a4844df4666fb2b82a548add98f4832628 | ad2d8c5d49f510292b1fe373c7c10e53be52ba23 | refs/heads/master | 2020-05-20T03:10:54.495411 | 2013-06-16T23:44:11 | 2013-06-16T23:44:11 | 7,497,218 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Openstack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from .views import IndexView
urlpatterns = patterns('horizon.dashboards.settings.ec2.views',
url(r'^$', IndexView.as_view(), name='index'),
)
| [
"[email protected]"
] | |
3282e7d18794deb4d673a620e223b8ab2c976279 | 3d90d6753cbb48c74dc8e72f4a886635a706ee16 | /day20-django之数据库外键多表,ajax/application/application/settings.py | 07dcebd87c9f8e60a649fde1b389c928c01bb465 | [] | no_license | shun-zi/python | 01354dfc23e470c67ae6adc323b7b23c446faf95 | 9b9851a608cfa18392464b7d887659ced8eb58a6 | refs/heads/master | 2021-09-12T23:15:35.586858 | 2018-04-22T12:32:41 | 2018-04-22T12:32:41 | 113,460,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for application project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yuqr0@1y0kt_)oib%&o2b_=q=78d4=c^q4cr7=-o%(l5nlwid^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'application_host',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'application.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'application.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'application',
'USER': 'root',
'PASSWORD': 'z960520@',
"HOST": "localhost",
"port": '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/statics/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'statics'),
)
| [
"[email protected]"
] | |
b7bf9d90e7b82119aa39546ac07392e2794321d0 | d916d9f15b039abe4f824d58714e0c3277939711 | /Encapsulation/Encapsulation-Lab/01_person.py | 2924a2a7ca413bcab6ea528fd27ca351de366977 | [] | no_license | jesalshah14/SoftUni-Python-OOP-February-2021 | a641e31e7144785cd05b0b6324348570ff90d7d7 | 45a584316951bca4d1bcfe35861f140d9fedf62a | refs/heads/main | 2023-04-09T20:15:32.764633 | 2021-04-18T15:29:57 | 2021-04-18T15:29:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | class Person:
def __init__(self, name, age):
self.__name = name
self.__age = age
def get_name(self):
return self.__name
def get_age(self):
return self.__age
# class Person:
# def __init__(self, name, age):
# self.__name = name
# self.__age = age
#
# @property
# def name(self):
# return self.__name
#
# @name.setter
# def name(self, value):
# self.__name = value
#
# @property
# def age(self):
# return self.__age
#
# @age.setter
# def age(self, value):
# self.__age = value
# class Person:
# def __init__(self, name, age):
# self.name = name
# self.age = age
#
# @property
# def name(self):
# return self.__name
#
# @name.setter
# def name(self, value):
# if not value or not isinstance(value, str):
# raise ValueError("Name must be a non-empty string")
# self.__name = value
person = Person("George", 32)
print(person.get_name())
print(person.get_age())
# person = Person("George", 32)
# print(person.name)
# print(person.age)
| [
"[email protected]"
] | |
bc6f06c449429d99388dfabc101bd41903a224ec | 3479fca8dd50fb0f27a981cca2e4d1cd9a34d36b | /post/permissions.py | f61e1d34af829fedff5583d7f68a9fae3a0e4672 | [] | no_license | Harshvartak/blogproj | bdea67e935789ba2bacd29ec91d070b0650f73da | 4fd23d3664218bfb0c0f6817995b9595c7aa08f2 | refs/heads/master | 2020-11-25T23:24:47.459955 | 2019-12-23T20:27:13 | 2019-12-23T20:27:13 | 228,888,052 | 0 | 0 | null | 2019-12-18T19:01:54 | 2019-12-18T17:16:57 | Python | UTF-8 | Python | false | false | 465 | py | from rest_framework import permissions
'''
class BasePermission(object):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return True
'''
class IsAuthorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
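# Illustrative usage sketch (editor's addition, hedged): a DRF view would normally
# reference this class via permission_classes. "Post", "PostSerializer" and
# "PostDetail" are assumptions for illustration, not names defined in this app.
# from rest_framework import generics
# class PostDetail(generics.RetrieveUpdateDestroyAPIView):
#     queryset = Post.objects.all()
#     serializer_class = PostSerializer
#     permission_classes = (IsAuthorOrReadOnly,)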
| [
"[email protected]"
] | |
fe10ec94891a3d476f0c90796f87d44a9790613f | a0127e596323a7016b1662d57cedea7bae3f3588 | /calendly/common/logger.py | 8c1c938878c5238c4ba3c5ce9761f3901ccab360 | [] | no_license | cpj2195/calendly | 042710b959b3c4e390b9044927e92bbe7a384908 | 157907ddaf0c4330a03c8acf407239955b056d70 | refs/heads/master | 2022-06-13T16:17:43.936713 | 2020-04-06T12:52:07 | 2020-04-06T12:52:07 | 252,102,917 | 1 | 0 | null | 2022-05-25T03:25:24 | 2020-04-01T07:27:03 | Python | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
def log_to_cloudwatch(log_marker, message):
'''
    This function is used to print the log messages so that they can be logged
to cloudwatch.
PARAMETERS
----------
    log_marker : str
        marker/tag printed before the message
    message : str
        message to be logged
'''
traceback.print_exc()
print(log_marker)
print(message)
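# Example usage (editor's illustration; the marker text is an arbitrary assumption):
#   log_to_cloudwatch("[CALENDLY_API_ERROR]", "failed to fetch event types")
# Both values are printed to stdout, which is what the hosting runtime is expected
# to forward to CloudWatch.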
| [
"[email protected]"
] | |
03d30a5a7bb912b677db9b56561c7e7e5fb31035 | 18dc0f38e3d43a4b457c626c20fa16da715d1773 | /qa/Test_dm_sip_idb.py | 19f3a61475ee919eec91948be065d7a53c3791e2 | [
"MIT"
] | permissive | KDahlgren/orik | 424f6495effe0113ca56db3954453e708aa857a5 | 4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f | refs/heads/master | 2021-01-01T15:38:24.385363 | 2018-06-23T01:24:55 | 2018-06-23T01:24:55 | 97,662,025 | 2 | 2 | MIT | 2018-09-24T20:57:27 | 2017-07-19T02:04:15 | Python | UTF-8 | Python | false | false | 7,064 | py | #!/usr/bin/env python
'''
Test_dm_sip_idb.py
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, logging, os, re, string, sqlite3, sys, unittest
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../../src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../src" ) )
from derivation import FactNode, GoalNode, Node, ProvTree, RuleNode
if not os.path.abspath( __file__ + "/../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../lib/iapyx/src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import dumpers, globalCounters, tools
from evaluators import c4_evaluator
# ------------------------------------------------------ #
#####################
# TEST DM SIP IDB #
#####################
class Test_dm_sip_idb( unittest.TestCase ) :
logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.DEBUG )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.INFO )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.WARNING )
PRINT_STOP = False
#############
# SIMPLOG #
#############
#@unittest.skip( "works." )
def test_simplog( self ) :
test_id = "simplog"
test_file_name = "simplog_driver"
print " >>> RUNNING " + test_id + " <<<"
test_id = "dm_sip_idb_" + test_id
serial_nodes_path = "./testFiles/" + test_id + "_expected_nodes.txt"
serial_edges_path = "./testFiles/" + test_id + "_expected_edges.txt"
input_file = "./dedalus_drivers/" + test_file_name + ".ded"
argDict = self.getArgDict( input_file )
argDict[ 'data_save_path' ] = "./data/" + test_id + "/"
argDict[ 'EOT' ] = 6
argDict[ 'nodes' ] = [ "a", "b", "c" ]
cursor = self.set_up_test( test_id, argDict )
provTree = self.get_prov_tree( serial_nodes_path, \
serial_edges_path, \
argDict, \
cursor )
provTree.create_pydot_graph( 0, 0, test_id )
###############
# PATH LINK #
###############
#@unittest.skip( "works." )
def test_path_link( self ) :
test_id = "path_link"
test_file_name = "path_link"
print " >>> RUNNING " + test_id + " <<<"
test_id = "dm_sip_idb_" + test_id
serial_nodes_path = "./testFiles/" + test_id + "_expected_nodes.txt"
serial_edges_path = "./testFiles/" + test_id + "_expected_edges.txt"
input_file = "./testFiles/" + test_file_name + ".ded"
argDict = self.getArgDict( input_file )
argDict[ 'data_save_path' ] = "./data/" + test_id + "/"
argDict[ 'EOT' ] = 1
argDict[ 'nodes' ] = [ "a" ]
cursor = self.set_up_test( test_id, argDict )
provTree = self.get_prov_tree( serial_nodes_path, \
serial_edges_path, \
argDict, \
cursor )
provTree.create_pydot_graph( 0, 0, test_id )
###################
# GET PROV TREE #
###################
def get_prov_tree( self, serial_nodes_path, serial_edges_path, argDict, cursor ) :
if not os.path.exists( argDict[ "data_save_path" ] ) :
os.system( "mkdir " + argDict[ "data_save_path" ] )
# --------------------------------------------------------------- #
# convert dedalus into c4 datalog and evaluate
parsedResults = self.get_program_results( argDict, cursor )
# --------------------------------------------------------------- #
# build provenance tree
provTree = ProvTree.ProvTree( rootname = "FinalState", \
parsedResults = parsedResults, \
cursor = cursor, \
treeType = "goal", \
isNeg = False, \
eot = argDict[ "EOT" ], \
prev_prov_recs = {}, \
argDict = argDict )
# get actual serialized graph
if serial_nodes_path :
actual_serial_nodes = provTree.nodeset_pydot_str
if serial_edges_path :
actual_serial_edges = provTree.edgeset_pydot_str
if self.PRINT_STOP :
if serial_nodes_path :
for n in actual_serial_nodes :
logging.debug( " n = " + n.rstrip() )
if serial_nodes_path :
for e in actual_serial_edges :
logging.debug( " e = " + e.rstrip() )
tools.bp( __name__, inspect.stack()[0][3], "print stop." )
return provTree
#########################
# GET PROGRAM RESULTS #
#########################
# convert the input dedalus program into c4 datalog and evaluate.
# return evaluation results dictionary.
def get_program_results( self, argDict, cursor ) :
# convert dedalus into c4 datalog
allProgramData = dedt.translateDedalus( argDict, cursor )
# run c4 evaluation
results_array = c4_evaluator.runC4_wrapper( allProgramData[0], argDict )
parsedResults = tools.getEvalResults_dict_c4( results_array )
return parsedResults
#################
# SET UP TEST #
#################
def set_up_test( self, test_id, argDict ) :
if os.path.exists( "./IR_" + test_id + ".db*" ) :
os.remove( "./IR*.db*" )
testDB = "./IR_" + test_id + ".db"
IRDB = sqlite3.connect( testDB )
cursor = IRDB.cursor()
dedt.createDedalusIRTables(cursor)
dedt.globalCounterReset()
return cursor
##################
# GET ARG DICT #
##################
# specify the default test arguments.
# return dictionary.
def getArgDict( self, inputfile ) :
# initialize
argDict = {}
# populate with unit test defaults
argDict[ 'prov_diagrams' ] = False
argDict[ 'use_symmetry' ] = False
argDict[ 'crashes' ] = 0
argDict[ 'solver' ] = None
argDict[ 'disable_dot_rendering' ] = False
argDict[ 'settings' ] = "./settings_dm_sip_idb.ini"
argDict[ 'negative_support' ] = False
argDict[ 'strategy' ] = None
argDict[ 'file' ] = inputfile
argDict[ 'EOT' ] = 4
argDict[ 'find_all_counterexamples' ] = False
argDict[ 'nodes' ] = [ "a", "b", "c" ]
argDict[ 'evaluator' ] = "c4"
argDict[ 'EFF' ] = 2
argDict[ 'data_save_path' ] = "./data/"
argDict[ 'neg_writes' ] = "dm"
return argDict
##############################
# MAIN THREAD OF EXECUTION #
##############################
if __name__ == "__main__":
unittest.main()
#########
# EOF #
#########
| [
"[email protected]"
] | |
eea9054193fcde002fa2322da0daf6e6b6bbd769 | d561fab22864cec1301393d38d627726671db0b2 | /python/helpers/typeshed/third_party/3.6/click/decorators.pyi | a3dcdddeb065b9be1908801b10384ecdca051c3b | [
"Apache-2.0",
"MIT"
] | permissive | Vedenin/intellij-community | 724dcd8b3e7c026936eed895cf964bb80574689a | 74a89fa7083dedc6455a16e10cf779d191d79633 | refs/heads/master | 2021-01-25T00:47:43.514138 | 2017-03-27T15:48:36 | 2017-03-27T15:54:02 | 86,361,176 | 1 | 1 | null | 2017-03-27T16:54:23 | 2017-03-27T16:54:23 | null | UTF-8 | Python | false | false | 5,494 | pyi | from typing import Any, Callable, Dict, List, TypeVar, Union
from click.core import Command, Group, Argument, Option, Parameter, Context
from click.types import ParamType
T = TypeVar('T')
Decorator = Callable[[T], T]
def pass_context(T) -> T:
...
def pass_obj(T) -> T:
...
def make_pass_decorator(
object_type: type, ensure: bool = False
) -> Callable[[T], T]:
...
# NOTE: Decorators below have **attrs converted to concrete constructor
# arguments from core.pyi to help with type checking.
def command(
name: str = None,
cls: type = Command,
# Command
help: str = None,
epilog: str = None,
short_help: str = None,
options_metavar: str = '[OPTIONS]',
add_help_option: bool = True,
) -> Decorator:
...
# This inherits attrs from Group, MultiCommand and Command.
def group(
name: str = None,
cls: type = Group,
# Group
commands: Dict[str, Command] = None,
# MultiCommand
invoke_without_command: bool = False,
no_args_is_help: bool = None,
subcommand_metavar: str = None,
chain: bool = False,
result_callback: Callable = None,
# Command
help: str = None,
epilog: str = None,
short_help: str = None,
options_metavar: str = '[OPTIONS]',
add_help_option: bool = True,
) -> Decorator:
...
def argument(
*param_decls: str,
cls: type = Argument,
# Argument
required: bool = None,
# Parameter
type: Union[type, ParamType] = None,
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
def option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = None,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = None,
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def confirmation_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: str = 'Do you want to continue?',
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Confirm the action without prompting.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def password_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = True,
confirmation_prompt: bool = True,
hide_input: bool = True,
is_flag: bool = None,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = None,
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def version_option(
version: str = None,
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Show the version and exit.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = True,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def help_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Show this message and exit.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = True,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
| [
"[email protected]"
] | |
f17014c2e1af3c37315c054d5633d98ac328b1c3 | 9784a90cac667e8e0aaba0ca599b4255b215ec67 | /gluon/datasets/librispeech_asr_dataset.py | 9726c17ef5fa85b5af7e5f85752be4319238b0ff | [
"MIT"
] | permissive | osmr/imgclsmob | d2f48f01ca541b20119871393eca383001a96019 | f2993d3ce73a2f7ddba05da3891defb08547d504 | refs/heads/master | 2022-07-09T14:24:37.591824 | 2021-12-14T10:15:31 | 2021-12-14T10:15:31 | 140,285,687 | 3,017 | 624 | MIT | 2022-07-04T15:18:37 | 2018-07-09T12:57:46 | Python | UTF-8 | Python | false | false | 5,226 | py | """
LibriSpeech ASR dataset.
"""
__all__ = ['LibriSpeech', 'LibriSpeechMetaInfo']
import os
import numpy as np
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class LibriSpeech(AsrDataset):
"""
LibriSpeech dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
subset : str, default 'dev-clean'
Data subset.
transform : callable, optional
        A function that transforms the audio sample.
"""
def __init__(self,
root,
mode="test",
subset="dev-clean",
transform=None):
super(LibriSpeech, self).__init__(
root=root,
mode=mode,
transform=transform)
self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
data_dir_path = os.path.join(root_dir_path, subset)
assert os.path.exists(data_dir_path)
for speaker_id in os.listdir(data_dir_path):
speaker_dir_path = os.path.join(data_dir_path, speaker_id)
for chapter_id in os.listdir(speaker_dir_path):
chapter_dir_path = os.path.join(speaker_dir_path, chapter_id)
transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id))
with open(transcript_file_path, "r") as f:
transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines())
for flac_file_name in os.listdir(chapter_dir_path):
if flac_file_name.endswith(".flac"):
wav_file_name = flac_file_name.replace(".flac", ".wav")
wav_file_path = os.path.join(chapter_dir_path, wav_file_name)
if not os.path.exists(wav_file_path):
flac_file_path = os.path.join(chapter_dir_path, flac_file_name)
pcm, sample_rate = soundfile.read(flac_file_path)
soundfile.write(wav_file_path, pcm, sample_rate)
text = transcripts[wav_file_name.replace(".wav", "")]
text = text.strip("\n ").lower()
text = np.array([vocabulary_dict[c] for c in text], dtype=np.long)
self.data.append((wav_file_path, text))
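# Editor's sketch (hedged): typical construction, assuming the dev-clean subset has
# already been unpacked under the given root directory:
#   ds = LibriSpeech(root="~/datasets/LibriSpeech", mode="test", subset="dev-clean")
#   wav_path, label_ids = ds.data[0]
# label_ids is the transcript encoded against ds.vocabulary (space, a-z, apostrophe).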
class LibriSpeechMetaInfo(DatasetMetaInfo):
def __init__(self):
super(LibriSpeechMetaInfo, self).__init__()
self.label = "LibriSpeech"
self.short_label = "ls"
self.root_dir_name = "LibriSpeech"
self.dataset_class = LibriSpeech
self.dataset_class_extra_kwargs = {"subset": "dev-clean"}
self.ml_type = "asr"
self.num_classes = 29
self.val_metric_extra_kwargs = [{"vocabulary": None}]
self.val_metric_capts = ["Val.WER"]
self.val_metric_names = ["WER"]
self.test_metric_extra_kwargs = [{"vocabulary": None}]
self.test_metric_capts = ["Test.WER"]
self.test_metric_names = ["WER"]
self.val_transform = asr_test_transform
self.test_transform = asr_test_transform
self.test_net_extra_kwargs = {"from_audio": True}
self.allow_hybridize = False
self.saver_acc_ind = 0
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for dataset specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--subset",
type=str,
default="dev-clean",
help="data subset")
def update(self,
args):
"""
Update dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(LibriSpeechMetaInfo, self).update(args)
self.dataset_class_extra_kwargs["subset"] = args.subset
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
args : obj
A dataset class instance.
"""
vocabulary = dataset._data.vocabulary
self.num_classes = len(vocabulary) + 1
self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| [
"[email protected]"
] | |
cf20a934ec81a718afd3d4f520e7a181de4d4ea6 | 479117fe710b1cadf9252f08769d8fd1476934e4 | /flask_ipywidgets/__init__.py | ead135d9ab9e51af7a15b2604dd43d767070b29b | [
"BSD-3-Clause"
] | permissive | jf---/flask-ipywidgets | 3f1b310c81015c3b4fbc66ee24d356987998613e | 3c0d7356c7185cb59e9dfa0f13e702273bbd7696 | refs/heads/master | 2020-03-15T12:32:05.166269 | 2018-07-30T10:06:15 | 2018-07-30T10:06:15 | 132,146,254 | 0 | 0 | null | 2018-05-04T13:51:06 | 2018-05-04T13:51:06 | null | UTF-8 | Python | false | false | 3,088 | py | from .kernel import *
from flask_sockets import Sockets
_kernel_spec = {
"display_name": "flask_kernel",
"language": "python",
"argv": ["python", "doesnotworkthisway"],
"env": {
},
"display_name": "Flask kernel",
"language": "python",
"interrupt_mode": "signal",
"metadata": {},
}
from flask import Flask, Blueprint
http = Blueprint('jupyter', __name__)
websocket = Blueprint('jupyter', __name__)
@http.route('/api/kernelspecs')
def kernelspecs(name=None):
return jsonify({
'default': 'flask_kernel',
'kernelspecs': {
'flask_kernel': {
'name': 'flask_kernel',
'resources': {},
'spec': _kernel_spec
}
}
})
@http.route('/api/kernels', methods=['GET', 'POST'])
def kernels_normal():
data = {
"id": "4a8a8c6c-188c-40aa-8bab-3c79500a4b26",
"name":
"flask_kernel",
"last_activity": "2018-01-30T19:32:04.563616Z",
"execution_state":
"starting",
"connections": 0
}
return jsonify(data), 201
@websocket.route('/api/kernels/<id>/<name>')
def kernels(ws, id, name):
print(id, name)
kernel = FlaskKernel.instance()
#kernel.stream.last_ws = ws
while not ws.closed:
message = ws.receive()
if message is not None:
msg = json.loads(message)
msg_serialized = kernel.session.serialize(msg)
# print("msg from front end", msg)
# print(kernel.comm_manager.comms)
msg_id = msg['header']['msg_id']
kernel.session.websockets[msg_id] = ws
if msg['channel'] == 'shell':
kernel.dispatch_shell(WebsocketStreamWrapper(ws, msg['channel']), [
BytesWrap(k) for k in msg_serialized])
else:
print('unknown channel', msg['channel'])
def app(prefix='/jupyter'):
kernel = FlaskKernel.instance()
app = Flask(__name__)
@app.template_filter()
def ipywidget_view(widget):
from jinja2 import Markup, escape
import json
return Markup("""<script type="application/vnd.jupyter.widget-view+json">%s</script>""" % json.dumps(widget.get_view_spec()))
@app.template_filter()
def ipywidget_state(widgets):
from jinja2 import Markup, escape
from ipywidgets import embed as wembed
drop_defaults = True
state = wembed.dependency_state(widgets, drop_defaults=drop_defaults)
from ipywidgets import Widget
json_data = Widget.get_manager_state(widgets=[])
json_data['state'] = state
json_data_str = json.dumps(json_data, indent=' ')
snippet = wembed.snippet_template.format(
load='', widget_views='', json_data=json_data_str)
return Markup(snippet)
sockets = Sockets(app)
app.register_blueprint(http, url_prefix=prefix)
sockets.register_blueprint(websocket, url_prefix=prefix)
return app
def init(app):
kernel = FlaskKernel.instance()
sockets = Sockets(app)
| [
"[email protected]"
] | |
c0639249f7c07c28cd08a1583e8193dcd657342f | e23881d9b059f3fbe3f75a7c8c53737ed0f53545 | /Django_two_factor_auth/manage.py | 842f7f604e05ff01e4c5825614bc8edaf8790bb5 | [] | no_license | GK-SVG/Django_Boy | b1fbf9c2b3d35e38bcd2da54956476aad0f2310d | 27121c1dc70b44065cd2c5fe854335cd5d1214c5 | refs/heads/master | 2023-05-07T22:47:02.414738 | 2021-06-03T17:03:43 | 2021-06-03T17:03:43 | 308,200,818 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_two_factor_auth.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
85052a32ac62faefb4696f65719c5b84466465aa | facf7941a8ef5b1f3eceac59b390ef78ea18c6b8 | /EmoEstimator/utils/evaluate.py | f9134555f6cb0e1a9f4e8b46ce9fae19ab3e90b7 | [] | no_license | augustdemi/demi | efd29caa3fcccbd92b3ac4e9ba39ed910c3a75ef | 059a1bc93f9597b4db98e2c8e8c6f60d180d4fc3 | refs/heads/master | 2020-03-18T17:05:25.398744 | 2019-03-13T18:55:28 | 2019-03-13T18:55:28 | 135,005,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,475 | py | import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.2f' % x)
def _process(y_hat, y_lab, fun):
'''
- split y_true and y_pred in lists
- removes frames where labels are unknown (-1)
- returns list of predictions
'''
y1 = [x for x in y_hat.T]
y2 = [x for x in y_lab.T]
out = []
for i, [_y1, _y2] in enumerate(zip(y1, y2)):
idx = _y2!=-1
_y1 = _y1[idx]
_y2 = _y2[idx]
if np.all(_y2==-1):
out.append(np.nan)
else:
out.append(fun(_y1,_y2))
return np.array(out)
def _acc(y_hat, y_lab):
def fun(y_hat,y_lab):
y_hat = np.round(y_hat)
y_lab = np.round(y_lab)
return np.mean(y_hat==y_lab)
return _process(y_hat, y_lab, fun)
def _mae(y_hat, y_lab):
def fun(y_hat,y_lab):
y_hat = np.float32(y_hat)
y_lab = np.float32(y_lab)
return np.mean(np.abs(y_hat-y_lab))
return _process(y_hat, y_lab, fun)
def _mse(y_hat, y_lab):
def fun(y_hat,y_lab):
y_hat = np.float32(y_hat)
y_lab = np.float32(y_lab)
return np.mean((y_hat-y_lab)**2)
return _process(y_hat, y_lab, fun)
def _rmse(y_hat, y_lab):
def fun(y_hat,y_lab):
y_hat = np.float32(y_hat)
y_lab = np.float32(y_lab)
return (np.mean((y_hat-y_lab)**2))**0.5
return _process(y_hat, y_lab, fun)
def _f1(y_hat, y_lab, threshold=1):
def fun(y_hat,y_lab):
y_hat = np.array(y_hat>=threshold)
y_lab = np.array(y_lab>=threshold)
tp = np.sum( (y_hat==1) * (y_lab==1) )
fp = np.sum( (y_hat==1) * (y_lab==0) )
fn = np.sum( (y_hat==0) * (y_lab==1) )
if tp==0:
return 0
else:
return (2*tp)/float(2*tp+fp+fn)
return _process(y_hat, y_lab, fun)
def _icc(y_hat, y_lab, cas=3, typ=1):
def fun(y_hat,y_lab):
y_hat = y_hat[None,:]
y_lab = y_lab[None,:]
Y = np.array((y_lab, y_hat))
# number of targets
n = Y.shape[2]
# mean per target
mpt = np.mean(Y, 0)
# print mpt.eval()
mpr = np.mean(Y, 2)
# print mpr.eval()
tm = np.mean(mpt, 1)
# within target sum sqrs
WSS = np.sum((Y[0]-mpt)**2 + (Y[1]-mpt)**2, 1)
# within mean sqrs
WMS = WSS/n
# between rater sum sqrs
RSS = np.sum((mpr - tm)**2, 0) * n
# between rater mean sqrs
RMS = RSS
# between target sum sqrs
TM = np.tile(tm, (y_hat.shape[1], 1)).T
BSS = np.sum((mpt - TM)**2, 1) * 2
# between targets mean squares
BMS = BSS / (n - 1)
# residual sum of squares
ESS = WSS - RSS
# residual mean sqrs
EMS = ESS / (n - 1)
if cas == 1:
if typ == 1:
res = (BMS - WMS) / (BMS + WMS)
if typ == 2:
res = (BMS - WMS) / BMS
if cas == 2:
if typ == 1:
res = (BMS - EMS) / (BMS + EMS + 2 * (RMS - EMS) / n)
if typ == 2:
res = (BMS - EMS) / (BMS + (RMS - EMS) / n)
if cas == 3:
if typ == 1:
res = (BMS - EMS) / (BMS + EMS)
if typ == 2:
res = (BMS - EMS) / BMS
res = res[0]
if np.isnan(res) or np.isinf(res):
return 0
else:
return res
return _process(y_hat, y_lab, fun)
def _pcc(y_hat, y_lab):
def fun(y1, y2):
res = np.corrcoef(y1, y2)[0, 1]
if np.isnan(res) or np.isinf(res):
return 0
else:
return res
return _process(y_hat, y_lab, fun)
def print_summary(y_hat, y_lab, log_dir=None, verbose=1, mode='max'):
assert(y_hat.shape==y_lab.shape)
# remove unlabeled frames
idx = y_lab.reshape(y_lab.shape[0],-1).max(-1)>=0
y_lab = y_lab[idx]
y_hat = y_hat[idx]
if y_hat.ndim==3:
if mode=='exp':
tmp = np.zeros(y_hat.shape[:2])
for i in range(y_hat.shape[2]):
tmp+=y_hat[:,:,i]*i
y_hat = tmp
tmp = np.zeros(y_lab.shape[:2])
for i in range(y_lab.shape[2]):
tmp+=y_lab[:,:,i]*i
y_lab = tmp
if mode=='max':
y_hat = y_hat.argmax(2)
y_lab = y_lab.argmax(2)
data = []
data.append(_icc(y_hat, y_lab))
data.append(_pcc(y_hat, y_lab))
data.append(_rmse(y_hat, y_lab))
data.append(_mae(y_hat, y_lab))
data.append(_acc(y_hat, y_lab))
data.append(_f1(y_hat, y_lab))
data = np.vstack(data)
columns = [str(i) for i in np.arange(data.shape[1])]+['avr.']
table = np.hstack((data,data.mean(1)[:,None]))
index = ['ICC','PCC','RMSE','MAE','ACC','F1-b']
t = pd.DataFrame(np.abs(table), index=index, columns = columns)
out = {
'index':index,
'columns':columns,
'data':data
}
if verbose:
print(t)
print()
if log_dir:
f = open(log_dir, 'w')
print(t, file=f)
f.close()
return out
if __name__ == "__main__":
import numpy as np
y1 = np.random.randint(0,5,[100,4])
y2 = np.random.randint(0,5,[100,4])
y1[:,0] = y2[:,0]
y1[:50,2]=-1
y2[:,3]=-1
print(_acc(y1,y2))
print(_mae(y1,y2))
print(_rmse(y1,y2))
print(_icc(y1,y2))
print(_pcc(y1,y2))
print(_f1(y1,y2))
| [
"[email protected]"
] | |
4f285ca0d361ca2986f77184b1364e48262952d5 | 6bb99b53ae72f03e4ebce2c80c3be1c13871e46f | /pyweb/web_11_framework_v3/test_case/test_bid.py | 09aadaa2667f5e987cb84621e5717cf50f64b8d1 | [] | no_license | change1q2/Learn | b9ac7085ae476f92fbf04043bda74605b723abf0 | 28e93c56c0a3aaf72006614a565fb7fff267b893 | refs/heads/master | 2021-03-15T16:31:51.641845 | 2020-04-10T14:19:23 | 2020-04-10T14:19:23 | 246,864,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
# email: [email protected]
# wechat: shoubian01
# author: 王雨泽
import time
import unittest
from selenium import webdriver
from data.login_data import login_data_success
from pages.index_page import IndexPage
from pages.login_page import LoginPage
class TestBid(unittest.TestCase):
def setUp(self) -> None:
"""
        Preconditions:
        1. Log in
:return:
"""
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(20)
        # Initialize page objects
self.login_page = LoginPage(self.driver)
self.index_page = IndexPage(self.driver)
        # Log in
login_data = login_data_success[0]
self.login_page.login(login_data['mobile'], login_data['pwd'])
def tearDown(self) -> None:
pass
def test_bid_error(self):
"测试投资失败"
time.sleep(1)
self.index_page.get()
        # If we locate elements directly without waiting for the new page to appear,
        # we may end up finding elements from the previous page.
self.index_page.get_element_bid().click()
print('hello')
# def test_bid_success(self):
# """投资成功"""
# pass
| [
"[email protected]"
] | |
4db4270f3fe0a646bfc6601b1a0ca5d44b124cad | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/13_OOP/f_Advanced/02_abc_classes.py | 698048fec9be8984002dbb3ddec5ae264a1514ff | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 952 | py | #!/usr/bin/python
"""
Purpose: Abstract Base classes
"""
from abc import ABC, abstractmethod, abstractproperty
class BasicCar(ABC):
modal_name: str = NotImplemented
@abstractmethod
def get_chasis_number(self):
pass
def get_car_model(self):
pass
# Solution
class RolsRoys(BasicCar):
def get_chasis_number(self):
pass
car_r = RolsRoys()
# NOTE: We can't enforce plain class attributes to be defined;
# for that we need to use a property.
# ----------------------------------------
class BasicCar(ABC):
@abstractmethod
def get_chasis_number(self):
pass
def get_car_model(self):
pass
@property
@abstractmethod
def modal_name(self):
pass
# NOTE: Earlier abstractproperty was used, but it has been deprecated since Python 3.3
# Solution
class RolsRoys(BasicCar):
def get_chasis_number(self):
pass
@property
def modal_name(self):
pass
car_r = RolsRoys()
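# Editor's illustration (hedged): with modal_name declared as an abstract property,
# a subclass that omits it can no longer be instantiated; abc raises TypeError.
class PhantomCar(BasicCar):
    def get_chasis_number(self):
        pass
try:
    PhantomCar()
except TypeError as err:
    print(err)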
| [
"[email protected]"
] | |
5e2198bbbaad10200ebe8913df6a6cce46ac2e95 | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /scripts/linux-menuconfig/menuconfig.py | d3b29c6414cf8265292e0e7ae04ab48eb6d085c9 | [
"Apache-2.0"
] | permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 1,160 | py | # -*- coding:utf-8 -*-
#
# File : menuconfig.py
# This file is part of OneOS RTOS
#
import os
import sys
import argparse
import platform
import cmd_menuconfig
__version__ = 'OneOS packages v1.1.0'
def main():
bsp_root = os.getcwd()
os_root = os.path.join(bsp_root, "../..")
script_root = os.path.split(os.path.realpath(__file__))[0]
sys.path = sys.path + [os.path.join(script_root)]
try:
bsp_root.encode().decode("ascii")
except Exception as e:
if platform.system() == "Windows":
os.system('chcp 65001 > nul')
print ("\n\033[1;31;40m警告:\033[0m")
print ("\033[1;31;40m当前路径不支持非英文字符,请修改当前路径为纯英文路径。\033[0m")
print ("\033[1;31;40mThe current path does not support non-English characters.\033[0m")
print ("\033[1;31;40mPlease modify the current path to a pure English path.\033[0m")
print(bsp_root)
if platform.system() == "Windows":
os.system('chcp 437 > nul')
return False
cmd_menuconfig.cmd()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5cb3a34105484efbd75167a50e66fe5ffcfd574a | 395f974e62eafed74572efebcd91d62966e61639 | /examples/microjson/mutants/CRP_Num_mutant_1486201375.py | 0091defae4453189f698e149e283597dd42ec271 | [
"Apache-2.0"
] | permissive | agroce/tstl | ad386d027f0f5ff750eab19a722a4b119ed39211 | 8d43ef7fa49534868e6cdf1697863748260405c7 | refs/heads/master | 2023-08-08T19:14:52.020314 | 2023-07-26T17:51:36 | 2023-07-26T17:51:36 | 32,408,285 | 106 | 33 | NOASSERTION | 2021-01-26T19:05:17 | 2015-03-17T17:14:04 | Python | UTF-8 | Python | false | false | 8,312 | py | import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', 'b': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
        r = ((c0 & 31) << 6) + (nc() & 63)
elif (c0 & 240 == 224):
        r = ((c0 & 15) << 12) + ((nc() & 63) << 6) + (nc() & 63)
elif (c0 & 248 == 240):
        r = ((c0 & 7) << 18) + ((nc() & 63) << 12) + ((nc() & 63) << 6) + (nc() & 63)
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
            raise JSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
    raise JSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
            raise JSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 1
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
                raise JSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
                raise JSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
        raise JSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
        raise JSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
        raise JSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
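# Editor's illustration (hedged): against upstream microjson these calls behave as
#   from_json('{"a": [1, 2.5, "x"]}')  ->  {'a': [1L, 2.5, 'x']}   (Python 2 longs)
#   to_json({'ok': True})              ->  '{"ok":true}'
# This file is a CRP_Num mutant, so one numeric constant differs from upstream and
# actual behavior may deviate accordingly.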
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
            raise JSONError(E_BADFLOAT % obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
        raise JSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json | [
"[email protected]"
] | |
79b15adf19e99c7c49e5040691a05f0842aedc20 | 09ac5476e94122bf8ccdb0b404175dff0820c8a7 | /283 移动零.py | de5fd9bb96328ff663044152764e0112c737d6ec | [] | no_license | wxke/LeetCode-python | df27c456ad0c7042e3bfcf2a697e3958d3b85f1f | 37a66e426e9c7e279928d2f6fcdecb9641f4121c | refs/heads/master | 2020-04-29T14:03:22.554357 | 2020-03-10T12:38:24 | 2020-03-10T12:38:24 | 176,185,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | 移动零
class Solution:
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
n=nums.count(0)
for i in range(n):
nums.remove(0)
for i in range(n):
nums.append(0)
| [
"[email protected]"
] | |
0556703511c977fa66d8a51c13ed395a5a309986 | 0da6e1000e071d97822ffe5d84efa7f998d72ae8 | /2021-03-08-Introduction-to-Python/examples/14-matmul.py | 6b0863741e3b60985b2abf63859843e460fa769c | [
"BSD-3-Clause"
] | permissive | s3rvac/talks | 1c3dfec03d1f798125a50438b26aa8daf1f86b65 | 5e76250ee98424c090fdfbf3c1a2a92f36ccaca6 | refs/heads/master | 2023-05-12T01:13:03.365621 | 2023-05-05T17:32:27 | 2023-05-05T17:32:27 | 84,107,862 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # @ is an operator for matrix multiplication since Python 3.5 (__matmul__).
#
# Requires numpy (http://www.numpy.org/).
import numpy as np
A = np.matrix('4 1; 9 3')
B = np.matrix('5 1; 3 8')
# Prints
#
# [[23 12]
# [54 33]]
#
print(A @ B)
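# Editor's sketch (hedged): any class can opt into @ by defining __matmul__; the
# tiny vector type below is illustrative only and not part of the original example.
class Vec2:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __matmul__(self, other):
        # Dot product, just to demonstrate the protocol.
        return self.x * other.x + self.y * other.y

# Prints 11 (1*3 + 2*4).
print(Vec2(1, 2) @ Vec2(3, 4))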
| [
"[email protected]"
] | |
7fa6c925a941e1607fa873947b412cbf1688cefe | f8f8651ab604acc4937f8725caadaca1fb97a5e8 | /src/lightning_app/cli/commands/app_commands.py | 0b08538e76ba6d2c9bab478aa86190a00f8b3954 | [
"Apache-2.0"
] | permissive | neptune-ai/pytorch-lightning | ac59e746a486e07e21abae426b28e5d72812ac98 | 702014418e2ec0437e67d8bf97809edef686a02c | refs/heads/master | 2022-09-28T09:34:07.653729 | 2022-09-12T11:13:48 | 2022-09-12T11:13:48 | 229,063,811 | 1 | 1 | Apache-2.0 | 2022-09-26T03:29:49 | 2019-12-19T13:48:16 | Python | UTF-8 | Python | false | false | 3,430 | py | import os
import sys
from typing import Dict, Optional
import requests
from lightning_app.cli.commands.connection import _resolve_command_path
from lightning_app.utilities.cli_helpers import _retrieve_application_url_and_available_commands
from lightning_app.utilities.commands.base import _download_command
from lightning_app.utilities.enum import OpenAPITags
def _run_app_command(app_name: str, app_id: Optional[str]):
"""Execute a function in a running App from its name."""
# 1: Collect the url and comments from the running application
url, api_commands, _ = _retrieve_application_url_and_available_commands(app_id)
if url is None or api_commands is None:
raise Exception("We couldn't find any matching running App.")
if not api_commands:
raise Exception("This application doesn't expose any commands yet.")
full_command = "_".join(sys.argv)
has_found = False
for command in list(api_commands):
if command in full_command:
has_found = True
break
if not has_found:
raise Exception(f"The provided command isn't available in {list(api_commands)}")
# 2: Send the command from the user
metadata = api_commands[command]
# 3: Execute the command
if metadata["tag"] == OpenAPITags.APP_COMMAND:
_handle_command_without_client(command, metadata, url)
else:
_handle_command_with_client(command, metadata, app_name, app_id, url)
if sys.argv[-1] != "--help":
print("Your command execution was successful.")
def _handle_command_without_client(command: str, metadata: Dict, url: str) -> None:
supported_params = list(metadata["parameters"])
if "--help" == sys.argv[-1]:
print(f"Usage: lightning {command} [ARGS]...")
print(" ")
print("Options")
for param in supported_params:
print(f" {param}: Add description")
return
provided_params = [param.replace("--", "") for param in sys.argv[1 + len(command.split("_")) :]]
# TODO: Add support for more argument types.
if any("=" not in param for param in provided_params):
raise Exception("Please, use --x=y syntax when providing the command arguments.")
if any(param.split("=")[0] not in supported_params for param in provided_params):
raise Exception(f"Some arguments need to be provided. The keys are {supported_params}.")
# TODO: Encode the parameters and validate their type.
query_parameters = "&".join(provided_params)
resp = requests.post(url + f"/command/{command}?{query_parameters}")
assert resp.status_code == 200, resp.json()
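    # Illustrative note (editor's addition, hedged): for an invocation such as
    #   lightning notify --message=hello --level=2
    # the slicing above yields provided_params == ["message=hello", "level=2"], which
    # is joined into the query string "message=hello&level=2". The command name and
    # flags here are made up for illustration only.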
def _handle_command_with_client(command: str, metadata: Dict, app_name: str, app_id: Optional[str], url: str):
debug_mode = bool(int(os.getenv("DEBUG", "0")))
if app_name == "localhost":
target_file = metadata["cls_path"]
else:
target_file = _resolve_command_path(command) if debug_mode else _resolve_command_path(command)
if debug_mode:
print(target_file)
client_command = _download_command(
command,
metadata["cls_path"],
metadata["cls_name"],
app_id,
debug_mode=debug_mode,
target_file=target_file if debug_mode else _resolve_command_path(command),
)
client_command._setup(command_name=command, app_url=url)
sys.argv = sys.argv[len(command.split("_")) :]
client_command.run()
| [
"[email protected]"
] | |
9c08d98c2e1c10b1d3156cdc716e1f61bdac4ecd | b0549c720ffc7222c1b159db601d083f4422232f | /aib/init/tables/dir_companies.py | 5b540cdf12644b9121d40b33a113ed766f872a24 | [
"MIT"
] | permissive | FrankMillman/AccInABox | e7f6fd84caca27e3c4871b23b104cfd9de2150b3 | 3f2fc881cc9ee3e9e27022d90c90a7141fc59588 | refs/heads/develop | 2023-06-26T08:32:48.319840 | 2023-06-18T07:14:10 | 2023-06-18T07:14:10 | 23,425,845 | 3 | 1 | NOASSERTION | 2020-01-03T07:12:47 | 2014-08-28T11:43:13 | Python | UTF-8 | Python | false | false | 3,758 | py | # table definition
table = {
'table_name' : 'dir_companies',
'module_id' : 'dir',
'short_descr' : 'Companies',
'long_descr' : 'Directory of companies',
'sub_types' : None,
'sub_trans' : None,
'sequence' : None,
'tree_params' : None,
'roll_params' : None,
'indexes' : None,
'ledger_col' : None,
'defn_company' : None,
'data_company' : None,
'read_only' : False,
}
# column definitions
cols = []
cols.append ({
'col_name' : 'row_id',
'data_type' : 'AUTO',
'short_descr': 'Row id',
'long_descr' : 'Row id',
'col_head' : 'Row',
'key_field' : 'Y',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'created_id',
'data_type' : 'INT',
'short_descr': 'Created id',
'long_descr' : 'Created row id',
'col_head' : 'Created',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'deleted_id',
'data_type' : 'INT',
'short_descr': 'Deleted id',
'long_descr' : 'Deleted row id',
'col_head' : 'Deleted',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'company_id',
'data_type' : 'TEXT',
'short_descr': 'Company id',
'long_descr' : 'Company id',
'col_head' : 'Company',
'key_field' : 'A',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 15,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'company_name',
'data_type' : 'TEXT',
'short_descr': 'Company name',
'long_descr' : 'Company name',
'col_head' : 'Name',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 30,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# virtual column definitions
virt = []
# cursor definitions
cursors = []
cursors.append({
'cursor_name': 'companies',
'title': 'Maintain companies',
'columns': [
['company_id', 100, False, False],
['company_name', 260, True, False],
],
'filter': [],
'sequence': [['company_id', False]],
})
# actions
actions = []
actions.append([
'del_checks',
[
[
'not_sys',
'Cannot delete _sys',
[
['check', '', 'company_id', '!=', "'_sys'", ''],
],
],
],
])
actions.append([
'after_insert', '<create_company/>'
])
actions.append([
'after_commit', '<pyfunc name="db.cache.company_changed"/>'
])
| [
"[email protected]"
] | |
18cedc9c8cb002b7f8892e0fcdfd09d244337590 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/share/doc/networkx-2.3/examples/drawing/plot_house_with_colors.py | 68cb5d35963bc4da741f8e85b2e81230e2bb2533 | [
"BSD-3-Clause"
] | permissive | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1063c25d17b4cbafe673c0f1acef5d9c1c6b29ef79501dfe0f10cec8d2767104
size 649
| [
"Nqk180998!"
] | Nqk180998! |
87c0b4da7e0f00c5b57efd1715fc9d504ce61440 | 81c344b8df43ed550cb9496c664a8de2687eda3e | /venv/lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_fortiguard.py | 732bef9974bbdafa4bbcdd29b6f9cf23076aee3d | [] | no_license | anhdoan-ntt/cisco-aci | dc0e52b6d19ee0bafb2b24e0febe955952bf39ef | 185be6d6f13eabd65fb0ff328ea54f6507ccf0d4 | refs/heads/main | 2022-12-20T00:07:27.465096 | 2020-10-05T08:15:29 | 2020-10-05T08:15:29 | 300,500,699 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,490 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_fortiguard
short_description: Configure FortiGuard Web Filter service in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and fortiguard category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.8"
author:
- Link Zheng (@chillancezen)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Jie Xue (@JieX19)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
webfilter_fortiguard:
description:
- Configure FortiGuard Web Filter service.
default: null
type: dict
suboptions:
cache_mem_percent:
description:
- Maximum percentage of available memory allocated to caching (1 - 15%).
type: int
cache_mode:
description:
- Cache entry expiration mode.
type: str
choices:
- ttl
- db-ver
cache_prefix_match:
description:
- Enable/disable prefix matching in the cache.
type: str
choices:
- enable
- disable
close_ports:
description:
- Close ports used for HTTP/HTTPS override authentication and disable user overrides.
type: str
choices:
- enable
- disable
ovrd_auth_https:
description:
- Enable/disable use of HTTPS for override authentication.
type: str
choices:
- enable
- disable
ovrd_auth_port:
description:
- Port to use for FortiGuard Web Filter override authentication.
type: int
ovrd_auth_port_http:
description:
- Port to use for FortiGuard Web Filter HTTP override authentication
type: int
ovrd_auth_port_https:
description:
- Port to use for FortiGuard Web Filter HTTPS override authentication.
type: int
ovrd_auth_port_warning:
description:
- Port to use for FortiGuard Web Filter Warning override authentication.
type: int
request_packet_size_limit:
description:
- Limit size of URL request packets sent to FortiGuard server (0 for default).
type: int
warn_auth_https:
description:
- Enable/disable use of HTTPS for warning and authentication.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure FortiGuard Web Filter service.
fortios_webfilter_fortiguard:
vdom: "{{ vdom }}"
webfilter_fortiguard:
cache_mem_percent: "3"
cache_mode: "ttl"
cache_prefix_match: "enable"
close_ports: "enable"
ovrd_auth_https: "enable"
ovrd_auth_port: "8"
ovrd_auth_port_http: "9"
ovrd_auth_port_https: "10"
ovrd_auth_port_warning: "11"
request_packet_size_limit: "12"
warn_auth_https: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_webfilter_fortiguard_data(json):
option_list = ['cache_mem_percent', 'cache_mode', 'cache_prefix_match',
'close_ports', 'ovrd_auth_https', 'ovrd_auth_port',
'ovrd_auth_port_http', 'ovrd_auth_port_https', 'ovrd_auth_port_warning',
'request_packet_size_limit', 'warn_auth_https']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
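# Illustration (not part of the generated module): underscore_to_hyphen() recursively
# rewrites dictionary keys from Ansible-style names to the hyphenated names the FortiOS
# REST API expects, e.g.
#   underscore_to_hyphen({'cache_mem_percent': 3, 'ovrd_auth_port_http': 8008})
#   == {'cache-mem-percent': 3, 'ovrd-auth-port-http': 8008}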
def webfilter_fortiguard(data, fos):
vdom = data['vdom']
webfilter_fortiguard_data = data['webfilter_fortiguard']
filtered_data = underscore_to_hyphen(filter_webfilter_fortiguard_data(webfilter_fortiguard_data))
return fos.set('webfilter',
'fortiguard',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_fortiguard']:
resp = webfilter_fortiguard(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"webfilter_fortiguard": {
"required": False, "type": "dict", "default": None,
"options": {
"cache_mem_percent": {"required": False, "type": "int"},
"cache_mode": {"required": False, "type": "str",
"choices": ["ttl",
"db-ver"]},
"cache_prefix_match": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"close_ports": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"ovrd_auth_https": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"ovrd_auth_port": {"required": False, "type": "int"},
"ovrd_auth_port_http": {"required": False, "type": "int"},
"ovrd_auth_port_https": {"required": False, "type": "int"},
"ovrd_auth_port_warning": {"required": False, "type": "int"},
"request_packet_size_limit": {"required": False, "type": "int"},
"warn_auth_https": {"required": False, "type": "str",
"choices": ["enable",
"disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
versions_check_result = None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
fos.logout()
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
75580a359943e23211a8e016e753aacb53bf89b1 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/django_django/django-master/django/contrib/gis/geoip2/base.py | 545bd12a25217ca1b9da5725845c1a05f0d8aae0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 8,969 | py | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2:
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, str):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Check the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, str):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
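# Usage sketch (illustrative only, not part of Django itself): assumes the GEOIP_PATH
# setting points at a directory containing the GeoLite2 .mmdb files.
#
#   from django.contrib.gis.geoip2 import GeoIP2
#   g = GeoIP2()                        # picks up GEOIP_PATH / GEOIP_COUNTRY / GEOIP_CITY
#   g.country('djangoproject.com')      # e.g. {'country_code': 'US', 'country_name': 'United States'}
#   g.city('24.124.1.80')               # dictionary of city information (some values may be None)
#   g.lat_lon('djangoproject.com')      # (latitude, longitude) tuple
#   g.geos('24.124.1.80')               # GEOS Point with SRID 4326, or None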
| [
"[email protected]"
] | |
461b635b955e33ca50eb9eb4f5cd167f0ab81b3a | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/views_20201115192231.py | f4f483905ce72aa6aed71af7ca372af90704fdb2 | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,180 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
# contains user signup + login form
def view_index(request):
    # bonus: if the user is already logged in, let's not show them the login/registration page,
# and instead redirect them to /books, which is already where we redirect users
# after they login/register.
if 'user_id' in request.session:
return redirect("/books")
return render(request, "index.html")
# user signup form will post to a url (/register) which maps to this function
def register_new_user(request):
# returns a dictionary of errors.
# e.g. errors['first_name'] = 'letters only'
errors = User.objects.user_registration_validator(request.POST)
# iterate over each error (key/value) pair in the errors dictionary
# and take the error key and value and makes a full error message,
# and then adds the error message via messages.error()
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/books')
def login(request):
# user did provide email/password, now lets check database
email_from_post = request.POST['email']
password_from_post = request.POST['password']
# this will return all users that have the email_from_post
# in future we should require email to be unique
users = User.objects.filter(email=email_from_post)
if len(users) == 0:
messages.error(request, "email/password does not exist")
return redirect("/")
user = users[0]
print(user)
# check that the user submitted password is the same as what we have stored in the database
if (user.password != password_from_post):
messages.error(request, "email/password does not exist")
return redirect("/")
# we store the logged in user's id in the session variable,
# so that we can quickly get the current logged in user's id any time we need it in back end functions.
# e.g. view_books when we look up the user by: User.objects.get(id=request.session['user_id'])
    # session variables are shared across all of my requests
# LEARN
request.session['user_id'] = user.id
return redirect("/books")
def logout(request):
request.session.clear()
return redirect("/")
# this will render view_books.html page.
# this page will show a list of all the books and the current logged in user.
def view_books(request):
if 'user_id' not in request.session:
return redirect("/")
user = User.objects.get(id=request.session['user_id'])
all_books_from_db = Books.objects.all()
context = {
"user": user,
"all_books": all_books_from_db
}
return render(request, "view_books.html", context)
# this will render view_book.html page.
# this page will show a single book and the current logged in user.
def view_book(request, book_id):
if 'user_id' not in request.session:
return redirect("/")
user = User.objects.get(id=request.session['user_id'])
book_from_db = Books.objects.get(id=book_id)
context = {
"user": user,
"book": book_from_db
}
print(book_from_db.id)
return render(request, "view_book.html", context)
# adds new book to database that you like
def add_book(request):
if 'user_id' not in request.session:
return redirect("/")
errors = Books.objects.add_book_validator(request.POST)
print(errors)
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/books")
# current logged in user
current_user = User.objects.get(id=request.session['user_id'])
title_from_post = request.POST['title']
description_from_post = request.POST['desc']
book = Books.objects.create(
title=title_from_post,
desc=description_from_post,
uploaded_by_id=current_user.id,
)
print(book)
book.users_who_favorite.add(current_user)
return redirect("/books")
# favorite a book that you did not upload
def favorite_book(request, book_id):
if 'user_id' not in request.session:
return redirect("/")
book_from_db = Books.objects.get(id=book_id)
user_from_db = User.objects.get(id=request.session['user_id'])
# TODO if user has already added book as favorite, just return, don't re-add
book_from_db.users_who_favorite.add(user_from_db)
book_from_db.save()
return redirect("/books/" + str(book_id))
#this will edit the description of the book and redirect back to book page
def edit_book(request, book_id):
errors = Books.objects.add_book_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect("/books/" + str(book_id) + "/edit")
book_to_update = Books.objects.get(id=book_id)
book_to_update.title = request.POST['title']
book_to_update.desc = request.POST['desc']
book_to_update.save()
return redirect("/books/" + str(book_id))
#delete a book from the db but only if you uploaded it
def delete_book(request, book_id):
this_book = Books.objects.get(id=book_id)
this_book.delete()
return redirect("/books")
#removes a book from the favorite list of the user
def unfav_book(request, book_id):
    if 'user_id' not in request.session:
        return redirect("/")
    this_book = Books.objects.get(id=book_id)
    current_user = User.objects.get(id=request.session['user_id'])
    # mirror of favorite_book: drop the logged-in user from the book's favorites
    this_book.users_who_favorite.remove(current_user)
    return redirect("/books/" + str(book_id))
| [
"[email protected]"
] | |
2afb20a8d6138518efc06a6055d56149e339e7ab | 7d44745a63b5f470e718be3b02b08a2e4c90ff45 | /205IsomorphicStrings.py | d249d951cd37eef228fdd2d9612887bd285c7a92 | [] | no_license | SixingYan/algorithm | 20895471baca1b77d3dbe4a3310cc3789dc10c78 | 25b20d03b5613b731ac07baad1073daa3955113b | refs/heads/master | 2020-03-25T01:00:42.908903 | 2019-12-14T02:04:51 | 2019-12-14T02:04:51 | 143,217,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | """
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.
Example 1:
Input: s = "egg", t = "add"
Output: true
Example 2:
Input: s = "foo", t = "bar"
Output: false
Example 3:
Input: s = "paper", t = "title"
Output: true
Note:
You may assume both s and t have the same length.
"""
"""
Comments
"""
"""
My
"""
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
return self.analysis(list(s)) == self.analysis(list(t))
def analysis(self, s):
arr = []
d = {}
idx = 0
for i in range(len(s)):
if s[i] in d.keys():
arr.append(d[s[i]])
else:
d[s[i]] = idx
arr.append(idx)
idx += 1
return arr
"""
Fast
"""
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
s_to_t = {}
length = len(s)
dict_values = {}
for i in range(length):
if s[i] in s_to_t:
if s_to_t[s[i]] != t[i]:
return False
else:
if t[i] in dict_values:
if s[i] != dict_values[t[i]]:
return False
s_to_t[s[i]] = t[i]
dict_values[t[i]] = s[i]
return True | [
"[email protected]"
] | |
03c2543b84cafbe50d743af624d68e6d7e91f476 | 33421188df7d7dcf2ee9be0771b0f2fe1ffad4f5 | /2012/gul-uc3m/bbdd-clave-valor/ejemplos/hash.py | 1dc822a5ca1e0c28e50659742f6cf7f6b5e48b81 | [
"CC-BY-4.0"
] | permissive | Gustavo17/ponencias | c0482fc7a72d7d4d829a54b94775e77c81ca5d97 | effb002b0300fe57d26776654b61a2396010da40 | refs/heads/master | 2021-01-13T09:18:13.837313 | 2014-11-21T04:58:11 | 2014-11-21T04:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from kyotocabinet import *
import time
import random
db = DB()
db.open("db.kch", DB.OCREATE|DB.OWRITER)
pre_time = time.time()
# 1 Million loop
for x in range(1,1000000):
db.add(x,x+x)
post_time = time.time()
print "Escribir 1M de registros: %.4f segundos" % (post_time-pre_time)
keys = [random.randint(1, 1000000) for x in range(1,10000)]
pre_time = time.time()
for x in keys:
db.get(x)
post_time = time.time()
print "Leer 10K registros aleatorios: %.4f segundos" % (post_time-pre_time)
cur = db.cursor()
pre_time = time.time()
cur.jump(10000)
for x in range(1,10000):
cur.step()
post_time = time.time()
print "Leer 10K registros consecutivos: %.4f segundos" % (post_time-pre_time)
db.close()
| [
"[email protected]"
] | |
0d0af063bede796cb727ece6c2cdda4f9bf71a6a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_conforming.py | 7156a408292dbb1e044fcef4ea5ca44b272f2a6a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _CONFORMING():
def __init__(self,):
self.name = "CONFORMING"
self.definitions = conform
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['conform']
| [
"[email protected]"
] |