Dataset schema (one row per source file):

| column | dtype |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3-281) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0-57) |
| license_type | string (2 classes) |
| repo_name | string (length 6-116) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k-668M, nullable) |
| star_events_count | int64 (0-102k) |
| fork_events_count | int64 (0-38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4-6.02M) |
| extension | string (78 classes) |
| content | string (length 2-6.02M) |
| authors | sequence (length 1) |
| author | string (length 0-175) |
98e60fc6389398e16d76d7de6e665ef79eac8947 | dcc36a7744d657e15385803fcd13335685a595af | /quantdigger/demo/test_backtest.py | 9500f1a4557b50deac1f9ee32fcf107612863f33 | [] | no_license | timedcy/quantdigger | 777c28ba96d7dba1cb491a634f46e3968f3232bb | bc492811c796caaad3801d379bb485c1986d4619 | refs/heads/master | 2021-01-22T01:27:52.102183 | 2015-11-08T04:35:37 | 2015-11-08T04:35:37 | 45,767,058 | 1 | 0 | null | 2015-11-08T04:36:13 | 2015-11-08T04:36:13 | null | UTF-8 | Python | false | false | 2,362 | py | # -*- coding: utf-8 -*-
from quantdigger.engine.execute_unit import ExecuteUnit
from quantdigger.indicators.common import MA, BOLL
from quantdigger.engine.strategy import TradingStrategy
from quantdigger.util import pcontract, stock
from quantdigger.digger import deals
import plotting
#def average(series, n):
#""" 一个可选的平均线函数 """
### @todo plot element
#sum_ = 0
#for i in range(0, n):
#sum_ += series[i]
#return sum_ / n
class DemoStrategy(TradingStrategy):
""" 策略实例 """
def __init__(self, exe):
super(DemoStrategy, self).__init__(exe)
print 'start: ', self.datetime[0]
self.ma20 = MA(self, self.close, 20,'ma20', 'b', '1')
self.ma10 = MA(self, self.close, 10,'ma10', 'y', '1')
self.b_upper, self.b_middler, self.b_lower = BOLL(self, self.close, 10,'boll10', 'y', '1')
#self.ma2 = NumberSeries(self)
def on_bar(self):
""" 策略函数,对每根Bar运行一次。"""
#self.ma2.update(average(self.open, 10))
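        # Golden cross: the 10-period MA crosses above the 20-period MA -> open a long;
        # dead cross (with an open position): the 10-period MA crosses back below -> close it.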
if self.ma10[1] < self.ma20[1] and self.ma10 > self.ma20:
self.buy('long', self.open, 1, contract = 'IF000.SHFE')
elif self.position() > 0 and self.ma10[1] > self.ma20[1] and self.ma10 < self.ma20:
self.sell('long', self.open, 1)
        # cross-instrument data references
#print self.position(), self.cash()
#print self.datetime, self.b_upper, self.b_middler, self.b_lower
#print self.datetime[0]
if __name__ == '__main__':
pcon = pcontract('BB.SHFE', '1.Minute')
#begin_dt, end_dt = '2015-05-25', '2015-06-01'
        #pcon = stock('600848','10.Minute') # download stock data via tushare
simulator = ExecuteUnit([pcon, pcon])
algo = DemoStrategy(simulator)
#algo1 = DemoStrategy(simulator)
#algo2 = DemoStrategy(simulator)
simulator.run()
        # display the backtest results
from quantdigger.datastruct import TradeSide
ping = 0
kai = 0
for t in algo.blotter.transactions:
if t.side == TradeSide.PING:
ping += t.quantity
elif t.side == TradeSide.KAI:
kai += t.quantity
else:
raise "error"
print "ping: ", ping
print "kai: ", kai
assert kai >= ping
| [
"[email protected]"
] | |
144e2902dff07ccc9008bdb81891c775c216b282 | 22ca9743aa05b70002276f37284e13c9ce614257 | /nodemcu-uploader.py | 041c15e601d52821ee82ade6466d5e9464a093d1 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Christoph-D/nodemcu-uploader | efe2d68aab12a220c8fe9496245e92bf3ecb2923 | 7c7026491a4a730965903d7eb848a47f4e628195 | refs/heads/master | 2020-07-12T04:11:27.443014 | 2016-03-09T21:20:57 | 2016-03-09T21:20:57 | 53,532,605 | 0 | 0 | null | 2016-03-09T21:19:45 | 2016-03-09T21:19:45 | null | UTF-8 | Python | false | false | 172 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Peter Magnusson <[email protected]>
from nodemcu_uploader import main
if __name__ == '__main__':
main.main_func()
| [
"[email protected]"
] | |
3a206f6d8e955b15bbd61988b40ea1f668583f18 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /algorithm/dp_subset_sum.py | 44580f16c302081909155ac156cefc69cf012378 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py |
nums = [34, 4, 12, 5, 2]
target = 9
#target = 40
def dfs(nums, depth, n, target):
if target == 0:
return True
if depth == n or target < 0:
return False
res = dfs(nums, depth + 1, n, target - nums[depth]),
res += dfs(nums, depth + 1, n, target),
return any(res)
mem = {}
def dfs_dp(nums, depth, n, target):
    # memoize on (depth, target); keying on depth alone gives wrong answers,
    # and the recursive calls must go through dfs_dp to actually hit the cache
    if (depth, target) in mem:
        return mem[(depth, target)]
    if target == 0:
        return True
    if depth == n or target < 0:
        return False
    res = dfs_dp(nums, depth+1, n, target - nums[depth]),
    res += dfs_dp(nums, depth+1, n, target),
    mem[(depth, target)] = any(res)
    return mem[(depth, target)]
def isSubsetSum(nums, n, target):
subset = ([[False for i in range(target+1)] for i in range(n+1)])
for i in range(n+1):
subset[i][0] = True
for i in range(1, target+1):
subset[0][i] = False
for i in range(1, n+1):
for j in range(1, target+1):
if j < nums[i-1]:
subset[i][j] = subset[i-1][j]
else:
subset[i][j] = (subset[i-1][j] or
subset[i-1][j-nums[i-1]])
return subset[n][target]
def is_subset_sum(nums, n, target):
    # 1D 0/1-knapsack: iterate sums in descending order so each number is
    # used at most once (the original dp/cmb bookkeeping could only find
    # sums of at most two distinct numbers)
    dp = [False]*(target+1)
    dp[0] = True
    for num in nums:
        for i in range(target, num-1, -1):
            if dp[i-num]:
                dp[i] = True
    return dp[target]
# print(dfs(nums, 0, len(nums), target))
# print(dfs_dp(nums, 0, len(nums), target))
print(isSubsetSum(nums, len(nums), target))
print(is_subset_sum(nums, len(nums), target))
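# Expected output for nums=[34, 4, 12, 5, 2] and target=9: True (4 + 5 = 9).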
| [
"[email protected]"
] | |
03dde1a263827b35b7aaa86f9f7835c933b700cc | 48f73b5b78da81c388d76d685ec47bb6387eefdd | /scrapeHackerrankCode/codes/find-point.py | fc6157b3e72153e83bf8e16e66b00a8c13227285 | [] | no_license | abidkhan484/hacerrankScraping | ad0ceda6c86d321d98768b169d63ea1ee7ccd861 | 487bbf115117bd5c293298e77f15ae810a50b82d | refs/heads/master | 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # Accepted
# Python 3
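# Reflecting P=(x1, y1) through Q=(x2, y2): Q must be the midpoint of P and
# its image, so the image is (2*x2 - x1, 2*y2 - y1).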
def find_point(x1, y1, x2, y2):
print((2*x2-x1), (2*y2-y1))
for _ in range(int(input().strip())):
x1, y1, x2, y2 = input().split()
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
find_point(x1, y1, x2, y2)
| [
"[email protected]"
] | |
e51ccbd873a076784af7467c364a96aa7d2e3b3e | 20f1d8b7fb5331d6df6e7bee796afa3beac7910d | /Practice_2/lab4/lab4_2.py | 77487d2fa5253f9ce7a9232a51ca3c46fbd727ac | [] | no_license | ndtands/Algorithm_and_data_structer | e169c262445dffa132b297b55c50a2d769648f17 | bb83e2185a2244ef311fb80584fd28b0c40f8329 | refs/heads/master | 2023-05-30T08:49:27.984870 | 2021-06-14T09:33:08 | 2021-06-14T09:33:08 | 376,193,072 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | """
pseudocode (0/1 knapsack over weights only):
    - weigh: item weights, indices 0 to n-1
    - W: knapsack capacity
    - value: (W+1) x n table
    - time complexity: O(n*W)
    Main:
    init value all = 0
    for i from 0 to n-1:
        for m from 1 to W:
            value[m][i] = value[m][i-1]                              if m < weigh[i]
            value[m][i] = max{value[m][i-1],
                              value[m-weigh[i]][i-1] + weigh[i]}     if m >= weigh[i]
    return value[W][n-1]
"""
def Get_max_god(weigh,W):
weigh=[0]+weigh
n = len(weigh)
value = [[0 for _ in range(n)] for _ in range(W+1)]
for i in range(1,len(weigh)):
for w in range(1,W+1):
value[w][i] = value[w][i-1]
if weigh[i] <= w:
val = value[w-weigh[i]][i-1]+weigh[i]
if value[w][i] <val:
value[w][i] = val
return value[W][len(weigh)-1]
W,n = [int(i) for i in input("Nhap W,n: ").split(" ")]
weigh = [int(i) for i in input().split(" ")]
print(Get_max_god(weigh,W))
| [
"[email protected]"
] | |
2c3d7af90245cf571a6c064aa837b69a7be5e9f3 | b9123130de15cef57c4c10ed8fcccf2952151407 | /djangoFamily/urls.py | 5ada4539fcda65667d0ad20c30ed713277f71aa1 | [] | no_license | ffpaiki/django-family | ad7f063af4100b7be8722a4372fb65097530d785 | 8b60d4bf7b68d25a9589bf246a3118b98bf9115a | refs/heads/master | 2023-05-14T21:51:17.939068 | 2021-06-01T21:18:07 | 2021-06-01T21:18:07 | 372,964,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | """djangoFamily URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from person.views import person_view
urlpatterns = [
path('family/', include('family.urls')),
path('', person_view),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
1e8294ec9b10630a7a9116f76fccd3992da630d5 | a1ae8d7a75566dacafc46635d34314989d2ea3a1 | /脚本/backup_ver1.py | 1b483ce56621c6b3994e78cb5baa8ffa6745f918 | [] | no_license | lianshitong/pylearn | a03cc9129be381d06f4223fa5e6bd81fae69f64c | caeb15a7ce55d8bd88be22b4942900925106b8d7 | refs/heads/master | 2021-01-19T04:38:12.569234 | 2017-04-06T03:54:48 | 2017-04-06T03:54:48 | 87,383,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,019 | py | # Source: www.open-open.com
#Write a Python script that backs up important files or directories into a given target directory.
#[Design]
#[1] The files and directories to back up are given as a list, read from the command-line arguments.
#[2] Backups are stored under a main backup directory.
#[3] Files are backed up into a single compressed archive.
#[4] Each backup creates a date-named subdirectory under the main backup directory, and the archive inside it is named after the current time.
#[5] The compression command is up to the local user: any archiver with a command-line interface works, since the script just passes arguments to it.
# [参考]
# [1] A Byte of Python, 2005
# [2] Python Manuals 2.6
#! /usr/bin/python
# Filename: backup_ver1.py
# 2010-7-12 wcdj
import os
import time
import sys
# 1, The files and directories to be backed up are specified in a list
# source = ['/home/wcdj/my_prog', '/home/wcdj/local_installed']
# The following information is the debug used
print '--------------------------------'
source=[]
print 'The command line arguments are: '
for i in sys.argv:
print i
if i == sys.argv[0]:
continue
source.append(i)
print source
print '--------------------------------'
# check input, if error app exit
if len(source) == 0:
    print '''You should input the files or directories, like
python backup_ver1.py /home/wcdj/myfile /home/wcdj/mydir ...'''
exit()
else:
print 'Some files or directorier will be saved into .tar.gz format: '
print source
# If you are using Windows, use
# source=[r'c:/Documents', r'd:/work'] or
# source=['c://Documents', 'd://work'] or
# something like that
# 2, The backup must be stored in a main backup directory
# Remember to change this to what you will be using
target_dir = '/home/wcdj/backup/'
# 3, The files are backed up into a tar file
# 4, The name of subdirectory and tar file
today = target_dir + time.strftime('%Y%m%d')
now = time.strftime('%H%M%S')
# Take a comment from the user to create the name of the tar file
comment = raw_input('Enter a comment: ')
if len(comment) == 0:# check if a comment was entered
target = today + os.sep + now + '.tar.gz'
else:
    target = today + os.sep + now + '_' + \
             comment.replace(' ', '_') + '.tar.gz'
#Create the subdirectory if it isn't already there
if not os.path.exists(today):
os.mkdir(today)# make directory
print 'Successfully created directory', today
# 5, We use the tar command(in Unix/Linux) to put the files in a tgz archive
tar_command = "tar -zcf %s %s" % (target, ' '.join(source))
# Run the backup
if os.system(tar_command) == 0:
print 'Successful backup to', target
else:
print 'Backup failed'
# end
| [
"[email protected]"
] | |
4fd112924b7f8d61fe2121c3674b954118913501 | 2b74bc3a31e0fa01bdd080d59b02b7eea3e1d77f | /examples/rd_themis.py | 0536a036e2031fdbfaf487852e853311f9c33f64 | [
"Apache-2.0"
] | permissive | serious-company/rd_themis | 4fc892ec49da16856061576a3ed6d18d1f70a980 | de851123cfbb4fd941f969107844023d169febfd | refs/heads/master | 2023-04-08T20:15:43.943249 | 2020-03-08T12:55:40 | 2020-03-08T12:55:40 | 234,794,002 | 0 | 1 | Apache-2.0 | 2023-03-30T13:32:14 | 2020-01-18T20:31:42 | C | UTF-8 | Python | false | false | 1,931 | py | #!/usr/bin/python3.5
#
# Copyright (c) 2016 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import redis
from pythemis import scell
import os
r = redis.StrictRedis(host='localhost', port=6379, db=0)
#load module
r.execute_command("module load {}/../rd_themis.so".format(os.getcwd()))
#scell set by rd_themis
r.execute_command("rd_themis.cset {} {} {}".format("key", "password", "data"))
#scell get by plain get
data = r.execute_command("get {}".format("key"))
enc = scell.scell_seal(b"password")
data = enc.decrypt(data)
print(data)
#scell set plaint encrypted data
data = enc.encrypt(b"data")
r.execute_command("set {} {}".format("key", "".join(map(chr, data))))
#scell get data with rd_themis
data = r.execute_command("rd_themis.cget {} {}".format("key", "password"))
print(data)
#smessage set
r.execute_command("rd_themis.msset {} {} {}".format("key", "\x55\x45\x43\x32\x00\x00\x00\x2d\x6b\xbb\x79\x79\x03\xfa\xb7\x33\x3a\x4d\x6e\xb7\xc2\x59\xde\x78\x96\xfa\x69\xe6\x63\x86\x91\xc2\x65\xa0\x92\xf6\x5a\x22\x3c\xa9\x8e\xc9\xa7\x35\x42", "data"))
#smessage get
data = r.execute_command("rd_themis.msget {} {}".format("key", "\x52\x45\x43\x32\x00\x00\x00\x2d\xc7\xa8\xca\x7a\x00\xc3\xb5\xd1\xad\x51\x37\x30\x8f\x45\xe6\x5e\x54\xdf\x2b\x7a\x45\xbc\x85\x08\xe8\xcc\x3b\xc9\x48\x1b\x63\x1a\xe8\x12\x8b\x39\x74"))
print(data)
#unload module
r.execute_command("module unload rd_themis".format(os.getcwd()))
| [
"[email protected]"
] | |
ed9afc7e270d21655fb7f14fc7d36f3a8ad9427a | 0ae43af4c1dde70faed80981cf807514031b8c6c | /Monte Carlo/mcmc.py | 9530b1e8844b0182fdbfa7560735b1ad5b9f6776 | [
"MIT"
] | permissive | pengyuan/mlnote | 6a9043867a1540f4365f2923aa04610756c74744 | d390ea5d95d63fa7ce43805a36de136b0d013688 | refs/heads/master | 2020-03-11T23:18:44.929694 | 2018-04-20T10:12:29 | 2018-04-20T10:12:29 | 130,318,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,680 | py | # coding: utf-8
from pylab import *
"""采样问题:分布->数据
计算机可以获得服从均匀分布的随机数(容易根据种子生成伪随机数)。
一般的采样问题,都可以理解成:有了均匀分布(简单分布)的采样,如何去获取复杂分布的采样。
"""
"""M-H采样
"""
def mh():
mu = 3
sigma = 10
def qsample():
return np.random.normal(mu, sigma)
def q(x):
return exp(-(x - mu) ** 2 / (sigma ** 2))
def p(x):
"""目标分布"""
return 0.3 * np.exp(-(x - 0.3) ** 2) + 0.7 * np.exp(-(x - 2.) ** 2 / 0.3)
def hm(n=10000):
sample = np.zeros(n)
sample[0] = 0.5
for i in range(n - 1):
q_s = qsample()
u = np.random.rand()
if u < min(1, (p(q_s) * q(sample[i])) / (p(sample[i]) * q(q_s))):
sample[i + 1] = q_s
else:
sample[i + 1] = sample[i]
return sample
x = np.arange(0, 4, 0.1)
realdata = p(x)
N = 10000
sample = hm(N)
plt.plot(x, realdata, 'g', lw=3)
plt.plot(x, q(x), 'r')
plt.hist(sample, bins=x, normed=1, fc='c')
plt.show()
def m():
# The Metropolis-Hastings algorithm
def p(x):
mu1 = 3
mu2 = 10
v1 = 10
v2 = 3
return 0.3 * exp(-(x - mu1) ** 2 / v1) + 0.7 * exp(-(x - mu2) ** 2 / v2)
def q(x):
mu = 5
sigma = 10
return exp(-(x - mu) ** 2 / (sigma ** 2))
stepsize = 0.5
x = arange(-10, 20, stepsize)
px = zeros(shape(x))
for i in range(len(x)):
px[i] = p(x[i])
N = 5000
# independence chain
u = np.random.rand(N)
mu = 5
sigma = 10
y = zeros(N)
y[0] = np.random.normal(mu, sigma)
for i in range(N - 1):
ynew = np.random.normal(mu, sigma)
alpha = min(1, p(ynew) * q(y[i]) / (p(y[i]) * q(ynew)))
if u[i] < alpha:
y[i + 1] = ynew
else:
y[i + 1] = y[i]
# random walk chain
u2 = np.random.rand(N)
sigma = 10
y2 = zeros(N)
y2[0] = np.random.normal(0, sigma)
for i in range(N - 1):
y2new = y2[i] + np.random.normal(0, sigma)
alpha = min(1, p(y2new) / p(y2[i]))
if u2[i] < alpha:
y2[i + 1] = y2new
else:
y2[i + 1] = y2[i]
figure(1)
nbins = 30
hist(y, bins=x)
plot(x, px * N / sum(px), color='g', linewidth=2)
plot(x, q(x) * N / sum(px), color='r', linewidth=2)
figure(2)
nbins = 30
hist(y2, bins=x)
plot(x, px * N / sum(px), color='g', linewidth=2)
show()
"""Gibbs采样
"""
def g():
def pXgivenY(y, m1, m2, s1, s2):
return np.random.normal(m1 + (y - m2) / s2, s1)
def pYgivenX(x, m1, m2, s1, s2):
return np.random.normal(m2 + (x - m1) / s1, s2)
def gibbs(N=5000):
k = 20
x0 = np.zeros(N, dtype=float)
m1 = 10
m2 = 20
s1 = 2
s2 = 3
for i in range(N):
y = np.random.rand(1)
            # iterate k times before keeping each sample
for j in range(k):
x = pXgivenY(y, m1, m2, s1, s2)
y = pYgivenX(x, m1, m2, s1, s2)
x0[i] = x
return x0
def f(x):
"""目标分布"""
return np.exp(-(x - 10) ** 2 / 10)
    # plot
N = 10000
s = gibbs(N)
x1 = np.arange(0, 17, 1)
plt.hist(s, bins=x1, fc='c')
x1 = np.arange(0, 17, 0.1)
px1 = np.zeros(len(x1))
for i in range(len(x1)):
px1[i] = f(x1[i])
plt.plot(x1, px1 * N * 10 / sum(px1), color='r', linewidth=3)
plt.show()
if __name__ == '__main__':
m()
| [
"[email protected]"
] | |
9aff07ad32360b10ae281e93532a2f1af7a967f5 | 7826681647933249c8949c00238392a0128b4a18 | /cosypose/simulator/__init__.py | 6242dfa1c761870f2a85f43957247c13b7b53277 | [
"MIT"
] | permissive | imankgoyal/cosypose | b35678a32a6491bb15d645bc867f4b2e49bee6d2 | fa494447d72777f1d3bd5bd134d79e5db0526009 | refs/heads/master | 2022-12-09T11:18:23.188868 | 2020-08-31T15:34:02 | 2020-08-31T15:34:02 | 291,834,596 | 2 | 0 | MIT | 2020-08-31T22:06:12 | 2020-08-31T22:06:11 | null | UTF-8 | Python | false | false | 173 | py | from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
| [
"[email protected]"
] | |
422f1fd4ccf12e0a93f2d2fe7bbdebf8e36d78f2 | e1c91b1d26183fdde27e1f4392cee6924f4ee085 | /models.py | 202755c4736ac2a363fa81ed2b392db41608fb28 | [] | no_license | ArchipLab-LinfengZhang/Auxiliary-Training | 8106e5a722db921085204df2fc08ef95a161521f | 568dc11c5a397a12724f302a304da60e98e68a85 | refs/heads/master | 2021-04-20T06:43:39.756616 | 2020-10-13T11:53:35 | 2020-10-13T11:53:35 | 249,663,017 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,632 | py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': './pretrain/resnet18-5c106cde.pth',
'resnet34': './pretrain/resnet34-333f7ec4.pth',
'resnet50': './pretrain/resnet50-19c8e357.pth',
'resnet101': './pretrain/resnet101-5d3b4d8f.pth',
'resnet152': './pretrain/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class MainClassifier(nn.Module):
def __init__(self, channel, num_classes=100):
super(MainClassifier, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(channel, num_classes)
def forward(self, x):
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class AuxiliaryClassifier(nn.Module):
    # Auxiliary classifier: first an attention layer, then a bottleneck layer,
    # and finally a fully connected layer
def __init__(self, channel, num_classes=100):
super(AuxiliaryClassifier, self).__init__()
self.attention_layer = self._make_attention_layer(channel)
self.bottleneck_layer = self._make_bottleneck_layer(channel)
self.fc = nn.Linear(channel, num_classes)
self.pool = nn.AdaptiveAvgPool2d(1)
def _make_attention_layer(self, channel):
# parameter <stride=1> indicates the stride of bottleneck layers
# for CIFAR datasets, stride=1 is advised
return nn.Sequential(
nn.Conv2d(in_channels=channel, out_channels=channel,
kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(channel),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(kernel_size=3, padding=1, stride=1,
in_channels=channel, out_channels=channel, bias=False),
nn.BatchNorm2d(channel),
nn.Sigmoid()
)
def _make_bottleneck_layer(self, channel):
return nn.Sequential(
nn.Conv2d(channel, channel // 2, kernel_size=1, stride=1),
nn.BatchNorm2d(channel // 2),
nn.ReLU(inplace=True),
nn.Conv2d(channel // 2, channel // 2, kernel_size=3, stride=1),
nn.BatchNorm2d(channel // 2),
nn.ReLU(inplace=True),
nn.Conv2d(channel // 2, channel, kernel_size=1, stride=1),
nn.BatchNorm2d(channel),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(1)
)
def forward(self, x):
x = self.bottleneck_layer(self.attention_layer(x) * x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
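# Shape note (assuming CIFAR-style 3x32x32 inputs and BasicBlock, expansion=1):
# with conv1 at stride 1 and no max pooling, layer4 outputs (N, 512, 4, 4), so
# both classifiers above map a (N, 512, 4, 4) feature map to (N, num_classes).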
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=100, zero_init_residual=False):
super(ResNet, self).__init__()
print("num_class: ", num_classes)
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # max pooling removed for CIFAR-sized (32x32) inputs
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.bn_means, self.bn_vars = [], []
self.auxiliary_classifiers_list = nn.ModuleList([
MainClassifier(channel=512 * block.expansion, num_classes=num_classes),
AuxiliaryClassifier(channel=512 * block.expansion, num_classes=num_classes),
AuxiliaryClassifier(channel=512 * block.expansion, num_classes=num_classes),
AuxiliaryClassifier(channel=512 * block.expansion, num_classes=num_classes),
AuxiliaryClassifier(channel=512 * block.expansion, num_classes=num_classes)
])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def close_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = False
def record_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
self.bn_means.append(m.running_mean.clone())
self.bn_vars.append(m.running_var.clone())
def open_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = True
def reset_bn(self):
self.bn_means = []
self.bn_vars = []
def load_bn(self):
index = 0
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.running_mean.data = self.bn_means[index].clone()
m.running_var.data = self.bn_vars[index].clone()
# self.bn_means.append(m.running_mean)
# self.bn_vars.append(m.running_var)
index += 1
self.bn_vars = []
self.bn_means = []
def print_bn(self):
for m in self.layer1.modules():
if isinstance(m, nn.BatchNorm2d):
print(m.running_var[0])
print(m.running_mean[0])
def forward(self, inputs):
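        # `inputs` is a list of views of the batch; view i is scored by
        # auxiliary classifier i. After the primary view (index 0), the
        # BatchNorm running statistics are snapshotted and restored via
        # record_bn()/load_bn().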
out_list = []
for index in range(len(inputs)):
# print(index)
x = inputs[index]
out = self.conv1(x)
out = self.bn1(out)
layer1_out = self.layer1(out)
layer2_out = self.layer2(layer1_out)
layer3_out = self.layer3(layer2_out)
layer4_out = self.layer4(layer3_out)
out_list.append(self.auxiliary_classifiers_list[index](layer4_out))
if index == 0:
#return layer4_out
self.record_bn()
self.load_bn()
return out_list
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| [
"[email protected]"
] | |
a85f691e07272e37e184d43b23232623c6d4c14b | 1567d96d6874b2f86096eaccafbaa59ed95f726c | /laywer_in_touch/project/api/migrations/0003_auto_20190420_2108.py | 124fd350b6cbd3fa1d2ff5fd33f25c63f71f2876 | [] | no_license | nativanando/lawyer-in-touch | b11ff78cbcaabb292e94e7ec861eb7359300d471 | 771afd742c44408140c4c3d672e1dabda3fa5fab | refs/heads/master | 2021-10-28T03:46:02.559506 | 2019-04-21T18:29:05 | 2019-04-21T18:29:05 | 182,430,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 2.2 on 2019-04-20 21:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20190420_2107'),
]
operations = [
migrations.RenameField(
model_name='contract',
old_name='customer',
new_name='id_customer',
),
migrations.RenameField(
model_name='contract',
old_name='lawyer',
new_name='id_lawyer',
),
]
| [
"[email protected]"
] | |
abf3cc2733f9ecf9e321e886fbfc782dc581e43e | 741f7d43d5e5d20d15d975ec8153d7ccdec19f3f | /se/task2/migrations/0002_auto_20200810_2213.py | cb4e261f57f6870e83f6dadbf6cda02f40e43b07 | [] | no_license | krishnumkhodke/tasks | 2ff20e3f4f6a10d962167380762b3171e845e3d8 | 1295c0422838c19c7ec859945863b785dd59c47f | refs/heads/master | 2022-11-30T06:29:36.635311 | 2020-08-14T18:30:39 | 2020-08-14T18:30:39 | 287,596,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 3.0 on 2020-08-10 16:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('task2', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user_info',
name='email',
field=models.EmailField(max_length=254, unique=True),
),
]
| [
"[email protected]"
] | |
dae2781054c223b7973d66222611a491d8b965b8 | 52407745c0d787e296de7877c6113daa814c9304 | /zstp/py180320/classify_text.py | 5fd2b3f92ffdd4df99496aae146c9804c7063f94 | [] | no_license | moafmoar/hello | 28bd378e1035dda1419a445e231601dd16a3ab15 | a595c97f951591948797518f4a9d379f25d982c5 | refs/heads/master | 2021-09-10T17:36:40.647306 | 2018-03-30T06:45:25 | 2018-03-30T06:45:25 | 114,957,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | py | """
Classify complaints with the pre-trained models.
"""
import pandas as pd
import numpy as np
import os
from sklearn.externals import joblib as jl
import time
import jieba, re
import jieba.posseg as pseg
import sys, unicodedata
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
# storage locations for data and models
# main_path = os.path.abspath('.')
main_path='E:\workspalce\python\hello\zstp\py180208'
data_path = main_path+'\\data\\' # data directory
# model_path=os.main_path+'\\model\\'
def read_model(path=main_path):
"""
加载已经训练好的模型
:param path: 文件路径
:return: 训练好的模型
"""
t=jl.load(path+'\\tfidf.m') #tfidf模型
com_chi2=jl.load(path+'\\select_feature.m')#刷选变量模型
com_model=jl.load(path+'\\classify_svc.m')#分类模型
return t,com_chi2,com_model
# read data
def read_data(path=main_path):
    """
    Read the data to be processed.
    :param path: directory where the data is stored
    :return: the loaded data, in DataFrame format
    """
headers = ['gd_id', 'description']
dataset=pd.read_csv(path+'\\ldata.csv',encoding='gbk')
return dataset
# dataset = pd.read_csv(data_path+'record_info.txt', header=None, encoding='utf8')
# dataset.columns = headers
# preprocess data
def dis_data(dataset):
    """
    Preprocess the existing data.
    :param dataset: the data to process
    :return: the processed data
    """
dataset = dataset[dataset['description'].notnull()]
dataset.index=range(len(dataset))
    # lowercase all English letters in the complaint descriptions
dataset['text'] = dataset['description'].str.lower()
    # normalize mis-converted words, synonyms and near-synonyms
dict_path = main_path + '\\dict\\'
f = open(dict_path + 'words_replace.txt', 'r', encoding='utf8')
for line in f.readlines():
value = line.strip().replace('\n','').split(',')
dataset['text'] = dataset['text'].str.replace(value[0], value[1])
    # stop words
stopwords_path = dict_path+'stopwords.txt'
stop_set = set([value.replace('\n','') for value in open(stopwords_path, 'r', encoding='utf8').readlines()])
userdict_path = dict_path+'word_dict.txt'
jieba.load_userdict(userdict_path)
flag_ls = ['a','ad','b','d','f','i','l','m','n','nrt','ns','nt','nz','v','vn','x']
def pseg_cut(text):
words = pseg.cut(text)
return ' '.join([w.word for w in words if w.flag in flag_ls and w.word not in stop_set and len(w.word)>=2])
dataset['cut'] = dataset['text'].map(pseg_cut)
    # regexes used for cleaning
res = re.compile(r'\s+')
red = re.compile(r'^(\d+)$')
    # strip punctuation and other unexpected characters
todel = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)) not in ('Lu', 'Ll', 'Lt', 'Lo', 'Nd', 'Nl', 'Zs'))
    # helper that cleans the tokenized text
def cleantext(text):
# try:
# text = unicode(text)
# except:
# pass
if text != '':
return re.sub(res, ' ', ' '.join(map(lambda x: re.sub(red, '', x), text.translate(todel).split(' ')))).strip()
else:
return text
    # clean the tokenization results
dataset['cut_clean'] = dataset['cut'].map(cleantext)
return dataset
def model_fit(dataset,t,com_chi2,com_model):
    """
    Predict the classification results.
    :param dataset: the data
    :param t: the TF-IDF model
    :param com_chi2: the feature-selection model
    :param com_model: the classification model
    :return: the predicted class labels
    """
features = t.transform(dataset['cut_clean'])
features_chi2 = com_chi2.transform(features)
predict_id = com_model.predict(features_chi2)
return predict_id
def dict_df(array_df):
    """
    Convert a Series into a dict recording the count of each complaint type.
    Arguments:
        array_df -- data in pandas Series format
    Returns:
        a dict like {i: {'name': complaint_type, 'value': count}}
    """
    df_count=array_df.value_counts()
    dict_count={}
    for i in range(len(df_count)):
        dict_count[i]={'name':df_count.index[i],'value':df_count[i]}
    return dict_count
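# Example (assumed labels): a Series like ['billing', 'billing', 'network']
# yields {0: {'name': 'billing', 'value': 2}, 1: {'name': 'network', 'value': 1}}.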
if __name__=='__main__':
    dataset=read_data() # load the data
    t,com_chi2,com_model=read_model() # load the models
    dataset=dis_data(dataset) # preprocess the data
    predict_id=model_fit(dataset,t,com_chi2,com_model) # predict the complaint categories
    dataset['predict_label'] = predict_id # predicted-label column
    #for i in range(len(dataset)): # print the predictions
    #    print('%s complains about a %s issue' % (dataset['gd_id'][i], dataset['predict_label'][i]))
    dict_freq=dict_df(dataset['predict_label']) # per-category complaint counts, as a dict
print(dict_freq)
| [
"[email protected]"
] | |
c3988bcfd867b120960050f68e13953610bb01d2 | 4ceb794a6f7951ba17b64f4d25c3780883156bbe | /codes/model.py | d64e126eeda0455f1afb72c79581194cbc9f8284 | [] | no_license | TimmyWang/time_series_anomaly_detection | 37ceb35198aee93788fd92c14b59cdb1d6a717e6 | 068ac33c695a2607658dc9dc2f8b34e5f970cf68 | refs/heads/master | 2022-12-06T06:58:36.256626 | 2020-08-23T10:36:36 | 2020-08-23T10:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv1D, Dense
#from keras.layers import LSTM, TimeDistributed
from keras.layers import Flatten#, Reshape, Concatenate
import numpy as np
import matplotlib.pyplot as plt
def get_model(seq_len):
seq_input = Input(shape=(seq_len,1))
h_layer = Conv1D(3, 10,activation="relu")(seq_input)
h_layer = Flatten()(h_layer)
h_layer = Dense(5,activation="relu")(h_layer)
output = Dense(1)(h_layer)
return Model(seq_input, output)
class AnomalyDetector:
def __init__(self, model, x, y):
self.model = model
self.x = x
self.y = y
self.time_steps = np.array([i for i in range(len(x))])
self.values = np.squeeze(y)
self.loss = self._calculate_loss()
def _calculate_loss(self):
loss = []
for i, x in enumerate(self.x):
inputt = np.expand_dims(x,axis=0)
output = np.expand_dims(self.y[i],axis=-1)
a_loss = self.model.evaluate(inputt,output,verbose=0)
loss.append(a_loss)
return loss
def plot(self,threshold,size):
col = ['r' if l > threshold else 'b' for l in self.loss]
plt.figure(figsize=(15,7))
plt.scatter(self.time_steps, self.values, c=col,alpha=0.5, s=size)
| [
"[email protected]"
] | |
9f3e8d0c12df00c8e7512f2dd31e83eb5a13fe01 | 7c04c5c8c8330b4e2babffb96f7d8c88f1a39e7f | /flow/db/instdata/admin.py | cf54ff28c1ccbc770364837b67e80f9342cc97e8 | [
"BSD-3-Clause"
] | permissive | OspreyX/flow | d200a07e21a90e0ff621a7d0012754d6559cd538 | 20cdcd6b128defe57649d6e20a5fec60488fd0d2 | refs/heads/master | 2020-12-27T21:22:22.863762 | 2013-05-20T06:11:39 | 2013-05-20T06:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,082 | py | from django.contrib import admin
from django.contrib.admin import helpers
from django import http
from django.template import loader
from django.utils.safestring import mark_safe
from django.contrib.admin.util import unquote
from django.forms.models import modelform_factory
from django.utils import simplejson as json
from models import *
from utils import ctids, dbmodels
from forms import DataIdForm
#_______________________________________ INLINES
class VendorIdInline(admin.TabularInline):
model = VendorId
class BondIssuerInline(admin.TabularInline):
model = BondIssuer
#_______________________________________ ADMINS
class DataIdAdmin(admin.ModelAdmin):
list_display = ('code', 'name', 'live', 'get_country',
'curncy', 'content_type', 'firm_code', 'isin',
'tags')
form = DataIdForm
inlines = [VendorIdInline]
search_fields = ('code', 'name', 'description', 'isin', 'tags')
list_filter = ('content_type',)
save_on_top = True
def change_content(self, request, obj = None):
form = self.get_form(request, obj = obj)
data = request.POST or request.GET
form = form(initial = dict(data.items()), instance = obj)
html = self.render_content_form(request, form.content_form)
data = {'header':'htmls',
'body': [{'identifier': '.data-id-instrument',
'html': html}]
}
return http.HttpResponse(json.dumps(data), mimetype='application/javascript')
def render_content_form(self, request, content_form):
if content_form:
model = content_form._meta.model
content_admin = self.admin_site._instruments.get(model,None)
form = helpers.AdminForm(content_form,
list(content_admin.get_fieldsets(request)),
content_admin.prepopulated_fields,
content_admin.get_readonly_fields(request),
model_admin=content_admin)
return loader.render_to_string('admin/instdata/dataid/instrument_form.html',{'adminform':form})
else:
return ''
def add_view(self, request, **kwargs):
if request.is_ajax():
return self.change_content(request)
else:
return super(DataIdAdmin,self).add_view(request, **kwargs)
def change_view(self, request, object_id, **kwargs):
if request.is_ajax():
return self.change_content(request, self.get_object(request, unquote(object_id)))
else:
return super(DataIdAdmin,self).change_view(request, object_id, **kwargs)
def render_change_form(self, request, context, **kwargs):
content_form = context['adminform'].form.content_form
context['instform'] = self.render_content_form(request, content_form)
return super(DataIdAdmin,self).render_change_form(request, context, **kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "content_type":
kwargs["queryset"] = ctids()
return db_field.formfield(**kwargs)
return super(DataIdAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class VendorAdmin(admin.ModelAdmin):
list_display = ('code', 'name', 'summary')
prepopulated_fields = {'name': ('code',)}
class DataFieldAdmin(admin.ModelAdmin):
list_display = ('code', 'description', 'format')
class VendorDataFieldAdmin(admin.ModelAdmin):
list_display = ('vendor', 'field', 'code')
class VendorIdAdmin(admin.ModelAdmin):
list_display = ('ticker', 'vendor', 'dataid',)
class ExchangeAdmin(admin.ModelAdmin):
list_display = ('code','name',)
class BondMaturityTypeAdmin(admin.ModelAdmin):
list_display = ('code','description',)
class CouponTypeAdmin(admin.ModelAdmin):
list_display = ('code','month_frequency','day_count','description')
ordering = ('code','month_frequency','day_count')
class FutureContractAdmin(admin.ModelAdmin):
list_display = ('code','description','type','curncy','country','index','exchange')
class BondClassAdmin(admin.ModelAdmin):
list_display = ('code','bondcode','description','curncy','country','issuer','sovereign','convertible')
search_fields = ('bondcode',)
list_filter = ('sovereign','convertible','curncy', 'country')
inlines = [BondIssuerInline]
class BondIssuerAdmin(admin.ModelAdmin):
list_display = ('bond_class','issuer','ccy','dt')
search_fields = ('bond_class__code',)
class CollateralTypeAdmin(admin.ModelAdmin):
list_display = ('name','order')
class FundTypeAdmin(admin.ModelAdmin):
list_display = ('code','name','openended','description')
class FundManagerAdmin(admin.ModelAdmin):
list_display = ('code','name','description','website')
class IcAdmin(admin.ModelAdmin):
list_display = ('code','firm_code','instype','ccy','data_id')
search_fields = ('code','firm_code')
class BondAdmin(admin.ModelAdmin):
list_display = ('code','bond_class','ISIN','coupon','maturity_date')
search_fields = ('ISIN',)
class InstDecompAdmin(admin.ModelAdmin):
list_display = ('code','dataid','dt','composition')
ordering = ('code','-dt')
class MktDataAdmin(admin.ModelAdmin):
list_display = ('vendor_id','field','dt','mkt_value')
search_fields = ('vendor_id__ticker',)
ordering = ('-dt',)
class IndustryCodeAdmin(admin.ModelAdmin):
list_display = ('id' , 'code' , 'description' , 'parent')
#_______________________________________ REGISTERING
admin.site.register(Vendor,VendorAdmin)
admin.site.register(VendorDataField,VendorDataFieldAdmin)
admin.site.register(VendorId,VendorIdAdmin)
admin.site.register(DataField,DataFieldAdmin)
admin.site.register(DataId,DataIdAdmin)
admin.site.register(Exchange,ExchangeAdmin)
admin.site.register(BondMaturityType,BondMaturityTypeAdmin)
admin.site.register(FutureContract,FutureContractAdmin)
admin.site.register(BondClass,BondClassAdmin)
admin.site.register(BondIssuer,BondIssuerAdmin)
admin.site.register(CollateralType,CollateralTypeAdmin)
admin.site.register(FundManager,FundManagerAdmin)
admin.site.register(FundType,FundTypeAdmin)
admin.site.register(InstDecomp,InstDecompAdmin)
admin.site.register(IndustryCode,IndustryCodeAdmin)
###admin.site.register(InstrumentCode,IcAdmin)
###admin.site.register(Cash3, list_display = ('id','code','curncy','type','extended'))
###admin.site.register(FwdCash, list_display = ('id','code','curncy','value_date'))
###admin.site.register(MktData,MktDataAdmin)
admin.site._instruments = {}
for inst in dbmodels():
admin.site.register(inst)
inst_admin = admin.site._registry.pop(inst)
admin.site._instruments[inst] = inst_admin
| [
"[email protected]"
] | |
49254eb20c5905f20020b227a913eea9b9007358 | 457c673c8c8d704ec150322e4eeee2fde4f827ca | /Programming Basic/First_Steps_in_coding_lab/07_Projects_Creation.py | ccf04b1496f502d612f560496cd25f03a08b4d0e | [] | no_license | xMrShadyx/SoftUni | 13c08d56108bf8b1ff56d17bb2a4b804381e0d4e | ce4adcd6e8425134d138fd8f4b6101d4eb1c520b | refs/heads/master | 2023-08-02T03:10:16.205251 | 2021-06-20T05:52:15 | 2021-06-20T05:52:15 | 276,562,926 | 5 | 1 | null | 2021-09-22T19:35:25 | 2020-07-02T06:07:35 | Python | UTF-8 | Python | false | false | 191 | py | architect = input()
amount_projects = int(input())
total_time = amount_projects * 3
print(f'The architect {architect} will need {total_time} hours to complete {amount_projects} project/s.') | [
"[email protected]"
] | |
f5b3338d64bfd8747d4e1c0534c8191e3607e991 | a9e09d4de8c0d012877aaf8db40331bea5bf1462 | /examples/bert_cls_pooling.py | a5193dab4a9d22802ca5f2311441b93305ab60e1 | [
"BSD-3-Clause"
] | permissive | amenityllc/SentEval | 24dabcaf1e0784a14774c52414f9c28141eb2d95 | 8b6b4528820dbe06869c39829512785b2f56189d | refs/heads/master | 2020-04-28T09:32:17.943065 | 2019-03-18T15:08:32 | 2019-03-18T15:08:32 | 175,170,273 | 0 | 0 | NOASSERTION | 2019-03-12T08:47:29 | 2019-03-12T08:47:00 | Python | UTF-8 | Python | false | false | 594 | py | # from examples.bert_embeddings import BertEmbeddings, AbstractBertPooller
from bert_embeddings import BertEmbeddings, AbstractBertPooller
import numpy as np
class CLSPooller(AbstractBertPooller):
def __init__(self):
super().__init__()
def get_name(self):
return 'cls_pooler'
def pool(self, results):
embeddings = [result['embedding'] for result in results]
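        # keep only each sentence's first token vector (BERT's [CLS] embedding)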
return np.array([np.array(sentence_vectors[0]) for sentence_vectors in embeddings])
if __name__ == "__main__":
bert_embeddings = BertEmbeddings(CLSPooller)
bert_embeddings.run()
| [
"[email protected]"
] | |
0d2b450afa4215b6663f3662b252963b05493a13 | 7e2d802a17e42d50974af29e4c9b658d5da6471b | /IC/21-Missing-drone.py | addde9d1910b7c9628616aee6fecb212e90f64fb | [] | no_license | siddharthadtt1/Leet | a46290bacdf569f69d523413c0129676727cb20e | 1d8b96257f94e16d0c1ccf8d8e8cd3cbd9bdabce | refs/heads/master | 2020-06-20T16:21:15.915761 | 2017-05-15T22:35:42 | 2017-05-15T22:35:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | # see Leet 136 single number | [
"[email protected]"
] | |
6924472770c9b64625e91f5425599c76f151c774 | e3946d91dc5fe71989c2f4b6390232865fcb5d1b | /fjord/flags/spicedham_utils.py | 2e87b27ba33d4e350010c6253dd663f313cda103 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | zeusintuivo/fjord | 61b632fd6df0e1b3508e628fe4f682a937cc0244 | 3bd227004d369df1fdc39f06acff12ebc8f0fe34 | refs/heads/master | 2021-01-16T18:28:52.564638 | 2014-09-24T21:02:51 | 2014-09-24T21:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | import json
import os
import re
import threading
from spicedham import Spicedham
from spicedham.backend import BaseBackend
from fjord.flags.models import Store
class FjordBackend(BaseBackend):
def __init__(self, config):
pass
def reset(self):
Store.objects.all().delete()
def get_key(self, classifier, key, default=None):
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
value = json.loads(obj.value)
except (IndexError, Store.DoesNotExist):
value = default
return value
def set_key(self, classifier, key, value):
value = json.dumps(value)
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
obj.value = value
except (IndexError, Store.DoesNotExist):
obj = Store.objects.create(
classifier=classifier, key=key, value=value)
obj.save()
def set_key_list(self, classifier, key_value_tuples):
for key, value in key_value_tuples:
self.set_key(classifier, key, value)
TOKEN_RE = re.compile(r'\W')
def tokenize(text):
"""Takes a piece of text and tokenizes it into train/classify tokens"""
# FIXME: This is a shite tokenizer and doesn't handle urls
# well. (We should handle urls well.)
tokens = TOKEN_RE.split(text)
return [token.lower() for token in tokens if token]
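# e.g. tokenize('Hello, World 42!') -> ['hello', 'world', '42']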
_cached_spicedham = threading.local()
def get_spicedham():
"""Retrieve a Spicedham object
These objects are cached threadlocal.
"""
sham = getattr(_cached_spicedham, 'sham', None)
if sham is None:
config = {
'backend': 'FjordBackend'
}
sham = Spicedham(config)
_cached_spicedham.sham = sham
return sham
def train_cmd(path, classification):
"""Recreates training data using datafiles in path"""
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError('path "%s" does not exist' % path)
sham = get_spicedham()
# Wipe existing training data.
print 'Wiping existing data...'
sham.backend.reset()
# Load all data for when classifier=True
true_path = os.path.join(path, classification)
print 'Loading classifier=True data from %s...' % true_path
files = [os.path.join(true_path, fn)
for fn in os.listdir(true_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=True)
# Load all data for when classifier=False
false_path = os.path.join(path, 'not_' + classification)
print 'Loading classifier=False data from %s...' % false_path
files = [os.path.join(false_path, fn)
for fn in os.listdir(false_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=False)
print 'Done!'
| [
"[email protected]"
] | |
832974b9068a90cd72f7987a17131faae3924d37 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_cell3.py | c1150112a0c843de3db8f2c0d137662bf75a7671 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from xcp2k.inputsection import InputSection
from _cell_ref1 import _cell_ref1
class _cell3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.A = None
self.B = None
self.C = None
self.Abc = None
self.Alpha_beta_gamma = None
self.Cell_file_name = None
self.Cell_file_format = None
self.Periodic = None
self.Multiple_unit_cell = None
self.Symmetry = None
self.CELL_REF = _cell_ref1()
self._name = "CELL"
self._keywords = {'A': 'A', 'Cell_file_format': 'CELL_FILE_FORMAT', 'C': 'C', 'B': 'B', 'Symmetry': 'SYMMETRY', 'Alpha_beta_gamma': 'ALPHA_BETA_GAMMA', 'Multiple_unit_cell': 'MULTIPLE_UNIT_CELL', 'Periodic': 'PERIODIC', 'Abc': 'ABC', 'Cell_file_name': 'CELL_FILE_NAME'}
self._subsections = {'CELL_REF': 'CELL_REF'}
self._aliases = {'Angles': 'Alpha_beta_gamma'}
@property
def Angles(self):
"""
See documentation for Alpha_beta_gamma
"""
return self.Alpha_beta_gamma
@Angles.setter
def Angles(self, value):
self.Alpha_beta_gamma = value
| [
"[email protected]"
] | |
a056eed4886e7ab503371a8658a267813fb8a067 | dc7632da4f04385142ea86e4a63c8537eaa2edeb | /django_app/config/urls.py | a7a6a32944ca6639e025c365d8c4507f1ca55a53 | [] | no_license | jmnghn/0621_test | bf4ecd4cc4ce56e06f4f07922d53e4dfb7471bab | 9cb3930d24644897074a336c998759a6a86b656a | refs/heads/master | 2022-10-11T17:53:34.715991 | 2017-06-22T05:03:39 | 2017-06-22T05:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^post/', include('post.urls'))
]
| [
"[email protected]"
] | |
0103d51ce865761ed13bd38499675fd2c9ebd607 | e865222c08dbd64758e70d67b97cf954334f1cf1 | /find_refs.py | 80baad3c7b1fad0cbf5e0fa732fa0c625d134add | [] | no_license | gbroques/fc-scripts | 83f9313eab02fc2f38fbe7f5f4ad8119f1ff8e3b | 283bbed243718e261a9f52deb436378162b53a8e | refs/heads/master | 2023-06-26T06:27:20.529426 | 2021-07-26T01:44:04 | 2021-07-26T01:44:04 | 380,091,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,366 | py | import argparse
import glob
import os
import re
import zipfile
from pathlib import Path
from typing import Callable, Dict, List
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
class Reference:
"""Represents a reference to the property of an object in another document."""
def __init__(self,
document: str,
object_name: str,
property_name: str) -> None:
self.document = document
self.object_name = object_name
self.property_name = property_name
def __str__(self):
return self._to_string()
def __repr__(self):
return self._to_string()
def _to_string(self):
return '{}#{}.{}'.format(self.document, self.object_name, self.property_name)
class Match:
"""Represents a match to a reference."""
def __init__(self,
document: str,
object_name: str,
property_name: str,
location: str) -> None:
self.document = document
self.object_name = object_name
self.property_name = property_name
self.location = location
def __str__(self):
return self._to_string()
def __repr__(self):
return self._to_string()
def _to_string(self):
return '{} {}.{} ({})'.format(
self.document,
self.object_name,
self.location,
self.property_name)
def parse_document_xml(document: str) -> Element:
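    # A .FCStd document is a zip archive; the model tree is stored in Document.xml.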
archive = zipfile.ZipFile(document, 'r')
document_xml = archive.read('Document.xml')
return ElementTree.fromstring(document_xml)
def find_root_by_document_path(cwd: str) -> Dict[str, Element]:
"""Returns a dictionary where keys are document filepaths,
and values are document xml root elements.
"""
root_by_document = {}
pattern = Path(cwd).joinpath('**', '*.FCStd').as_posix()
documents = glob.glob(pattern, recursive=True)
for document in documents:
root = parse_document_xml(document)
root_by_document[document] = root
return root_by_document
def make_find_references_in_property(child_element_name: str,
reference_attribute: str,
location_attribute: str,
reference: Reference) -> Callable[[Element], List[str]]:
"""
XML Examples::
<Cell address="B1" content="=Main#Spreadsheet.Value" alias="Value1" />
<Expression path="Radius" expression="Main#Spreadsheet.Value"/>
+--------------------+---------------------+--------------------+
| child_element_name | reference_attribute | location_attribute |
+====================+=====================+====================+
| Cell | content | address |
+--------------------+---------------------+--------------------+
| Expression | expression | path |
+--------------------+---------------------+--------------------+
"""
def find_references_in_property(property: Element) -> List[str]:
locations = []
for child_element in property.findall(child_element_name):
content = child_element.attrib[reference_attribute]
pattern = re.compile(str(reference))
match = pattern.search(content)
if match:
locations.append(child_element.attrib[location_attribute])
return locations
return find_references_in_property
def make_find_references_in_cells(reference: Reference) -> Callable[[Element], List[str]]:
return make_find_references_in_property('Cell',
'content',
'address',
reference)
def make_find_references_in_expression_engine(reference: Reference) -> Callable[[Element], List[str]]:
return make_find_references_in_property('Expression',
'expression',
'path',
reference)
def find_references_in_root(document_path: str,
root: Element,
reference: Reference) -> List[Match]:
matches = []
object_data = root.find('ObjectData')
for object in object_data:
properties = object.find('Properties')
object_name = object.attrib['name']
for property in properties.findall('Property'):
property_name = property.attrib['name']
find_locations = make_find_locations(property)
locations = find_locations(reference)
for location in locations:
matches.append(
Match(document_path, object_name, property_name, location))
return matches
class Property:
"""Represents a property with a potential external or cross-document reference."""
def __init__(self,
property_element: Element,
nested_element_name: str,
make_find_references: Callable[[Reference], Callable[[Element], List[str]]]) -> None:
self.property_element = property_element
self.nested_element_name = nested_element_name
self.make_find_references = make_find_references
def find_locations(self, reference: Reference) -> List[str]:
find_references = self.make_find_references(reference)
nested_element = self.property_element.find(self.nested_element_name)
return find_references(nested_element)
def make_find_locations(property_element: Element) -> Callable[[Reference], List[str]]:
def find_locations(reference: Reference) -> List[str]:
property_name = property_element.attrib['name']
properties_with_references = {'cells', 'ExpressionEngine'}
if property_name in properties_with_references:
property = create_property(property_element)
return property.find_locations(reference)
else:
return []
return find_locations
def create_property(property_element: Element) -> Property:
"""
XML Examples::
<Property name="cells" type="Spreadsheet::PropertySheet" status="67108864">
<Cells Count="4" xlink="1">
...
</Cells>
</Property>
<Property name="ExpressionEngine" type="App::PropertyExpressionEngine" status="67108864">
<ExpressionEngine count="2" xlink="1">
...
</ExpressionEngine>
</Property>
+--------------------+---------------------+
| property_name | nested_element_name |
+====================+=====================+
| cells | Cells |
+--------------------+---------------------+
| ExpressionEngine | ExpressionEngine |
+--------------------+---------------------+
FreeCAD Source:
* `Property <https://github.com/FreeCAD/FreeCAD/blob/0.19.2/src/App/PropertyContainer.cpp#L221-L310>`_
* `Cells <https://github.com/FreeCAD/FreeCAD/blob/0.19.2/src/Mod/Spreadsheet/App/PropertySheet.cpp#L277-L304>`_
* `Expression Engine <https://github.com/FreeCAD/FreeCAD/blob/0.19.2/src/App/PropertyExpressionEngine.cpp#L163-L185>`_
"""
property_name = property_element.attrib['name']
if property_name == 'cells':
return Property(property_element, 'Cells', make_find_references_in_cells)
elif property_name == 'ExpressionEngine':
return Property(property_element, 'ExpressionEngine', make_find_references_in_expression_engine)
return None
def find_references(cwd: str, reference: Reference) -> List[Match]:
matches = []
root_by_document_path = find_root_by_document_path(cwd)
for document_path, root in root_by_document_path.items():
matches_in_document = find_references_in_root(
document_path, root, reference)
matches.extend(matches_in_document)
return matches
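# Example usage (a sketch mirroring the command-line entry point below):
#   matches = find_references(os.getcwd(), Reference('Master', 'Spreadsheet', 'Value'))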
def rename_references(from_reference: Reference,
to_reference: Reference) -> Dict[str, Element]:
"""
    TODO: 1) Find the source document.
             If the name is not a label (not surrounded by << >>),
                 find the file named 'XXX.FCStd'.
             Else,
                 go through every document looking for the one with the label.
          2) Then find the object with that name or label.
<Object name="Spreadsheet">
<Properties Count="7" TransientCount="0">
<Property name="Label" type="App::PropertyString" status="134217728">
<String value="Spreadsheet"/>
</Property>
3) Then find cell with alias.
<Property name="cells" type="Spreadsheet::PropertySheet" status="67108864">
<Cells Count="2" xlink="1">
<XLinks count="0">
</XLinks>
<Cell address="A1" content="Test" />
<Cell address="B1" content="5" alias="Test" />
</Cells>
</Property>
4) Output new XML depending upon to_reference (change alias, spreadsheet name or label).
"""
pass
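# A minimal sketch of step 4 above (textual rewrite of a single cell content
# or expression string). It assumes -- as find_references_in_property does --
# that str(reference) is the textual form that appears in the XML. Locating
# the right document/object/alias (steps 1-3) and writing the tree back to
# disk are left out; the helper name is hypothetical.
def _rewrite_reference_text(content: str,
                            from_reference: Reference,
                            to_reference: Reference) -> str:
    return content.replace(str(from_reference), str(to_reference))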
def remove_external_links(document: str) -> Dict[str, Element]:
"""
https://github.com/FreeCAD/FreeCAD/blob/0.19.2/src/App/PropertyLinks.cpp#L4473-L4510
https://github.com/FreeCAD/FreeCAD/blob/0.19.2/src/App/PropertyLinks.cpp#L3155-L3249
EMPTY
=====
<Cells Count="2" xlink="1">
<XLinks count="0">
</XLinks>
<Cell address="A1" content="Test" />
<Cell address="B1" content="5" alias="Test" />
</Cells>
<Property name="ExpressionEngine" type="App::PropertyExpressionEngine" status="67108864">
<ExpressionEngine count="0">
</ExpressionEngine>
</Property>
XLINKS
======
<Cells Count="4" xlink="1">
<XLinks count="1" docs="1">
<DocMap name="Master" label="Master" index="0"/>
<XLink file="Master.FCStd" stamp="2021-07-25T18:40:15Z" name="Spreadsheet"/>
</XLinks>
<Cell address="A1" content="Value" />
<Cell address="B1" content="=Master#Spreadsheet.Value" alias="Value1" />
<Cell address="D8" content="Value" />
<Cell address="E8" content="=<<Master>>#<<Spreadsheet>>.Value" alias="Value2" />
</Cells>
<ExpressionEngine count="2" xlink="1">
<XLinks count="2" docs="2">
<DocMap name="Master" label="Master" index="1"/>
<DocMap name="Cube" label="Cube" index="0"/>
<XLink file="Cube.FCStd" stamp="2021-07-25T20:03:03Z" name="Box"/>
<XLink file="Master.FCStd" stamp="2021-07-25T18:40:15Z" name="Spreadsheet"/>
</XLinks>
<Expression path="Height" expression="Cube#Box.Height"/>
<Expression path="Radius" expression="Master#Spreadsheet.Value"/>
</ExpressionEngine>
"""
pass
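# A minimal sketch for one property element, assuming the XML shapes shown in
# the docstring above. It only clears the XLink bookkeeping (the <XLinks>
# children and counters); a full implementation would also rewrite the cells
# and expressions that still mention the external document. The helper name
# is hypothetical.
def _strip_xlinks(property_element: Element) -> None:
    for container_name in ('Cells', 'ExpressionEngine'):
        container = property_element.find(container_name)
        if container is None:
            continue
        xlinks = container.find('XLinks')
        if xlinks is None:
            continue
        for entry in list(xlinks):  # DocMap and XLink elements
            xlinks.remove(entry)
        xlinks.set('count', '0')
        xlinks.attrib.pop('docs', None)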
if __name__ == '__main__':
cwd = os.getcwd()
parser = argparse.ArgumentParser(
description='Find cross-document spreadsheet references.')
parser.add_argument(
'document', help='Document where spreadsheet is located.')
parser.add_argument('spreadsheet', help='Spreadsheet name or label.')
parser.add_argument('alias', help='Alias name.')
args = parser.parse_args()
ref = Reference(args.document, args.spreadsheet, args.alias)
matches = find_references(cwd, ref)
def format_match(match: Match) -> str:
beginning_path = cwd + os.path.sep
return str(match).replace(beginning_path, '')
if matches:
num_matches = len(matches)
word = 'reference' if num_matches == 1 else 'references'
print('{} {} to {} found:'.format(num_matches, word, ref))
print(' ' + '\n '.join(map(format_match, matches)))
else:
print('No references to {} found.'.format(ref))
| [
"[email protected]"
] | |
fdfe2da15ad67c8b189985f392c4c6bf45bfc155 | 2df04f6354647440618f87acc9fe8eee480ab11a | /ex036.py | 962783fa2bc74fa6a18e3385a7b4ea2fc071d8fc | [
"MIT"
] | permissive | zWillsz/exsvscode | de6a3d79d73178e6168088a2851bfc4bf9cc9985 | ba507dca6de748e3c82c306731137bb5f6f0c918 | refs/heads/main | 2023-03-22T00:39:58.021276 | 2021-03-11T00:48:17 | 2021-03-11T00:48:17 | 345,176,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | n1 = float(input('Digite um número: '))
n2 = float(input('Digite outro número: '))
n3 = float(input('Digite outro número: '))
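# Triangle inequality: three lengths form a triangle only if each side
# is shorter than the sum of the other two.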
if n1 < n2 + n3 and n2 < n1 + n3 and n3 < n1 + n2:
print('É possível fazer um triângulo.')
else:
print('Não é possível fazer um triângulo.')
| [
"[email protected]"
] | |
e959abd5bcb09d4400358e2b64740608947477ad | 990aeac794d108927157ba0e13e85dad622f9970 | /LAB/SourceCodes/EchoServer/client.py | 21d343d05f481df640fa401b06f284ba9c001f96 | [] | no_license | darwinsubramaniam/UDTECH | af07f0690bbe4304670d4d98a71caeeaaaadbc97 | 6f6151845a20c3dd456b70aab361ce7e1bb9eeec | refs/heads/master | 2021-01-17T05:34:57.104956 | 2015-11-14T15:48:40 | 2015-11-14T15:48:40 | 42,150,764 | 0 | 1 | null | 2015-10-16T18:13:58 | 2015-09-09T02:18:46 | HTML | UTF-8 | Python | false | false | 847 | py | __author__ = 'darwin'
import socket
import sys
#create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
ser_address = ("192.168.1.19", 10000)
print('connecting to %s port %s' % ser_address, file=sys.stderr)
sock.connect(ser_address)
try:
    # Send data
    message = 'This is the message. It will be repeated.'
    print('sending "%s"' % message, file=sys.stderr)
    sock.sendall(bytes(message, 'utf8'))
    # Look for the response
    amount_received = 0
    amount_expected = len(message)
    while amount_received < amount_expected:
        bytedata = sock.recv(16)
        data = bytedata.decode('utf8')  # recv() returns bytes; decode, don't encode
        amount_received += len(bytedata)
        print('received "%s"' % data, file=sys.stderr)
finally:
    print('closing socket', file=sys.stderr)
sock.close()
| [
"[email protected]"
] | |
7b6adfe3e2a7b0b6f0a1ad95b1f71cfddbc311ea | 7e583b67f1a3e73d7ed10a63372bcba70c3d4432 | /sim_db/src_command_line_tool/commands/delete_sim.py | 892e06e181d106043458784238a0db352c606780 | [
"MIT"
] | permissive | task123/sim_db | 6edd9e502186b13eec19ae1a02edbf8ccd6c09d7 | 892934693fc166640f6cbf7af905a2c0d48ea99b | refs/heads/master | 2021-07-20T02:19:10.764670 | 2020-05-07T10:00:08 | 2020-05-07T10:00:08 | 153,098,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,091 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Delete a row in the sim.db.
One must either provide a list of ID's or a condition to delete sets of
simulation parameters (rows) from the database.
Usage: python delete_sim.py --id 'ID'
python delete_sim.py --where 'CONDITION'
"""
# Copyright (C) 2017-2019 Håkon Austlid Taskén <[email protected]>
# Licenced under the MIT License.
if __name__ == '__main__':
import add_package_root_to_path
import sim_db.src_command_line_tool.commands.delete_results_dir as delete_results_dir
import sim_db.src_command_line_tool.commands.helpers as helpers
import sqlite3
import argparse
import sys
import os.path
def command_line_arguments_parser(name_command_line_tool="sim_db",
name_command="delete_sim"):
parser = argparse.ArgumentParser(
description='Delete simulations from sim.db.',
prog="{0} {1}".format(name_command_line_tool, name_command))
parser.add_argument(
'--id',
'-i',
type=int,
nargs='+',
default=[],
help="ID's of runs to delete.")
parser.add_argument(
'--where',
'-w',
type=str,
default=None,
help=
("Condition for which entries should be deleted. Must be a "
"valid SQL (sqlite3) command when added after WHERE in a DELETE "
"command."))
parser.add_argument(
'--all',
action="store_true",
        help="Delete all simulations from the database.")
parser.add_argument(
'--no_checks',
action='store_true',
help=("No questions are asked about wheter you really want to "
"delete simulation or the 'results_dir' of the simulation."))
return parser
def delete_sim(name_command_line_tool="sim_db",
name_command="delete_sim",
argv=None):
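    """Delete rows matching --id or --where from sim.db, prompting first unless --no_checks is given."""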
db = helpers.connect_sim_db()
db_cursor = db.cursor()
args = command_line_arguments_parser(name_command_line_tool,
name_command).parse_args(argv)
if args.all:
args.where = "id > -1"
answer = 'n'
if len(args.id) == 0 and args.where == None:
print("--id 'ID' or --where 'CONDITION' must be passed to the "
"program.")
elif len(args.id) > 0 and not args.no_checks:
print("Do you really want to delete simulations with following ID's:")
for delete_id in args.id:
print(delete_id)
answer = helpers.user_input("? (y/n)")
elif args.where != None and not args.no_checks:
print("Do you really want to delete simulations with following "
"condition:")
print(args.where)
answer = helpers.user_input("? (y/n)")
if (answer == 'y' or answer == 'Y' or answer == 'yes' or answer == 'Yes'
or args.no_checks):
if len(args.id) > 0:
if args.no_checks:
delete_results_dir_params = ['--no_checks', '--id']
else:
delete_results_dir_params = ['--id']
for delete_id in args.id:
delete_results_dir_params.append(str(delete_id))
delete_results_dir.delete_results_dir(
argv=delete_results_dir_params)
for delete_id in args.id:
db_cursor.execute(
"DELETE FROM runs WHERE id = {0}".format(delete_id))
elif args.where:
if args.no_checks:
delete_results_dir.delete_results_dir(
argv=["--no_checks", "--where", args.where])
else:
delete_results_dir.delete_results_dir(
argv=["--where", args.where])
db_cursor.execute("DELETE FROM runs WHERE {0}".format(args.where))
else:
print("No simulations were deleted.")
db.commit()
db_cursor.close()
db.close()
if __name__ == '__main__':
delete_sim("", sys.argv[0], sys.argv[1:])
| [
"[email protected]"
] | |
73ebe4706416324038c098bffa8ff1673ac13aab | 96a08c181df0873ed61645c63cbb2870b713ec56 | /lane.py | 65c5343077343eeb0669dafd3d2ffb52b11db41a | [] | no_license | KnightFoxii/Lane-Detection-system | fe33cff4ba5a9b5c9a8c1a77a88633437b1a5555 | 5991e2b57fe9cf42cc4b7ce3f859ee5dbd7ca3bd | refs/heads/master | 2022-12-01T22:16:59.298843 | 2020-08-20T15:59:58 | 2020-08-20T15:59:58 | 289,047,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | # Import the required libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny_edge_detector(image):
# Convert the image color to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Reduce noise from the image
blur = cv2.GaussianBlur(gray_image, (5, 5), 0)
canny = cv2.Canny(blur, 50, 150)
return canny
def region_of_interest(image):
height = image.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
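    # Triangular region of interest covering the road ahead; the vertices
    # appear tuned for frames roughly 1280 pixels wide.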
mask = np.zeros_like(image)
    # fillPoly can fill one or more polygons at once
cv2.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv2.bitwise_and(image, mask)
return masked_image
def create_coordinates(image, line_parameters):
slope, intercept = line_parameters
y1 = image.shape[0]
y2 = int(y1 * (3 / 5))
x1 = int((y1 - intercept) / slope)
x2 = int((y2 - intercept) / slope)
return np.array([x1, y1, x2, y2])
def average_slope_intercept(image, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
# It will fit the polynomial and the intercept and slope
parameters = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameters[0]
intercept = parameters[1]
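        # In image coordinates y grows downwards, so the left lane line has a negative slope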
if slope < 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
left_fit_average = np.average(left_fit, axis = 0)
right_fit_average = np.average(right_fit, axis = 0)
left_line = create_coordinates(image, left_fit_average)
right_line = create_coordinates(image, right_fit_average)
return np.array([left_line, right_line])
def display_lines(image, lines):
line_image = np.zeros_like(image)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
return line_image
# Path of dataset directory
cap = cv2.VideoCapture("datasets\test2.mp4")
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:  # stop when the clip ends or a frame cannot be read
        break
canny_image = canny_edge_detector(frame)
cropped_image = region_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100,
np.array([]), minLineLength = 40,
maxLineGap = 5)
averaged_lines = average_slope_intercept(frame, lines)
line_image = display_lines(frame, averaged_lines)
combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
cv2.imshow("results", combo_image)
    # Press 'q' to break out of the loop.
    # waitKey(0) would wait indefinitely between frames;
    # waitKey(1) waits 1 ms between frames.
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# close the video file
cap.release()
# destroy all the windows that is currently on
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
98f971dc88967fb06d38b0ee5fa5d6a36c736e0f | 0b078aec6ff439aa7a59152913d88cd617d6bdd1 | /main.py | 3cd15d07c4494a73774edf5bc597542a3f7ddb54 | [] | no_license | RahulAnand442001/ColorgramPy | 0f2fc7a70376c32e72625365ecef8b618441dc72 | 11c829f985a4b399703964d7a7e83b6f59e2a7da | refs/heads/master | 2023-02-05T09:22:16.726583 | 2020-12-17T07:42:11 | 2020-12-17T07:42:11 | 322,218,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | # import colorgram
# rgb_colors = []
# colors = colorgram.extract("image.jpg", 30)
# for col in colors:
# red = col.rgb.r
# blue = col.rgb.b
# green = col.rgb.g
# color = (red, green, blue)
# rgb_colors.append(color)
# print(rgb_colors)
import turtle
import random
color_list = [(202, 12, 30),
(238, 244, 250), (35, 91, 186), (232, 229, 4), (232, 149, 48), (197, 68, 22), (212, 13, 9),
(35, 31, 152), (49, 220, 60), (241, 46, 151), (20, 22, 53), (14, 208, 224), (75, 9, 53),
(17, 154, 18), (55, 26, 13), (80, 193, 223), (219, 23, 116), (232, 159, 8), (241, 64, 24),
(221, 138, 191), (96, 75, 10), (247, 11, 9), (83, 238, 162), (11, 96, 63), (5, 35, 33),
(89, 208, 147)]
turtle.colormode(255)
my_turtle = turtle.Turtle()
my_turtle.shape("turtle")
my_turtle.speed("fastest")
my_turtle.penup()
my_turtle.hideturtle()
my_turtle.setheading(225)
my_turtle.forward(300)
my_turtle.setheading(0)
number_of_dots = 100
for dot_count in range(1, number_of_dots + 1):
my_turtle.dot(20, random.choice(color_list))
my_turtle.penup()
my_turtle.forward(50)
if dot_count % 10 == 0:
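        # a row of 10 dots is complete - move up one row and back to the left edge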
my_turtle.setheading(90)
my_turtle.forward(50)
my_turtle.setheading(180)
my_turtle.forward(500)
my_turtle.setheading(0)
screen = turtle.Screen()
screen.exitonclick()
| [
"[email protected]"
] | |
51faeca946587f96b7b9dd94f87c793737f1948a | 2bcafd5b76b68956489c2e9f9df0e44ec18b3b95 | /HardExamples.py | 4808338ebb8470b25fa17335a869087da74fe312 | [] | no_license | pekkar/hog-cell-detection-framework | 45439c893ca63b77fa192d3e206e442a89df4f50 | 1299799245bd3a537d57704ee2d5c356259abbe8 | refs/heads/master | 2021-01-22T21:12:08.158648 | 2015-09-29T18:02:55 | 2015-09-29T18:02:55 | 43,384,354 | 0 | 0 | null | 2015-09-29T17:52:20 | 2015-09-29T17:52:20 | null | UTF-8 | Python | false | false | 13,923 | py | # -*- coding: utf-8 -*-
# Libraries
import os
import glob
import time
import datetime
import numpy as np
import cv2
import scipy
import uuid
# Files
import SVM
import DetectionProcess
import Filters
import MeasurePerformance
# ----------------------------------------------------------------
# Returns a list of paths to subfolders
def Listdirs(folder):
return [
d for d in (os.path.join(folder, d1) for d1 in os.listdir(folder))
if os.path.isdir(d)
]
# ----------------------------------------------------------------
def Search(hog, trainData, trainClasses, labels,
groundTruth, amountToInitialTraining=1.0,
saveImagesWithDetections=False, saveHardExampleImages=True,
maxIters=1000, maxHardExamples=200000, calculateROC=False,
ROCforThisManyFirstIters=5):
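    """Iterative hard-negative mining: train an SVM, run the HOG detector on
    the training images, collect detections that overlap no ground-truth box
    (hard examples), append them as negatives and retrain, stopping when an
    iteration yields < 5 % of the first iteration's hard-example count."""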
    # Initialize dictionary of lists where results will be saved
ROCResults = {}
ROCResults["FPR"] = [] # False Positive Rate
ROCResults["TPR"] = [] # True Positive Rate
ROCResults["AUC"] = [] # Area Under ROC
ROCResults["iter"] = []
ROCResults["nrOfIterHE"] = []
ROCResults["F1"] = []
totalHardExamples = []
totalHardExampleLabels = []
    # If requested, mine hard examples using only part of the data;
    # otherwise, use all of it.
if amountToInitialTraining != 1.0:
negExRowIndices = np.where(trainClasses==0)[0]
posExRowIndices = np.where(trainClasses==1)[0]
tDNegInd = negExRowIndices[:int(amountToInitialTraining*len(negExRowIndices))]
tDPosInd = posExRowIndices[:int(amountToInitialTraining*len(posExRowIndices))]
trainData = np.concatenate((trainData[tDNegInd],trainData[tDPosInd]))
trainClasses = np.concatenate((trainClasses[tDNegInd],trainClasses[tDPosInd]))
labels = np.concatenate((labels[tDNegInd],labels[tDPosInd]))
if saveHardExampleImages or saveImagesWithDetections:
# Output folder name
parentFolder = "hardExamples_" + \
datetime.datetime.fromtimestamp(time.time()). \
strftime('%Y-%m-%d_%H-%M-%S')
# Create parent output folder if does not exist yet
if not os.path.exists(parentFolder):
os.makedirs(parentFolder)
if calculateROC:
ROCResults, cost = ROC(trainData, trainClasses, labels, ROCResults)
ROCResults["iter"].append(1) # First iteration
ROCResults["nrOfIterHE"].append(0) # Zero hard examples
for i in np.arange(2,maxIters,1):
iterHardExamples = []
iterHardExampleLabels = []
# Search and build SVM model.
# If ROC was calculated last on last iteration, we already have
# cross-validated cost.
if calculateROC and i<=ROCforThisManyFirstIters:
model = SVM.Train(trainData, trainClasses, cost=cost)
else: # Else, cross-validate new cost value
cost = 10.0**(np.arange(-2,3,1))
model = SVM.Train(trainData, trainClasses,
cost=cost, CVtype="lolo", labels=labels)
# Use the model to detect cells from already seen images
# and compare them to ground truth in order to find false positives
w = model.coef_[0]
hog.setSVMDetector( w )
searchMethod = "detectMultiScale"
params = dict(
hitThreshold = -model.intercept_[0],
winStride = (2,2), # IMPORTANT! if same as blockStride, no detections will be produced
padding = (0,0), # IMPORTANT! if bigger than (0,0), detections can have minus values which cropping does not like
scale = 1.05,
finalThreshold = 2,
useMeanshiftGrouping = False
)
# Input folder location and possible image types
listOfDayDirs = Listdirs(r".\trainWithThese")
fileTypes = ["bmp", 'jpg', 'png']
# Loop through input folders (days)
for ii, directory in enumerate(listOfDayDirs):
# Search hard examples only from images from these days,
# because subsequent day images are not annotated 100% correctly
# and thus "false positives" might be actual positives
if not "day1" in directory and \
not "day2" in directory and \
not "day3" in directory:
continue
# Get list of images in the folder
imageList = []
for fileType in fileTypes:
imageList = imageList + glob.glob(directory + "\*." + fileType)
# Loop through images
for j,imFileName in enumerate(imageList):
# Image name without file type extension
imName = imFileName[imFileName.rfind("\\")+1 : \
imFileName.rfind(".")]
print "\nProcessing " + directory[directory.rfind("\\")+1 : ] + \
" image " + str(j+1) + "/" + str(len(imageList)) + "..."
# Open current image
img = cv2.imread(imFileName, cv2.CV_LOAD_IMAGE_GRAYSCALE)
# Second copy that remains untouched for cropping
imgOrig = cv2.imread(imFileName, cv2.CV_LOAD_IMAGE_GRAYSCALE)
# Detect
found, timeTaken, w = DetectionProcess.SlidingWindow(hog, img, \
searchMethod, params, filterDetections=False)
if saveHardExampleImages:
# Create the folder if it does not exist already
childFolder = parentFolder + "\\" + "hardExamples_iter" + str(i)
if not os.path.exists(childFolder):
os.makedirs(childFolder)
# Find hard examples
for ri,r in enumerate(found):
for qi,q in enumerate(groundTruth[directory][imName]["positiveExamples"]):
# True if overlaps at least x %...
if Filters.Overlap(r, q) > 0.0000001:
break
# This example is false positive if it overlaps
# less than x % with any of the true positives
elif (qi == len(groundTruth[directory][imName]["positiveExamples"])-1):
# You can set minimum weight/confidence threshold
# for hard examples here.
if w[ri] > 0.0:
cropped = imgOrig[ r[1]:r[1]+r[3], r[0]:r[0]+r[2] ]
# Crop & resize
cropped = cv2.resize(cropped, hog.winSize)
# Generate feature
feature = hog.compute(cropped)[:,0]
iterHardExamples.append(feature)
iterHardExampleLabels.append(ii)
# Save the image
if saveHardExampleImages:
cv2.imwrite(childFolder + "\\" + imName + "_" + \
str(uuid.uuid4().fields[-1])[:5] + ".png", cropped)
# Save the results in .INI.
# Create the folder where at least detections.INI files will
# be saved. Images with detections will be saved over there
# as well later, if input argument of this function says so.
childFolder = parentFolder + "\\" + \
directory[directory.rfind("\\")+1 : ] + "_imagesWithDetections"
# Create the folder if it does not exist already
if not os.path.exists(childFolder):
os.makedirs(childFolder)
pathToDetectionsIni = childFolder+"\\"+imName+"_iter" + str(i)+".ini"
DetectionProcess.SaveIni(found, pathToDetectionsIni,
searchMethod, imFileName, hog)
# Analyze the results, build confusion matrix
pathToTruthIni = imFileName[:imFileName.rfind(".")]+"_annotations.ini"
TP, FP, FN, TPR, FPR, F1, F05, F09, nrOfCellsTruth, imWithDetections = \
MeasurePerformance.Measure \
(pathToDetectionsIni, pathToTruthIni, imFileName)
ROCResults["F1"].append(F1)
if saveImagesWithDetections:
# Save the image with detections
scipy.misc.imsave(childFolder + "\\" + \
imFileName[imFileName.rfind("\\")+1 : imFileName.rfind(".")] \
+ "_iter" + str(i) + ".png", imWithDetections)
# If no hard examples were found, draw ROC for the last time and exit
if len(iterHardExamples) == 0:
ROCResults, cost = ROC(trainData, trainClasses, labels, ROCResults)
ROCResults["iter"].append(i)
ROCResults["nrOfIterHE"].append(0)
break
# Concatenate
totalHardExamples = totalHardExamples + iterHardExamples
totalHardExampleLabels = totalHardExampleLabels + iterHardExampleLabels
# List to array
iterHardExampleLabels = np.asarray(iterHardExampleLabels)
iterHardExamples = np.asarray(iterHardExamples)
# Append
trainData = np.concatenate((trainData,iterHardExamples))
trainClasses = np.concatenate((trainClasses,([0] * iterHardExamples.shape[0])))
labels = np.concatenate((labels, iterHardExampleLabels))
# Save the number of hard examples on first iteration
if i == 2:
nrOfHeFirstIter = iterHardExamples.shape[0]
# If the search is not complete, print number of HE and calculate
# ROC if needed
if iterHardExamples.shape[0] >= (0.05 * nrOfHeFirstIter):
print "\nHard examples found: " + str(iterHardExamples.shape[0]) + "\n"
if calculateROC and i < ROCforThisManyFirstIters:
ROCResults, cost = ROC(trainData, trainClasses, labels, ROCResults)
ROCResults["iter"].append(i)
ROCResults["nrOfIterHE"].append(iterHardExamples.shape[0])
# Search is complete, calculate ROC for the last time if needed and
# exit the search
else:
print "\n|--------------------------------------------------"
print "| < 5 % hard examples found from the initial amount!"
print "| Exiting the search..."
print "|--------------------------------------------------"
if calculateROC:
ROCResults, cost = ROC(trainData, trainClasses, labels, ROCResults)
ROCResults["iter"].append(i)
ROCResults["nrOfIterHE"].append(iterHardExamples.shape[0])
break
totalHardExamples = np.asarray(totalHardExamples)
totalHardExampleLabels = np.asarray(totalHardExampleLabels)
return (totalHardExamples, totalHardExampleLabels, ROCResults)
def ROC(trainData, trainClasses, labels, ROCResults):
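    """Shuffle, hold out 25 % of the data, cross-validate the SVM cost C on
    the rest, compute the ROC curve and append FPR/TPR/AUC to ROCResults."""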
# Shuffle and maintain the same order in every array
np.random.seed(666) #222
shuffledOrder = range(trainData.shape[0])
np.random.shuffle(shuffledOrder)
trainData = np.asarray( [trainData[zz,:] for zz in shuffledOrder] )
trainClasses = np.asarray( [trainClasses[zz] for zz in shuffledOrder] )
labels = np.asarray( [labels[zz] for zz in shuffledOrder] )
# Find pos & neg indices
posExRowIndices = np.where(trainClasses==1)[0]
negExRowIndices = np.where(trainClasses==0)[0]
# Take 75 % for training and 25 % for testing
forTrainingPos = np.int(0.75 * len(posExRowIndices))
forTestingPos = len(posExRowIndices) - forTrainingPos
forTrainingNeg = np.int(0.75 * len(negExRowIndices))
forTestingNeg = len(negExRowIndices) - forTrainingNeg
# Partition the data
trainD, trainC, trainL, testD, testC = [],[],[],[],[]
testD = np.concatenate((trainData[posExRowIndices[-forTestingPos:]],
trainData[negExRowIndices[-forTestingNeg:]]))
trainD = np.concatenate((trainData[posExRowIndices[0:forTrainingPos]],
trainData[negExRowIndices[0:forTrainingNeg]]))
trainL = np.concatenate((labels[posExRowIndices[0:forTrainingPos]],
labels[negExRowIndices[0:forTrainingNeg]]))
testC = np.concatenate((trainClasses[posExRowIndices[-forTestingPos:]],
trainClasses[negExRowIndices[-forTestingNeg:]]))
trainC = np.concatenate((trainClasses[posExRowIndices[0:forTrainingPos]],
trainClasses[negExRowIndices[0:forTrainingNeg]]))
# Determine best C
cost = 10.0**(np.arange(-2,3,1))
model = SVM.Train(trainD, trainC, cost=cost, CVtype="lolo", labels=trainL)
# Calculate ROC with the best C
fpr, tpr, roc_auc = SVM.ROC(model, trainD, trainC, testD, testC)
# Save results
ROCResults["FPR"].append(fpr)
ROCResults["TPR"].append(tpr)
ROCResults["AUC"].append(roc_auc)
return (ROCResults, model.C)
| [
"[email protected]"
] | |
58e37c32308bc62287cb206420482f9b9601e669 | 0bb040ccd488f83655bd7f20305dd3e66b5b5bed | /stuff5e/forum/migrations/0016_auto_20210103_2318.py | f8d2f2376f4e1604e0c8579df47110db076f79a8 | [] | no_license | kozakusek/bd_proj | 75bc4bfc17708f850d13fe4f798ccbdb89a711dc | bd11a3a5ef17fd3ac08469a798b44f8dda1b4215 | refs/heads/master | 2023-03-04T00:03:59.783426 | 2021-02-18T20:10:22 | 2021-02-18T20:10:22 | 326,291,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # Generated by Django 3.0.5 on 2021-01-03 22:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0015_auto_20210103_2212'),
]
operations = [
migrations.AlterField(
model_name='bonus',
name='name',
field=models.CharField(choices=[('STR', 'STR'), ('DEX', 'DEX'), ('CON', 'CON'), ('INT', 'INT'), ('WIS', 'WIS'), ('CHA', 'CHA'), ('ANY', 'ANY')], max_length=10),
),
]
| [
"[email protected]"
] | |
541cb69c75260157173f8624d237328916435f77 | cfd374b34a6b1a6ae16b5d23f633a0c47ae471e1 | /generated_python_code/ball_collector/ballcollector/scripts/subsystem_camera.py | 2b5a8a3131b05aa8935b5914b541fe7ed40ab212 | [
"BSD-3-Clause"
] | permissive | mfigat/public_rshpn_tool | dd91d12e28d31a9c97c878b4d6e0a482a9bddfb6 | 3555cb8f1eb35ef12441b9aef63dae8f578c2aa7 | refs/heads/master | 2021-11-05T23:47:08.904506 | 2021-11-02T12:26:29 | 2021-11-02T12:26:29 | 207,675,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,765 | py | #!/usr/bin/env python
'''
Copyright (c) 2019, Robot Control and Pattern Recognition Group, Warsaw University of Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Warsaw University of Technology nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Maksym Figat
'''
# Import other scripts #
from auxiliary_functions import *
from auxiliary_agent_ballcollector import *
from auxiliary_subsystem_camera import *
# Temporary definitions #
IS_LOG = False # Flag determining if logs are shown in the terminal #
IS_PRINT = True # Flag indicating if debug information for developer are shown in the terminal #
class camera:
##### Subsystem camera constructor #####
def __init__(self):
self.log("__init__ function")
rospy.init_node("camera")
self._subsystemName="camera"
self._subsystemFrequency=10;
self._currentSubsystemBehaviour="Behaviour_initBehaviour";
self._subsystemIterations=0
self._behaviourIterations=0
self.initialiseCommunicationModel()
self.auxiliaryFunctions = AuxiliaryFunctions(self._subsystemFrequency)
# initialize all input flags
# initialize all output flags
self._out_flag_cameraInfo=False
self._out_flag_detectedBalls=False
self._out_flag_rpiCamera=False
pass
##### Start subsystem #####
def startSubsystem(self):
self.log("startSubsystem")
try:
while self.auxiliaryFunctions.isSubsystemOK():
''' Execute behaviour associated with _currentSubsystemBehaviour -- choose appropriate state based on _currentSubsystemBehaviour '''
if self._currentSubsystemBehaviour=="Behaviour_initBehaviour":
self.log("_currentSubsystemBehaviour==Behaviour_initBehaviour")
self.subsystemBehaviour_initBehaviour()
continue
if self._currentSubsystemBehaviour=="Behaviour_idleBehaviour":
self.log("_currentSubsystemBehaviour==Behaviour_idleBehaviour")
self.subsystemBehaviour_idleBehaviour()
continue
except Exception as e:
print e
self.error("Error found in function startSubsystem -- file subsystem_camera.py!")
pass
##### Initialise communication model #####
def initialiseCommunicationModel(self):
self.log("initialiseCommunicationModel")
self.initialiseSendChannel()
self.initialiseSendChannelForDiagnostics()
self.initialiseReceiveChannel()
pass
##### Initialise send channel #####
def initialiseSendChannel(self):
self.log("initialiseSendChannel")
# Buffer name=cameraInfo - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_cameraInfo=rospy.Publisher("cameraInfoChannel", CameraMessage, queue_size=CHANNEL_SIZE)
# Buffer name=detectedBalls - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_detectedBalls=rospy.Publisher("detectedBallsChannel", Image, queue_size=CHANNEL_SIZE)
# Buffer name=rpiCamera - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_rpiCamera=rospy.Publisher("rpiCameraChannel", Image, queue_size=CHANNEL_SIZE)
pass
##### Initialise send channel for diagnostics #####
def initialiseSendChannelForDiagnostics(self):
self.log("initialiseSendChannelForDiagnostics")
self._vectorOfSenderDiagnostics=[]
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_currentSubsystemBehaviour', String, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_subsystemFrequency', Float64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_subsystemName', String, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_subsystemIterations', Int64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_behaviourIterations', Int64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_out_flag_cameraInfo', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_out_flag_detectedBalls', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('camera/_out_flag_rpiCamera', Bool, queue_size=CHANNEL_SIZE))
pass
##### Initialise receive channel based on input buffers #####
def initialiseReceiveChannel(self):
self.log("initialiseReceiveChannel")
pass
##### Wait for all messages #####
def waitForAllMessages(self):
self.log("waitForAllMessages")
pass
##### Publish on topics diagnostic data concerning the subsystem state #####
def sendDataForDiagnostics(self):
self._vectorOfSenderDiagnostics[0].publish(self._currentSubsystemBehaviour)
self._vectorOfSenderDiagnostics[1].publish(self._subsystemFrequency)
self._vectorOfSenderDiagnostics[2].publish(self._subsystemName)
self._vectorOfSenderDiagnostics[3].publish(self._subsystemIterations)
self._vectorOfSenderDiagnostics[4].publish(self._behaviourIterations)
###### internal state #####
if(7 < len(self._vectorOfSenderDiagnostics) ):
self._vectorOfSenderDiagnostics[5].publish(self._out_flag_cameraInfo)
self._vectorOfSenderDiagnostics[6].publish(self._out_flag_detectedBalls)
self._vectorOfSenderDiagnostics[7].publish(self._out_flag_rpiCamera)
pass
##### Behaviour definitions #####
##### Behaviour initBehaviour #####
##### Terminal condition #####
def terminalCondition_initBehaviour(self): # std_msgs::Bool _out_flag_cameraInfo, std_msgs::Bool _out_flag_detectedBalls, std_msgs::Bool _out_flag_rpiCamera #
self.log("[Behaviour initBehaviour] -- Checking Terminal Condition")
return True
pass
##### Error condition #####
def errorCondition_initBehaviour(self): # std_msgs::Bool _out_flag_cameraInfo, std_msgs::Bool _out_flag_detectedBalls, std_msgs::Bool _out_flag_rpiCamera #
self.log("[Behaviour initBehaviour] -- Checking Error Condition")
return False
pass
##### Transition function #####
def transitionFunction_initBehaviour(self):
self.log("[Behaviour initBehaviour] -- Calculating Transition Function")
# Transition function #
self.log("TRANSITION FUNCTION - initBehaviour consists of the following partial transition functions (decomposition based on output buffers)")
# Partial transition function call: fun1
self.transitionFunction_initBehaviour_fun1()
# Partial transition function call: set_buffer_flags_function
self.transitionFunction_initBehaviour_set_buffer_flags_function()
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_initBehaviour_fun1(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function fun1")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if True:
self.transitionFunction_initBehaviour_fun1_0()
pass
##### Partial transition function: fun1_0 based on input buffers True #####
def transitionFunction_initBehaviour_fun1_0(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function fun1_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
print("[VR - camera] -- initBehaviour")
# Change the current directory
# get user path
home = os.path.expanduser("~")
# set path where the image will be saved:
directory=home+"/git/robot_pi/ros/generated_python/data/img"
print(directory)
# set directory as a current path
os.chdir(directory)
self.detectedBalls=Image()
self.rpiCamera=Image()
self.camera=PiCamera()
self.camera.resolution=(IMAGE_X_SIZE,IMAGE_Y_SIZE)
self.camera.framerate=32
self.cameraInfo=CameraMessage()
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_initBehaviour_set_buffer_flags_function(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function set_buffer_flags_function")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if True:
self.transitionFunction_initBehaviour_set_buffer_flags_function_0()
pass
##### Partial transition function: set_buffer_flags_function_0 based on input buffers True #####
def transitionFunction_initBehaviour_set_buffer_flags_function_0(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function set_buffer_flags_function_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# End - Partial Transition Function Code
pass
##### End of transition function #####
##### Send data to other subsystems #####
def sendData_initBehaviour(self):
self.log("[Behaviour initBehaviour] -- Sending Data")
# DIAGNOSTICS SEND #
self.sendDataForDiagnostics()
# END OF DIAGNOSTICS SEND #
# TYPICAL SEND CALL #
# check if output buffer cameraInfo has new data - i.e. is ready to send new data
if( self._out_flag_cameraInfo ):
# send data from output buffer cameraInfo
# Buffer cameraInfo - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_cameraInfo.publish(self.cameraInfo) # sending data from output buffer cameraInfo #
# indicate that data was sent and now the output buffer cameraInfo is empty
self._out_flag_cameraInfo=False
# check if output buffer detectedBalls has new data - i.e. is ready to send new data
if( self._out_flag_detectedBalls ):
# send data from output buffer detectedBalls
# Buffer detectedBalls - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_detectedBalls.publish(self.detectedBalls) # sending data from output buffer detectedBalls #
# indicate that data was sent and now the output buffer detectedBalls is empty
self._out_flag_detectedBalls=False
# check if output buffer rpiCamera has new data - i.e. is ready to send new data
if( self._out_flag_rpiCamera ):
# send data from output buffer rpiCamera
# Buffer rpiCamera - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_rpiCamera.publish(self.rpiCamera) # sending data from output buffer rpiCamera #
# indicate that data was sent and now the output buffer rpiCamera is empty
self._out_flag_rpiCamera=False
# END OF TYPICAL SEND CALL #
# BEGIN OF BODY SEND CALL #
# END OF BODY SEND CALL #
pass
##### Receive data from other subsystems #####
def receiveData_initBehaviour(self):
self.log("[Behaviour initBehaviour] -- Receiving Data")
# TYPICAL RECEIVE CALL #
self.waitForAllMessages() #
# END OF TYPICAL RECEIVE CALL #
# BEGIN OF RECEIVE BODY CALL #
# END OF RECEIVE BODY CALL #
pass
##### Execute behaviour initBehaviour #####
def executeBehaviour_initBehaviour(self):
self.log("[Behaviour initBehaviour] -- Executing initBehaviour Behaviour")
stopBehaviourIteration=False
# Execution of a single iteration of a behaviour initBehaviour #
_behaviourIterations=0
# Starts execution! #
while True:
# Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
self.auxiliaryFunctions.sleep()
# Calculates transition function -- output and internal buffers can only be modified by this function! #
self.transitionFunction_initBehaviour()
# Sends data! #
self.sendData_initBehaviour()
# Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
self._behaviourIterations=self._behaviourIterations+1
# Receives data! #
self.receiveData_initBehaviour()
# Check both conditions, i.e. terminal condition and error condition #
stopBehaviourIteration = self.terminalCondition_initBehaviour() or self.errorCondition_initBehaviour()
if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
'''
Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
subsystem must have been switched to another state or SIGINT was sent
'''
break
# Stops execution! #
pass
##### Behaviour idleBehaviour #####
##### Terminal condition #####
def terminalCondition_idleBehaviour(self): # std_msgs::Bool _out_flag_cameraInfo, std_msgs::Bool _out_flag_detectedBalls, std_msgs::Bool _out_flag_rpiCamera #
self.log("[Behaviour idleBehaviour] -- Checking Terminal Condition")
return False
pass
##### Error condition #####
def errorCondition_idleBehaviour(self): # std_msgs::Bool _out_flag_cameraInfo, std_msgs::Bool _out_flag_detectedBalls, std_msgs::Bool _out_flag_rpiCamera #
self.log("[Behaviour idleBehaviour] -- Checking Error Condition")
return False
pass
##### Transition function #####
def transitionFunction_idleBehaviour(self):
self.log("[Behaviour idleBehaviour] -- Calculating Transition Function")
# Transition function #
self.log("TRANSITION FUNCTION - idleBehaviour consists of the following partial transition functions (decomposition based on output buffers)")
# Partial transition function call: fun1
self.transitionFunction_idleBehaviour_fun1()
# Partial transition function call: set_buffer_flags_function
self.transitionFunction_idleBehaviour_set_buffer_flags_function()
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_idleBehaviour_fun1(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if True:
self.transitionFunction_idleBehaviour_fun1_0()
pass
##### Partial transition function: fun1_0 based on input buffers True #####
def transitionFunction_idleBehaviour_fun1_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_cameraInfo=True
self._out_flag_detectedBalls=True
self._out_flag_rpiCamera=True
print("[VR - camera] -- idleBehaviour")
print("A 1")
rawCapture = PiRGBArray(self.camera,size=(IMAGE_X_SIZE,IMAGE_Y_SIZE))
print("A 2")
time.sleep(0.1)
self.camera.capture(rawCapture, format="bgr")
print("A 3")
image=rawCapture.array
print("A 4")
# resize image
dim = (IMAGE_X_SIZE, IMAGE_Y_SIZE)
# img=cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
img=cv2.resize(image, dim)
self.rpiCamera=bridge.cv2_to_imgmsg(img,"bgr8")
return
############################# OLD VERSION
try:
print("B")
print("C")
hsvLow = tuple(rospy.get_param("/hsvLowPi"))
hsvHigh = tuple(rospy.get_param("/hsvHighPi"))
minCirclePi =rospy.get_param("/minCirclePi")
param1Pi = rospy.get_param("/param1Pi")
param2Pi =rospy.get_param("/param2Pi")
minRadiusPi =rospy.get_param("/minRadiusPi")
maxRadiusPi =rospy.get_param("/maxRadiusPi")
print("D")
print("E")
except rospy.ROSException:
print("F")
print("Could not get param name")
self._out_flag_detectedBalls=False
self._out_flag_rpiCamera=False
self._out_flag_cameraInfo=False
return
except Exception as e:
print("F")
print("Could not get param name")
self._out_flag_detectedBalls=False
self._out_flag_rpiCamera=False
self._out_flag_cameraInfo=False
hsvLow = (0, 39, 167)
hsvHigh = (179, 130,255)
minCirclePi =32
param1Pi = 35
param2Pi = 15
minRadiusPi =14
maxRadiusPi =36
return
print("H")
print("J")
# blur image
img2 = cv2.medianBlur(img,5)
print("K")
# change image to hsv space
hsv = cv2.cvtColor(img2,cv2.COLOR_BGR2HSV)
# set HSV mask
mask = cv2.inRange(hsv,hsvLow, hsvHigh)
# erode image
mask = cv2.erode(mask, None, iterations=3)
# dilate image
mask = cv2.dilate(mask, None, iterations=3)
# bitwise and
img2=cv2.bitwise_and(img2,img2,mask=mask)
# change to gray scale:
cimg = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# find cirles:
circles = cv2.HoughCircles(cimg,cv2.HOUGH_GRADIENT,1,minCirclePi, param1=param1Pi,param2=param2Pi ,minRadius=minRadiusPi,maxRadius=maxRadiusPi)
flag=False
# check if balls were found:
if circles is not None:
circles = np.uint16(np.around(circles))
self.cameraInfo.ballVisible.data=True
( self.cameraInfo.ballPosition.x, self.cameraInfo.ballPosition.y , self.cameraInfo.ballPosition.z) = getBestBall(circles)
img2=drawCircle(img2,circles)
img2=drawSelectedCircle(img2, self.cameraInfo.ballPosition.x, self.cameraInfo.ballPosition.y , self.cameraInfo.ballPosition.z)
else:
self.cameraInfo.ballVisible.data=False
# set new value within detectedBalls
self.detectedBalls=bridge.cv2_to_imgmsg(img2,"bgr8")
self.rpiCamera=bridge.cv2_to_imgmsg(img,"bgr8")
# save image to file
cv2.imwrite('ballDetected_new.jpg', img)
rawCapture.truncate()
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_idleBehaviour_set_buffer_flags_function(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function set_buffer_flags_function")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if True:
self.transitionFunction_idleBehaviour_set_buffer_flags_function_0()
pass
##### Partial transition function: set_buffer_flags_function_0 based on input buffers True #####
def transitionFunction_idleBehaviour_set_buffer_flags_function_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function set_buffer_flags_function_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# End - Partial Transition Function Code
pass
##### End of transition function #####
##### Send data to other subsystems #####
def sendData_idleBehaviour(self):
self.log("[Behaviour idleBehaviour] -- Sending Data")
# DIAGNOSTICS SEND #
self.sendDataForDiagnostics()
# END OF DIAGNOSTICS SEND #
# TYPICAL SEND CALL #
# check if output buffer cameraInfo has new data - i.e. is ready to send new data
if( self._out_flag_cameraInfo ):
# send data from output buffer cameraInfo
# Buffer cameraInfo - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_cameraInfo.publish(self.cameraInfo) # sending data from output buffer cameraInfo #
# indicate that data was sent and now the output buffer cameraInfo is empty
self._out_flag_cameraInfo=False
# check if output buffer detectedBalls has new data - i.e. is ready to send new data
if( self._out_flag_detectedBalls ):
# send data from output buffer detectedBalls
# Buffer detectedBalls - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_detectedBalls.publish(self.detectedBalls) # sending data from output buffer detectedBalls #
# indicate that data was sent and now the output buffer detectedBalls is empty
self._out_flag_detectedBalls=False
# check if output buffer rpiCamera has new data - i.e. is ready to send new data
if( self._out_flag_rpiCamera ):
# send data from output buffer rpiCamera
# Buffer rpiCamera - NON-BLOCKING Sender mode - NON-BLOCKING Receiver mode
self._sender_rpiCamera.publish(self.rpiCamera) # sending data from output buffer rpiCamera #
# indicate that data was sent and now the output buffer rpiCamera is empty
self._out_flag_rpiCamera=False
# END OF TYPICAL SEND CALL #
# BEGIN OF BODY SEND CALL #
# END OF BODY SEND CALL #
pass
##### Receive data from other subsystems #####
def receiveData_idleBehaviour(self):
self.log("[Behaviour idleBehaviour] -- Receiving Data")
# TYPICAL RECEIVE CALL #
self.waitForAllMessages() #
# END OF TYPICAL RECEIVE CALL #
# BEGIN OF RECEIVE BODY CALL #
# END OF RECEIVE BODY CALL #
pass
##### Execute behaviour idleBehaviour #####
def executeBehaviour_idleBehaviour(self):
self.log("[Behaviour idleBehaviour] -- Executing idleBehaviour Behaviour")
stopBehaviourIteration=False
# Execution of a single iteration of a behaviour idleBehaviour #
_behaviourIterations=0
# Starts execution! #
while True:
# Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
self.auxiliaryFunctions.sleep()
# Calculates transition function -- output and internal buffers can only be modified by this function! #
self.transitionFunction_idleBehaviour()
# Sends data! #
self.sendData_idleBehaviour()
# Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
self._behaviourIterations=self._behaviourIterations+1
# Receives data! #
self.receiveData_idleBehaviour()
# Check both conditions, i.e. terminal condition and error condition #
stopBehaviourIteration = self.terminalCondition_idleBehaviour() or self.errorCondition_idleBehaviour()
if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
'''
Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
subsystem must have been switched to another state or SIGINT was sent
'''
break
# Stops execution! #
pass
##### Definition of functions responsible for switching subsystem camera between states : Behaviour_initBehaviour Behaviour_idleBehaviour #####
# Behaviour initBehaviour: #
def subsystemBehaviour_initBehaviour(self):
self.log("subsystemBehaviour_initBehaviour")
# Executing behaviour initBehaviour #
self.executeBehaviour_initBehaviour()
# Behaviour has been terminated #
# Checking initial condition for behaviour initBehaviour: switching to behaviour idleBehaviour #
if self.initialCondition_From_Behaviour_initBehaviour_To_Behaviour_idleBehaviour():
# incrementing the number determining how many times subsystem has switched between behaviours #
self._subsystemIterations=self._subsystemIterations+1
self._currentSubsystemBehaviour="Behaviour_idleBehaviour"
pass
# Behaviour idleBehaviour: #
def subsystemBehaviour_idleBehaviour(self):
self.log("subsystemBehaviour_idleBehaviour")
# Executing behaviour idleBehaviour #
self.executeBehaviour_idleBehaviour()
# Behaviour has been terminated #
pass
##### Initial condition for behaviour initBehaviour: switching to behaviour idleBehaviour #####
def initialCondition_From_Behaviour_initBehaviour_To_Behaviour_idleBehaviour(self):
# Initial condition specified by user #
return True
##### Function indicating basic log/debug message #####
def log(self, str):
if(IS_LOG):
rospy.loginfo("[SUBSYSTEM camera] -- LOG -- "+str)
if(IS_PRINT):
print "[SUBSYSTEM camera] -- LOG -- " + str
pass
##### Function indicating basic error message #####
def error(self, str):
sys.stderr.write("[SUBSYSTEM camera] -- ERROR -- " + str)
if(IS_LOG):
rospy.loginfo("[SUBSYSTEM camera] -- ERROR -- "+str)
sys.exit()
pass
##### MAIN FUNCTION FOR SUBSYSTEM camera #####
if __name__ == '__main__':
try:
subsystem_camera = camera()
subsystem_camera.startSubsystem()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
] | |
ac56f7c3e781f8956edb58ef1d5a1467891a3fb5 | dd47f3cbdc1ca987dd30275d6612f34f7cd69885 | /lesson_4/lesson_4_5.py | b8f646bdcbb679847cf5b0c7b1319063ca071a1d | [] | no_license | ek01012020/python_base | 3c7a6db6dc5039b07e504eae4b78e171989d3e61 | 67ba00581361322bae151e61cf9e41bcaa3aa456 | refs/heads/master | 2023-01-20T02:33:43.434356 | 2020-11-20T10:14:07 | 2020-11-20T10:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | '''Реализовать формирование списка, используя функцию range() и возможности генератора.
The list must contain the even numbers from 100 to 1000, inclusive.
The task is to compute the product of all the elements of the list.'''
from functools import reduce
def my_func(pre_el, el):
return pre_el * el
my_list = [el for el in range(100, 1001, 2)]
print(reduce(my_func,my_list)) | [
"[email protected]"
] | |
dd1abf4c54ae75cafa0a93d25b9b6d4ff5540e44 | 56e37f0a0dd73fd544853bbbeb41e4ec2906b157 | /setup.py | b83d5a13b9f28ef178588c5e0caae4a7e720af29 | [
"Apache-2.0"
] | permissive | gsjurseth/aws-api-gateway-developer-portal | 575a7a4a83a073f48e2bc41691fd3bc00f0b1ade | 69c5c13dc9d70b74ef1f8eaf77153b88348aad43 | refs/heads/master | 2021-01-19T05:50:02.994606 | 2017-10-30T09:59:49 | 2017-10-30T09:59:49 | 100,585,005 | 0 | 1 | null | 2017-08-17T09:14:27 | 2017-08-17T09:14:27 | null | UTF-8 | Python | false | false | 1,460 | py | #!/usr/bin/python
import logging
import os
import sys
from golem.build_utils import build_gradle, get_arguments
args = get_arguments()
logging.basicConfig(level='DEBUG')
logger = logging.getLogger('setup')
env_vars = {
"cco-dev" : {
"eu-west-1": {
"CUSTOM_DOMAIN_NAME": "apigw.ebuilder.io",
"LOGLEVEL": "DEBUG",
"ENVIRONMENT": "development"
}
},
"cco-dev-test" : {
"ap-northeast-1": {
"CUSTOM_DOMAIN_NAME": "api-test-ap-northeast-1.ebuilder.io",
"LOGLEVEL": "DEBUG",
"ENVIRONMENT": "test"
},
"eu-west-1": {
"CUSTOM_DOMAIN_NAME": "apigw-test.ebuilder.io",
"LOGLEVEL": "DEBUG",
"ENVIRONMENT": "test"
}
},
"cco-prod-eu-west" : {
"eu-west-1": {
"CUSTOM_DOMAIN_NAME": "apigw-skutt.ebuilder.io",
"LOGLEVEL": "INFO",
"ENVIRONMENT": "production"
}
}
}
if len(sys.argv) != 2:
logger.error('Required: A single argument with a base64-encoded dictionary with system information.')
sys.exit(1)
if args['account'] not in env_vars:
print 'No configuration data for the account "%s" is configured.'%args['account']
sys.exit(1)
if args['region'] not in env_vars[args['account']]:
print 'No configuration data for the region "%s" is configured for this account.'%args['region']
sys.exit(1)
for key in env_vars[args['account']][args['region']]:
print ':ENVIRONMENT:%s:%s:'%(
key, env_vars[args['account']][args['region']][key])
| [
"[email protected]"
] | |
84e38d5ffdb27a5283e46d446abbd1e8d4a947e3 | 655c6b2500bd72d8f84e9197b534afc9e336917e | /doubanCrawl/settings.py | 1110333a19d991838a68affdef8f8637f832ac7b | [] | no_license | hackingwu/doubanCrawl | da66d3c51d5d77d79627a6c7e25fa4d255dfa22b | e8005409238a515eedc2630e6c01acd6f1eda846 | refs/heads/master | 2021-01-10T11:04:31.156443 | 2015-11-30T13:28:15 | 2015-11-30T13:28:15 | 47,086,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | # -*- coding: utf-8 -*-
# Scrapy settings for doubanCrawl project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'doubanCrawl'
SPIDER_MODULES = ['doubanCrawl.spiders']
NEWSPIDER_MODULE = 'doubanCrawl.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'doubanCrawl (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY=1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
CONCURRENT_REQUESTS_PER_IP=2
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'doubanCrawl.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'doubanCrawl.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'doubanCrawl.pipelines.DoubancrawlPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
MONGODB_URI='mongodb://hackingwu:[email protected]:27017/douban?authMechanism=SCRAM-SHA-1'
DEPTH_LIMIT=10 | [
"[email protected]"
] | |
9944cc9c11328eb7a6c0cf0d9b7bbb05684f89f3 | 0c84329f5d1e4e9070cece982ac827644d8a1893 | /venv/bin/python-config | 66e81ac48125aa0cbe7cfce2392c3aba8a368cf7 | [] | no_license | Tiyas-13/LC | 955deece0ecf1fc42e6a5d5fcc287ff0fca63679 | ef45b44198532173c37793b9b0e8905ad7739f57 | refs/heads/master | 2020-06-09T16:30:02.176987 | 2019-07-02T22:10:50 | 2019-07-02T22:10:50 | 193,467,920 | 0 | 0 | null | 2019-07-02T22:10:51 | 2019-06-24T08:44:05 | Python | UTF-8 | Python | false | false | 2,334 | #!/home/hp/LC/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
36e17c4801a1e696dbc5f9a0ef3cebb947a956f4 | 667a8604914d8137d0e667924a54ebc708090646 | /buildpack/databroker/config_generator/scripts/config_env_whitelist.py | b1f89b3ed4b734986709a5fadcc04059b0c0c685 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mendix/cf-mendix-buildpack | ec3df05165760d716263c50923fca69bc699aee7 | bd5df58da5bc106f6d864304bb8d55371d40cbc5 | refs/heads/master | 2023-07-26T18:37:41.970004 | 2023-07-13T12:42:03 | 2023-07-13T12:42:03 | 19,174,281 | 41 | 155 | Apache-2.0 | 2023-09-14T11:52:35 | 2014-04-26T09:53:49 | Python | UTF-8 | Python | false | false | 415 | py | # This is list of allowed ENV VARS that will be converted into configuration
from buildpack.databroker.config_generator.scripts.constants import (
ENV_VAR_BROKER_PREFIX,
NODE_COUNT_KEY,
)
whitelist = [
"MXRUNTIME_DatabaseType",
"MXRUNTIME_DatabaseHost",
"MXRUNTIME_DatabaseName",
"MXRUNTIME_DatabaseUserName",
"MXRUNTIME_DatabasePassword",
ENV_VAR_BROKER_PREFIX + NODE_COUNT_KEY,
]
| [
"[email protected]"
] | |
7414ad7eafe29973d4611d6f86ba80c8f4db9c97 | 6944f0aa994045e04d01445c70667f25184736de | /softmax.py | 3e3448b10d5387abd321f6d71d0bf74275f9fc2e | [] | no_license | sumantsaxena/ML_Projects | af5918b01238977433cbe0d13587ec5b196924f6 | 37847f4c828be65392423f43196543f2f1769252 | refs/heads/main | 2023-07-17T23:58:50.442728 | 2021-09-05T18:26:29 | 2021-09-05T18:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import numpy as np
from scipy.special import softmax
from scipy.special import log_softmax
data = np.genfromtxt("HamiltonCases.csv", delimiter=',')
X = data[:, 0:4]
y = data[:, 4]
z0 = (y < X.min(axis=1)).astype(int)
z2 = (y > X.max(axis=1)).astype(int)
z1 = ((z0 == 0) & (z2 == 0)).astype(int)
y = np.concatenate([z0[:, np.newaxis], z1[:, np.newaxis], z2[:, np.newaxis]], axis=1)
X = np.concatenate([X, np.ones(len(X))[:, np.newaxis]], axis=1)
m = 200
Xtrain = X[0:m, :]
ytrain = y[0:m, :]
target = np.argmax(ytrain, axis=1)
theta = np.random.randn(5, 3)
o = np.matmul(Xtrain, theta)
predict = np.argmax(o, axis=1)
before = np.sum(predict == target)
eta = 0.001
n_iterations = 1000
for i in range(n_iterations):
    # o = X . theta (raw class scores)
    o = np.matmul(Xtrain, theta)
    # y_hat = softmax(o)
    p = softmax(o, axis=1)
    # NLL = -log(y_hat) = -log(softmax(o, axis=1)) = -log_softmax(o)
    NLL = -log_softmax(o, axis=1)
    # L = mean over samples of y * (-log(y_hat)) = y * NLL
    L = np.tensordot(NLL, ytrain, axes=[[0, 1], [0, 1]]) / m
    print(L)
    # gradient of L: X^T . (y_hat - y) / m = Xtrain.T . (p - ytrain) / m
    gradient = np.matmul(Xtrain.T, (p - ytrain)) / m
    # gradient descent update
    theta = theta - eta * gradient
print(theta)
o = np.matmul(Xtrain, theta)
predict = np.argmax(o, axis=1)
print(before, np.sum(predict == target))
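# Added sanity check (illustrative): the analytic gradient X^T (softmax(X.theta) - y) / m
# used above should agree with a finite-difference estimate of dL/dtheta[0, 0].
def loss_at(th):
    return np.tensordot(-log_softmax(np.matmul(Xtrain, th), axis=1), ytrain, axes=[[0, 1], [0, 1]]) / m
eps = 1e-5
probe = theta.copy()
probe[0, 0] += eps
numeric = (loss_at(probe) - loss_at(theta)) / eps
analytic = (np.matmul(Xtrain.T, softmax(np.matmul(Xtrain, theta), axis=1) - ytrain) / m)[0, 0]
print("numeric vs analytic dL/dtheta[0,0]:", numeric, analytic)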
Xtest = X[m:, :]
ytest = y[m:, :]
o = np.matmul(Xtest, theta)
predict = np.argmax(o, axis=1)
act = np.argmax(ytest, axis=1)
print(np.sum(predict == act), np.sum(predict != act))
| [
"[email protected]"
] | |
164503ed62c9543f52af60bfa4c3b9c71832e65c | e5fe7c3b4f2ede48cf32353e70c74961c726a026 | /server/rest/main/rest_controller.py | 1cae2712a858e226bae38f189916503c2f67273d | [
"MIT"
] | permissive | lazykyama/ClusteringVisualize | 871e971a6839f2d34d5ba023ff40981d3b8c5983 | 5ce366bf05474ab91ef8eec78bc217168f893dc2 | refs/heads/master | 2021-01-23T08:43:34.323670 | 2013-05-03T11:59:11 | 2013-05-03T11:59:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, traceback
from kmeans_rest_controller import *
from http_response_wrapper import *
class RestController:
    # Hand the command and query to a subcontroller and let it execute them
def execute_with_subcontroller(self, command, query):
        # Honestly a message queue would be the better choice around here,
        # but this is kept as a simple implementation for now...
response = ''
try:
subcontroller = RestSubControllerFactory.create_subcontroller(command)
response = subcontroller.execute(query)
except Exception, detail:
            # based on http://melpystudio.blog82.fc2.com/blog-entry-87.html:
            # grab the error information from the sys module
info = sys.exc_info()
            # convert it into a fixed format with the traceback module's format_tb
tbinfo = traceback.format_tb(info[2])
            # pretty-print the collected information so it is easy to read
error_msg_header = 'internal server error:'
DELIMITER_LENGTH = 40
print error_msg_header.ljust(DELIMITER_LENGTH, '=')
for tbi in tbinfo:
print tbi
print ' %s' % str(info[1])
print '\n'.rjust(DELIMITER_LENGTH, '=')
            # return a 500 error when an exception occurs
response = HttpResponseWrapper.create_internal_server_error()
finally:
return response
class RestSubControllerFactory:
@staticmethod
def create_subcontroller(command):
if command == 'kmeans_result':
return KMeansRestController()
else:
raise TypeError('Unknown command: ' + command)
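# Added illustration: unknown commands make the factory raise TypeError, which
# execute_with_subcontroller catches and turns into a 500 response.
if __name__ == '__main__':
    try:
        RestSubControllerFactory.create_subcontroller('unknown_command')
    except TypeError, detail:
        print 'factory rejected unknown command:', detail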
| [
"[email protected]"
] | |
2a976cf99191a3f8e6e31bfa00a10abbbf4117fb | 3099c7efd083e4bab488912134a2694e0492bc34 | /ch09/regularization.py | 15b840b14b4954f602185896c7971a36d418984d | [] | no_license | lkhoho/DL4CV_Starter | 54d948b4771fef38c2ad8302b0a6396f46eca917 | fdc296bce4146f18d140e03fdc4da3170287eddc | refs/heads/master | 2022-02-03T22:06:47.679246 | 2022-01-25T09:46:28 | 2022-01-25T09:46:28 | 131,102,142 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | import argparse
from imutils import paths
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from pyimagesearch.datasets.simple_dataset_loader import SimpleDatasetLoader
from pyimagesearch.preprocessing.simple_preprocessor import SimplePreprocessor
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True, help='Path to input dataset')
ap.add_argument('-j', '--jobs', type=int, default=-1, help='# of jobs to run (-1 uses all available cores)')
args = vars(ap.parse_args())
print('[INFO] loading images...')
imagePaths = list(paths.list_images(args['dataset']))
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.reshape((data.shape[0], 32 * 32 * 3))
le = LabelEncoder()
labels = le.fit_transform(labels)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=5)
# apply different types of regularization terms
for r in (None, 'l1', 'l2'):
# train the SGD classifier using a softmax loss function and the specified regularization function for 10 epochs
print('[INFO] training model with \'{}\' penalty'.format(r))
model = SGDClassifier(loss='log', penalty=r, max_iter=10, learning_rate='constant', eta0=0.01, random_state=42)
model.fit(trainX, trainY)
# evaluate the classifier
acc = model.score(testX, testY)
print('[INFO] \'{}\' penalty accuracy: {:.2f}%'.format(r, acc * 100))
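# Added follow-up sketch: an L1 penalty encourages sparse weights; counting the
# exactly-zero coefficients of a separately fitted model illustrates the effect.
import numpy as np
l1_model = SGDClassifier(loss='log', penalty='l1', max_iter=10, learning_rate='constant', eta0=0.01, random_state=42)
l1_model.fit(trainX, trainY)
print('[INFO] zero coefficients under l1: {}'.format(int(np.sum(l1_model.coef_ == 0))))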
| [
"[email protected]"
] | |
d06ae631edf4627719568c2fa0175e50fd4988cf | 05172b2f9ac3cc2dc925631fcb49513b3e02ea2c | /_PythonDataStrutures/DynamicArray.py | 89bad60ce24f861f5bcb6e1d810f71816fdb0a35 | [] | no_license | KickItAndCode/Algorithms | cbe1ea95c1e655cbfa0b829d3bd2a2a78d7d862f | d41e8a2262cede3154bc5b48c43fb60ac098e385 | refs/heads/master | 2020-04-26T07:08:37.127285 | 2019-12-17T03:46:10 | 2019-12-17T03:46:10 | 173,385,735 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import ctypes
class DynamicArray(object):
def __init__(self):
self.n = 0
self.capacity = 1
self.A = self.make_array(self.capacity)
def __len__(self):
return self.n
def __getitem__(self, k):
if not 0 <= k < self.n:
            raise IndexError('k is out of bounds!')
return self.A[k]
def append(self, ele):
if self.n == self.capacity:
self._resize(2 * self.capacity) # 2x if capacity isn't enough
self.A[self.n] = ele
self.n += 1
def _resize(self, new_cap):
B = self.make_array(new_cap)
for k in range(self.n):
B[k] = self.A[k]
self.A = B
self.capacity = new_cap
def make_array(self, new_cap):
return (new_cap * ctypes.py_object)()
arr = DynamicArray()
arr.append(1)
print(arr.capacity)
print(len(arr))
arr.append(22)
print(arr[1])
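# Added illustration: appending past the current capacity triggers _resize, so
# the capacity doubles on demand (1 -> 2 -> 4 -> 8 ...).
for value in range(3):
    arr.append(value)
print(len(arr), arr.capacity)  # expected: 5 8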
| [
"[email protected]"
] | |
c8df08cd076940f234c738d05d98284b04845a7d | 0921aa5e3e6084ea391bcfd690ecb03025cbdacc | /job_import.py | 3f226e8d5369b36505488bdeb3c1ce9883d94798 | [] | no_license | h-parekh/jenkins_utils | aebc5b6cabcacde475e8e3442de6862fe2fe639c | 9f7f037f84dfd74b5d551662389ce903aa3e1c2c | refs/heads/master | 2020-03-21T01:13:31.930866 | 2018-06-19T18:37:23 | 2018-06-19T18:37:23 | 137,930,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | import jenkins
import xml.etree.ElementTree as ET
import os
url = os.environ['JENKINS_URL']
username = os.environ['JENKINS_USERNAME']
password = os.environ['JENKINS_PASSWORD']
path_to_config_file = os.environ['JENKINS_JOB_CONFIG_FILE_PATH']
job_name = os.environ['JENKINS_JOB_NAME']
def convert_xml_file_to_str():
tree = ET.parse(path_to_config_file)
root = tree.getroot()
return ET.tostring(root, encoding='utf8', method='xml').decode()
def main():
target_server = jenkins.Jenkins(url, username=username, password=password)
config = convert_xml_file_to_str()
# @see http://python-jenkins.readthedocs.io/en/latest/api.html#jenkins.Jenkins.create_job
target_server.create_job(job_name, config)
main()
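# Added usage note (placeholder values, not real credentials or endpoints):
#   JENKINS_URL=http://localhost:8080 JENKINS_USERNAME=admin \
#   JENKINS_PASSWORD=secret JENKINS_JOB_CONFIG_FILE_PATH=config.xml \
#   JENKINS_JOB_NAME=imported-job python job_import.py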
| [
"[email protected]"
] | |
f717b3d1730d62889162b5e26f197a781a601552 | 2a04091dcb63cc2b8bc803d3ec497e6e775a0e61 | /tests/providers/audio/test_youtube.py | 4efc445139c8a2cd7f488ae7fa61ca819039bece | [
"MIT"
] | permissive | vaginessa/Spotify-Downloader | 3f4655e5004b7c9e50f89d657597bcffc94f7d91 | d1d1223e196891d26825cbfe38e44eef8057c908 | refs/heads/master | 2023-08-05T09:32:12.341299 | 2023-01-24T07:12:35 | 2023-01-24T07:12:35 | 78,599,970 | 0 | 0 | MIT | 2023-07-25T22:24:21 | 2017-01-11T03:41:44 | Python | UTF-8 | Python | false | false | 1,559 | py | import pytest
from spotdl.providers.audio.youtube import YouTube
from spotdl.types.song import Song
@pytest.mark.vcr()
def test_yt_search():
provider = YouTube()
assert (
provider.search(
Song.from_dict(
{
"name": "Nobody Else",
"artists": ["Abstrakt"],
"artist": "Abstrakt",
"album_name": "Nobody Else",
"album_artist": "Abstrakt",
"genres": [],
"disc_number": 1,
"disc_count": 1,
"duration": 162.406,
"year": 2022,
"date": "2022-03-17",
"track_number": 1,
"tracks_count": 1,
"isrc": "GB2LD2210007",
"song_id": "0kx3ml8bdAYrQtcIwvkhp8",
"cover_url": "https://i.scdn.co/image/ab67616d0000b27345f5ba253b9825efc88bc236",
"explicit": False,
"publisher": "NCS",
"url": "https://open.spotify.com/track/0kx3ml8bdAYrQtcIwvkhp8",
"copyright_text": "2022 NCS",
"download_url": None,
"song_list": None,
}
)
)
== "https://youtube.com/watch?v=bNXMlIogpXc"
)
@pytest.mark.vcr()
def test_yt_get_results():
provider = YouTube()
results = provider.get_results("Lost Identities Moments")
assert results and len(results) > 5
| [
"[email protected]"
] | |
433413195ed7c45b3676f07608e167a1fde9eec0 | 3be14950c61913576e1ee695ec05fc7ceed3b78e | /pecs/migrations/0004_auto_20210214_2048.py | 02ff45b8e9bba5318a83bf2f80e75be795c0ea7c | [] | no_license | roquemike/binder2 | 8c991262553ede3e929c378dadbb9786fac6e97f | 7917f863c34ade5646a9d334255a82cd710e919e | refs/heads/main | 2023-03-11T15:50:46.317606 | 2021-02-26T07:06:54 | 2021-02-26T07:06:54 | 342,656,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | # Generated by Django 3.1.5 on 2021-02-15 01:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pecs', '0003_auto_20210123_0057'),
]
operations = [
migrations.AlterField(
model_name='image',
name='description',
field=models.CharField(max_length=10),
),
migrations.CreateModel(
name='Favorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pecs', models.ManyToManyField(related_name='favorite', to='pecs.Image')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='favorite', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
ae5b5e91cf43266b95ffaeb5f1795e03a00655ff | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003803.py | 26d534cc630c79581554130b81c7f37de6f38777 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher62345(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.2.3.0', 1, 1, S(0)), Add)
]),
2: (2, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher62345._instance is None:
CommutativeMatcher62345._instance = CommutativeMatcher62345()
return CommutativeMatcher62345._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 62344
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 62346
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 62347
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63479
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 63480
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 65481
if len(subjects) >= 1:
tmp8 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', tmp8)
except ValueError:
pass
else:
pass
# State 65482
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst2
subjects.appendleft(tmp8)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp10 = subjects.popleft()
associative1 = tmp10
associative_type1 = type(tmp10)
subjects11 = deque(tmp10._args)
matcher = CommutativeMatcher62349.get()
tmp12 = subjects11
subjects11 = []
for s in tmp12:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp12, subst0):
pass
if pattern_index == 0:
pass
# State 62350
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst1
if pattern_index == 1:
pass
# State 63481
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
if pattern_index == 2:
pass
# State 65483
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst1
subjects.appendleft(tmp10)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part003804 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
46b9a9f729ed98e272f6501feadb2762b71b8590 | 7bafe264cb4ac6eff7b7e1edb6ddca267cc7e0ae | /sell_your_phone/accounts/signals.py | 32f55b33a83385f254215d41afe9894ed45d9888 | [] | no_license | Valentin2333/Sell-Your-Phone | 6bd55acb3121ce79345b26b016db75b192d34650 | e8e64e6a41826e48f8fa28af345938202d785ce4 | refs/heads/master | 2023-07-01T19:46:48.767568 | 2021-08-13T19:41:59 | 2021-08-13T19:41:59 | 394,266,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from sell_your_phone.accounts.models import Profile
UserModel = get_user_model()
@receiver(post_save, sender=UserModel)
def user_created(sender, instance, created, **kwargs):
if created:
profile = Profile(
user = instance,
)
profile.save()
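# Added note: because the receiver listens to post_save and checks created=True,
# a Profile row is created exactly once, when the user is first inserted;
# later saves of the same user leave the profile untouched.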
| [
"[email protected]"
] | |
b9a35d9b23d72056681ea97f3befda8018e6c7fd | 6611fc8f80e9d2ba6879ce6fae3069a66d34cc82 | /python-39 objectoriented.py | 83c9d8af9aa6bb896ec4c3fe127660d05ea1ac38 | [] | no_license | mass234/broPython12hoursTutorials | 6feae46e7f6a934b264e15e15dca1f6616958136 | 7d908752f4709519a9fc6885f25e8433434710ce | refs/heads/main | 2023-07-09T04:16:43.939355 | 2021-08-16T08:04:03 | 2021-08-16T08:04:03 | 396,685,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from car import Car
car_1 = Car("Chevy","Corvette",2021,"blue")
car_2 = Car("Ford","Mustang",2022,"red")
# print(car_1.make)
# print(car_1.model)
# print(car_1.year)
# print(car_1.color)
car_1.drive()
car_1.stop()
# print(car_2.make)
# print(car_2.model)
# print(car_2.year)
# print(car_2.color)
car_2.drive()
car_2.stop()
Car.wheels = 2
print(Car.wheels) | [
"[email protected]"
] | |
ca948906b190235d198874d255c25307202a2b12 | bee57d5be5e4d249275b6709b6f2582591b72036 | /Dummy K_means.py | 45f26929301c1d897515fe61aa784e2a3a9caa65 | [] | no_license | 0zymandias11/ML | a6b3d30cd5f469b817b51b0f1bf13657cde1b423 | 76f6d99549becc15f914595aaec210105fea7495 | refs/heads/master | 2023-03-19T15:26:22.291801 | 2019-02-10T21:41:03 | 2019-02-10T21:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 00:59:29 2019
@author: priya
"""
import numpy as np
import random
import matplotlib.pyplot as plt
l=[]
for i in range(50):
l.append([])
for j in range(2):
l[i].append(random.randint(1,100))
X=np.array(l)
plt.scatter(X[:,0], X[:,1], s=150)
print("initial plot")
plt.show()
colors = 50*["g","r","c","b","k"]
class K_means:
def __init__(self, k=3, tol=0.0001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self,data):
self.centroids ={}
for i in range(self.k):
self.centroids[i]=data[i]
for i in range(self.max_iter):
self.classifications={}
for i in range(self.k):
self.classifications[i]=[]
            for features in data:  # iterate over the fit() argument, not the global X
distances =[np.linalg.norm(features -self.centroids[i]) for i in self.centroids]
i=distances.index(min(distances))
self.classifications[i].append(features)
prev_centroids =dict(self.centroids)
for i in self.classifications:
self.centroids[i]=np.average(self.classifications[i],axis=0)
optimized=True
for i in self.centroids:
original_centroid=prev_centroids[i]
current_centroid=self.centroids[i]
if np.sum((current_centroid-original_centroid)/original_centroid*100.0) > self.tol:
optimized = False
if optimized:
break
def predict(self,data):
classifications=[]
for features in data:
distances =[np.linalg.norm(features -self.centroids[i]) for i in self.centroids]
i=distances.index(min(distances))
classifications.append(i)
return np.array(classifications)
clf = K_means()
clf.fit(X)
y_pred=clf.predict(X)
for centroid in clf.centroids:
plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],
marker="o", color="y", s=150, linewidths=5)
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker="+", color=color, s=150, linewidths=5)
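# Added illustration: predict() labels new points by nearest fitted centroid;
# the two hypothetical points below should fall into different clusters.
print(clf.predict(np.array([[5, 5], [95, 95]])))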
plt.show() | [
"[email protected]"
] | |
e66c08b871e76eeaf4e11f234bb1968803b42de1 | 3b6074316cbce6eebb13407f221759b2a47b5548 | /othellologic.py | e3789b9576c01f332829ee97f13a364a700fcd97 | [] | no_license | asiddiq1/Othello | a240b0eebbc883778e8537a584268da50f77f4b1 | e9638149f3d95e8db130367a3461b83b37e31315 | refs/heads/master | 2021-09-11T16:51:08.945852 | 2018-04-10T00:45:48 | 2018-04-10T00:45:48 | 107,196,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,408 | py | #Aisha Siddiq 81047072 Project 4
class InvalidMoveError(Exception):
'''Raises an error whenever an invalid move is made
'''
pass
class OthelloGame:
#Public Functions
def __init__(self, row, column, turn, top_player_piece, bottom_player_piece):
'''Initializes all objects inserted by the user
'''
self.Black = 'B'
self.White = 'W'
self.NONE = '.'
self.Tie = 'NONE'
self._row = row
self._column = column
self._turn = turn
self._topcorner = top_player_piece
self._bottomcorner = bottom_player_piece
def create_board(self):
'''Create the board with the four pieces in the middle
'''
self.game_board = []
for row in range(self._row):
self.game_board.append([self.NONE] * self._column)
self.game_board[int(self._row/2) -1][int(self._column/2)-1] = self._topcorner
self.game_board[int(self._row/2) -1][int(self._column/2)] = self._bottomcorner
self.game_board[int(self._row/2)][int(self._column/2) -1] = self._bottomcorner
self.game_board[int(self._row/2)][int(self._column/2)] = self._topcorner
def make_move(self, row, col):
'''The user inputs the row/col and if the move is valid, update the board
if invalid then raise an error
'''
if self._valid_move(row,col):
self._flip_pieces(row,col)
self._turn = self._switch_color()
self.player_score()
else:
raise InvalidMoveError()
def player_score(self):
'''Counts the score of each player
'''
self.total_white = 0
self.total_black = 0
for row in self.game_board:
for col in row:
if col == self.Black:
self.total_black += 1
elif col == self.White:
self.total_white += 1
def winning_player(self)->bool:
'''Returns false if there is a move on the board that is valid, if there
isn't then it checks if the other player has a valid move. If both players don't
have a valid move available then there is a winning player(true)'''
for row in range(self._row):
for col in range(self._column):
if self._valid_move(row, col):
return False
self._turn = self._switch_color()
for row in range(self._row):
for col in range(self._column):
if self._valid_move(row, col):
return False
return True
def winner_most_points(self):
'''Winning option (player with the most points)
'''
if self.winning_player():
if self.total_black > self.total_white:
return self.Black
elif self.total_white > self.total_black:
return self.White
else:
return self.Tie
def winner_least_points(self):
'''Winning option (player with the least points)
'''
if self.winning_player():
if self.total_black > self.total_white:
return self.White
elif self.total_white > self.total_black:
                return self.Black
else:
return self.Tie
#Private functions
def _switch_color(self):
'''Switches the color of the players
'''
if self._turn == self.Black:
return self.White
elif self._turn == self.White:
return self.Black
def _check_valid(self, row, col, rowdelta, coldelta)->bool:
'''Returns true if the row/col selected is a valid position on the board
'''
starting_point = self.game_board[row][col]
seen_opposite_color = False
if starting_point != self.NONE:
return False
else:
for i in range(1, max(self._row, self._column)):
if self._valid_column_number(col + coldelta * i) and self._valid_row_number(row + rowdelta * i):
current_point = self.game_board[row + rowdelta * i][col + coldelta * i]
if current_point == self._switch_color():
seen_opposite_color = True
continue
elif current_point == self._turn and seen_opposite_color:
return True
else:
return False
return False
def _valid_move(self, row, col)->bool:
'''Returns true/false if there is a move on the board that is valid
'''
return self._check_valid(row, col, 0, 1)\
or self._check_valid(row, col, 1, 1)\
or self._check_valid(row, col, 1, 0)\
or self._check_valid(row, col, 1, -1)\
or self._check_valid(row, col, 0, -1)\
or self._check_valid(row, col, -1, -1)\
or self._check_valid(row, col, -1, 0)\
or self._check_valid(row, col, -1, 1)
def _flip_pieces(self, row, col):
'''If the position selected is valid flip the pieces on the board
'''
delta = [(0,1), (1,1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
for rowdelta, coldelta in delta:
if self._check_valid(row, col, rowdelta, coldelta):
for i in range(1, max(self._row, self._column)):
current_point = self.game_board[row + rowdelta * i][col + coldelta * i]
if current_point == self._switch_color():
self.game_board[row + rowdelta * i][col + coldelta * i] = self._turn
continue
else:
break
self.game_board[row][col] = self._turn
def _valid_column_number(self, valid_column)->bool:
'''Checks to see if the piece is in a valid column
'''
return 0 <= valid_column < self._column
def _valid_row_number(self, valid_row)->bool:
'''Checks to see if the piece is in a valid row
'''
return 0 <= valid_row < self._row
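# Added illustration: an 8x8 game on the standard starting layout, Black to
# move first ('W' on the centre diagonal, 'B' on the anti-diagonal).
if __name__ == '__main__':
    game = OthelloGame(8, 8, 'B', 'W', 'B')
    game.create_board()
    game.make_move(2, 3)  # a legal opening move for Black
    print(game.total_black, game.total_white)  # expected: 4 1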
| [
"[email protected]"
] | |
882029deae2d3a31649ea48615af00792b932715 | 49406aa32e049ef52903777b5f7598c28348aa6f | /TankPy/test.py | 6bf824f6eb375529c98bd8fa7e6b0b36872a1255 | [
"MIT"
] | permissive | kmcgill88/TankPy | 1beb3b728d153b8da6d2e576dd4b5b7deeb2216d | 5c214eb452dce6669fd64e0c17b04c02d8709e88 | refs/heads/master | 2021-01-12T12:12:43.159641 | 2016-10-30T17:51:59 | 2016-10-30T17:51:59 | 72,364,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py |
from datetime import datetime
from dateutil import tz
on_off = "OFF"
now = datetime.now(tz=tz.gettz('America/Chicago'))
print "%s: %s" % (now, on_off)
| [
"[email protected]"
] | |
e95625894d5cba62471ce44e67b02160ea805c8f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/shangtai/codejamC.py | be3aad06742fb467f6074f6d0a440327d6d7dc65 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 999 | py | T = int(raw_input())
N, J = map(int, raw_input().split())
def is_prime(n):
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True
def primefactors(x):
loop=2
while loop<=x:
if x%loop==0:
x/=loop
return loop
else:
loop+=1
print "Case #1:"
j=0
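# Added commentary: this solves Code Jam 2016 "Coin Jam". Each candidate is an
# N-character string of 0s and 1s that starts and ends with 1; it is a jamcoin
# when its interpretation in every base 2..10 is composite, and one nontrivial
# divisor per base is printed after the string itself.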
for candidate in xrange(2**(N-2)):
candidate=candidate<<1
candidate+=(1+(1<<(N-1)))
candidate="{0:b}".format(candidate)
factorlist=[candidate]
for base in xrange(2,11):
candidatebase=int(candidate,base)
if is_prime(candidatebase):
break
else:
factorlist.append(primefactors(candidatebase))
if len(factorlist)==10:
j+=1
for i in factorlist:
print i,
print
if j==J:
break
| [
"[[email protected]]"
] | |
85ef2d4e488b8bb8cc6f70bb01e7d115d50ceea6 | dee7b7fd962bb940f63e27da2e1e7823821e0444 | /test/core_tests.py | 5532b9b98c926c5a07d182d1e667fb25a810fc36 | [] | no_license | russellchristensen/mesh | 841fd104f6ad858b7c68837442d574a06a0a63c9 | 20c69366ac874e1f33f26ad02ed6a5da9a354378 | refs/heads/master | 2021-01-18T05:54:40.243461 | 2013-07-15T17:13:38 | 2013-07-15T17:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,824 | py | # This file is part of Mesh.
# Mesh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Mesh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mesh. If not, see <http://www.gnu.org/licenses/>.
import glob, os, sys, subprocess, tempfile, time, unittest
from distutils import version
global gpl_header
gpl_header = """# This file is part of Mesh.
# Mesh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Mesh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mesh. If not, see <http://www.gnu.org/licenses/>."""
# We've got to find the root directory of the project to run tests!
global project_root_dir
project_root_dir = None
curr_root_dir = __file__
while not project_root_dir:
curr_root_dir, last_element = os.path.split(curr_root_dir)
if os.path.isfile(os.path.join(curr_root_dir, 'mesh')):
project_root_dir = curr_root_dir
break
if not last_element:
break
if not project_root_dir:
print "Error, couldn't find the project root directory. :-("
sys.exit(1)
# Found the project root directory! Make it available for the rest of the tests...
sys.path.append(os.path.join(project_root_dir, 'src'))
sys.path.append(os.path.join(project_root_dir, 'test'))
class Test00Dependencies(unittest.TestCase):
def test_00banner(self):
"[DEPENDENCY TESTS]"
def test_01os(self):
"OS is supported"
if sys.platform not in ['darwin', 'linux2']:
self.fail("Unsupported OS: %s" % sys.platform)
def test_03python_version(self):
"Python >= 2.6 is present"
supported_version = version.StrictVersion('2.6')
this_version = version.StrictVersion(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
self.assertTrue(this_version >= supported_version)
def test_09m2crypto(self):
"M2Crypto >= 0.20.2 is present"
import M2Crypto
supported_version = version.StrictVersion('0.20.2')
this_version = version.StrictVersion(M2Crypto.version)
self.assertTrue(this_version >= supported_version)
def test_12zmq(self):
"ZeroMQ >= 2.0.10 is present"
import zmq
supported_version = version.LooseVersion('2.0.10')
this_version = version.LooseVersion(zmq.__version__)
self.assertTrue(this_version >= supported_version)
def test_15psutil(self):
"psutil >= 0.2.0 is present"
import psutil
supported_version = version.StrictVersion('0.2.0')
this_version = version.StrictVersion(".".join([str(x) for x in psutil.version_info]))
self.assertTrue(this_version >= supported_version)
class Test01Code(unittest.TestCase):
def test_00banner(self):
"[CODE TESTS]"
def test_03license(self):
"GPL 3 Compliance"
global project_root_dir
# Iterate through the python files and check for compliance
source_files = [os.path.join(project_root_dir, 'mesh')] + glob.glob(os.path.join(project_root_dir, 'test', '*.py')) + glob.glob(os.path.join(project_root_dir, 'src', '*.py'))
failures = []
for fname in source_files:
header = open(os.path.join(project_root_dir, fname), 'r').read(1024)
if not gpl_header in header:
failures.append("The source file '%s' has a malformed or missing GPL header." % fname)
if failures:
self.fail('\n'+'\n'.join(failures))
class Test02Syntax(unittest.TestCase):
def test_00banner(self):
"[SYNTAX TESTS]"
def test_03import(self):
"No syntax errors are encountered in main project files"
import communicator, master, meshlib, port_assigner, port_requestor, pull_proxy
class Test02zmq(unittest.TestCase):
def test_00banner(self):
"[MESSAGING TESTS]"
def test_01pubsub(self):
"ZMQ Publish/Subscribe pattern works over ipc"
global project_root_dir
# Subscriber subprocess
sub_command = ('/usr/bin/env', 'python', os.path.join(project_root_dir, 'test/zmq_sub.py'),)
sub_process = subprocess.Popen(sub_command)
# Publisher subprocess
pub_command = ('/usr/bin/env', 'python', os.path.join(project_root_dir, 'test/zmq_pub.py'),)
pub_process = subprocess.Popen(pub_command)
# Watch only the subscriber, publisher obviously works if the subscriber successfully gets the message
sub_retcode = sub_process.poll()
for i in range(100):
if (sub_retcode != None):
break
time.sleep(.05)
sub_retcode = sub_process.poll()
        sub_retcode = sub_process.poll()
        pub_retcode = pub_process.poll()
if sub_retcode == 0:
return
else:
self.fail("ZMQ Publish/Subscribe pattern failed with retcodes %s/%s" % (str(sub_retcode), str(pub_retcode)))
def test_03reqrep(self):
"ZMQ Request/Reply pattern works over ipc"
global project_root_dir
# Reply subprocess
rep_command = ('/usr/bin/env', 'python', os.path.join(project_root_dir, 'test/zmq_rep.py'),)
rep_process = subprocess.Popen(rep_command)
# Request subprocess
req_command = ('/usr/bin/env', 'python', os.path.join(project_root_dir, 'test/zmq_req.py'),)
req_process = subprocess.Popen(req_command)
# Make sure both process exit successfully
rep_retcode = rep_process.poll()
req_retcode = req_process.poll()
while (rep_retcode == None) or (req_retcode == None):
time.sleep(.05)
rep_retcode = rep_process.poll()
req_retcode = req_process.poll()
if (rep_retcode == 0) and (req_retcode == 0):
return
else:
self.fail("ZMQ Request/Reply pattern failed with with retcodes %s/%s" % (str(rep_retcode), str(req_retcode)))
class Test03meshlib(unittest.TestCase):
def setUp(self):
import M2Crypto
self.alice_key = M2Crypto.RSA.load_key(os.path.join(project_root_dir, 'test', 'certs', 'alice.key'))
self.alice_cert = M2Crypto.X509.load_cert(os.path.join(project_root_dir, 'test', 'certs', 'alice.cert'))
self.bob_key = M2Crypto.RSA.load_key(os.path.join(project_root_dir, 'test', 'certs', 'bob.key'))
self.bob_cert = M2Crypto.X509.load_cert(os.path.join(project_root_dir, 'test', 'certs', 'bob.cert'))
self.ca_cert = M2Crypto.X509.load_cert(os.path.join(project_root_dir, 'test', 'certs', 'test-ca-cert.pem'))
def test_00banner(self):
"[MESHLIB]"
import meshlib
def test_03verifycert(self):
"(CLI or M2Crypto) SSL certificates signed by the CA get verified correctly"
import meshlib
global project_root_dir
# This certificate should be valid
if not meshlib.verify_cert(cafile=os.path.join(project_root_dir, 'test', 'certs', 'test-ca-cert.pem'), certfile=os.path.join(project_root_dir, 'test', 'certs', 'alice.cert')):
self.fail("A certificate that should be valid could not be verified. Note that you can ignore this failure if the same test via the M2Crypto method succeeded.")
def test_04verifycert_fail(self):
"(CLI or M2Crypto) Self-signed SSL certificates do not get verified"
import meshlib
global project_root_dir
# This certificate should not be valid
if meshlib.verify_cert(cafile=os.path.join(project_root_dir, 'test', 'certs', 'test-ca-cert.pem'), certfile=os.path.join(project_root_dir, 'test', 'certs', 'test-self-sign.cert')):
self.fail("A certificate that should not be valid was verified. Note that you can ignore this failure if the same test via the M2Crypto method succeeded.")
def test_06encrypt(self):
"Encrypting a string using a public key seems to work"
import meshlib, M2Crypto, os
global project_root_dir
data = 'Confidential data!'
cryptogram = meshlib.encrypt(data, self.alice_cert)
if cryptogram == data:
self.fail('Encryption failed spectacularly')
def test_09decrypt_known(self):
"Decrypt a known pre-encrypted string"
import meshlib, M2Crypto, os
global project_root_dir
cryptogram = """bvzkTmVVWmLfw6lvJtdrXIaXFTHoI8U+AWE906c9FC4ca7dfDiLB5TOOxhy6thDkhUw+J9AnrEoh
FFsRRoGACYRufjm84bBDqOHMkK0rjyRFvU2uttphOTjdgqHPZJnA7iWrV7mHTBHogiaM6MpJWLQO
uNSdEHxKxqpjg9BR1xj/cYm+iqD0OFfONz7BqFgao3NDTg4a5qpS8i9m4mqFcIuAIRkZG2mC+uBN
h3JvaGQ7Opua72ninJI79Hr2X2VWBXtA4eOQM1BsxxbHWxiLspHplStM34zvXkaUgwUdvHZjvwo5
Tp7tERNH08s4Wb7hvIj6p/EloWtb/CA01EfQwA==
"""
message = meshlib.decrypt(cryptogram, self.alice_key)
if message != 'Confidential data!':
self.fail('Failed to decrypt an encrypted string.')
def test_12encrypt_decrypt(self):
"Encrypt and then decrypt a random string"
import meshlib, M2Crypto, os, random, string
# Generate a random string of 32 letters and numbers
message = ''.join([random.choice(string.ascii_letters + string.digits) for x in range(32)])
cryptogram = meshlib.encrypt(message, self.bob_cert)
decrypted_message = meshlib.decrypt(cryptogram, self.bob_key)
if message != decrypted_message:
self.fail('Input string came back differently when decrypted: "%s" != "%s"' % (message, decrypted_message))
def test_15socket_url(self):
"Function socket_url works"
import meshlib, zmq
zmq_context = zmq.Context()
push = zmq_context.socket(zmq.PUSH)
pull = zmq_context.socket(zmq.PULL)
url = meshlib.socket_url('ipc')
push.connect(url)
pull.bind(url)
msg = "This is a test message"
push.send(msg)
output = pull.recv()
if msg != output:
self.fail("We weren't able to send/receive a message with a url created by socket_url.")
def test_18is_socket_url(self):
"Function is_socket_url works"
import meshlib
for i in xrange(10):
url = meshlib.socket_url('ipc')
if not meshlib.is_socket_url(url):
self.fail("is_socket_url failed on '%s' created by socket_url('ipc')" % url)
url = 'ipc:///tmp/goodurl.ipc'
if not meshlib.is_socket_url(url):
self.fail("is_socket_url failed on a known good url '%s'" % url)
badurl = 'paosidfsadfsdfhncv'
if meshlib.is_socket_url(badurl):
self.fail("is_socket_url didn't detect bad url %s" % badurl)
def test_21load_config(self):
"Function load_config() works with a non-existent config file (defaults will be used)"
import meshlib
meshlib.load_config('/tmp/does_not_exist.conf')
def test_24load_config(self):
"Function load_config() works with default config file (defaults will be used if it doesn't exist)"
import meshlib
meshlib.load_config()
def test_27load_config(self):
"Function load_config() works with a real custom-provided config file"
global project_root_dir
import meshlib
meshlib.load_config(os.path.join(project_root_dir, 'test', 'mesh_b.conf'))
self.assertTrue(meshlib.get_config(None, 'port_assigner_port', None) == '5200')
def test_30get_config(self):
"Function get_config() works properly"
import meshlib
meshlib.load_config(os.path.join(project_root_dir, 'test', 'mesh_b.conf'))
# Global options that aren't there return the default value
self.assertTrue(meshlib.get_config(None, 'fake_option', 'the default value') == 'the default value')
# Global options that are there return the real value
self.assertTrue(meshlib.get_config(None, 'next_push_port', None) == '5205')
# Plugin options that aren't there return the default value
self.assertTrue(meshlib.get_config('fake_plugin', 'another_fake_option', 'the default value') == 'the default value')
# Plugin options that are there return the real value
self.assertTrue(meshlib.get_config('p_template', 'banana_threshold', None) == '1')
# Plugin option overrides global options
self.assertTrue(meshlib.get_config('p_template', 'duplicate_option', None) == 'plugin_value')
# Missing plugin option falls back to global option
self.assertTrue(meshlib.get_config('p_template', 'inbound_pull_proxy_port', None) == '5201')
def test_33get_identifer(self):
"Function get_identifier() returns something."
import meshlib
meshlib.load_config()
self.assertTrue(meshlib.get_identifier())
def test_36tail(self):
"Function tail() can tail a file"
import meshlib
tempfilething = tempfile.NamedTemporaryFile()
filename = tempfilething.name
echo_process = subprocess.Popen(('bash', '-c', 'while echo testing >> %s ; do sleep .1 ; done' % filename))
for line in meshlib.tail(filename):
if line.strip() != 'testing':
self.fail("Tailing is working correctly")
echo_process.kill()
del tempfilething
else:
break
echo_process.kill()
del tempfilething
class Test04master(unittest.TestCase):
def setUp(self):
pass
def test_00banner(self):
"[MASTER]"
import master
def test_06create_zmq_context(self):
"ZMQ context was created"
import master, zmq
self.assertTrue(type(master.zmq_context) == zmq.core.context.Context)
def test_09create_push_communicator(self):
"Function create_push_communicator() works"
import master, zmq
master.create_push_communicator()
if type(master.push_communicator) != zmq.core.socket.Socket:
self.fail("push_communicator socket is wrong type: %s" % str(type(master.push_communicator)))
def test_12create_pull(self):
"Function create_pull() works"
import master, zmq
master.create_pull()
if type(master.pull) != zmq.core.socket.Socket:
self.fail("pull socket is wrong type: %s" % str(type(master.pull)))
# def test_15start_port_assigner(self):
# "Function start_port_assigner works"
# import master
# master.start_port_assigner()
# def test_18start_communicator(self):
# "Function start_communicator works"
# import master
# master.start_communicator()
# def test_21start_inbound_pull_proxy(self):
# "Function start_inbound_pull_proxy works"
# import master
# master.start_inbound_pull_proxy()
# def test_24start_outbound_pull_proxy(self):
# "Function start_outbound_pull_proxy works"
# import master
# master.start_outbound_pull_proxy('test')
# def test_27process_message(self):
# "Function process_message works"
class Test05communicator(unittest.TestCase):
def test_00banner(self):
"[COMMUNICATOR]"
import communicator
template_header = """
import meshlib, sys, unittest
if __name__ == '__main__':
zmq_context, push_master = meshlib.init_plugin(sys.argv)
"""
class Test06plugins(unittest.TestCase):
def test_00banner(self):
"[PLUGINS]"
def test_02template_header(self):
"All plugins have the template header"
import glob
global project_root_dir
failures = []
for plugin_file in glob.glob(os.path.join(project_root_dir, 'src', 'p_*py')):
plugin = os.path.split(plugin_file)[1][:-3]
plugin_contents = open(plugin_file).read()
if template_header not in plugin_contents:
failures.append("Plugin '%s' has a missing or malformed template section." % plugin)
if failures:
self.fail('\n'+'\n'.join(failures))
def test_03supported_os(self):
"All plugins define supported_os"
import glob
global project_root_dir
failures = []
for plugin_file in glob.glob(os.path.join(project_root_dir, 'src', 'p_*py')):
plugin = os.path.split(plugin_file)[1][:-3]
module = __import__(plugin)
supported_os = getattr(module, 'supported_os', None)
if supported_os == None:
failures.append("Plugin '%s' does not define supported_os." % plugin)
elif type(supported_os) != list:
failures.append("Plugin '%s' defined supported_os as a '%s' instead of a list!" % (plugin, type(supported_os)))
if failures:
self.fail('\n'+'\n'.join(failures))
def test_06description(self):
"All plugins define a description with summary and threshold"
import glob
global project_root_dir
failures = []
for plugin_file in glob.glob(os.path.join(project_root_dir, 'src', 'p_*py')):
plugin = os.path.split(plugin_file)[1][:-3]
module = __import__(plugin)
description = getattr(module, 'description', None)
if description == None:
failures.append("Plugin '%s' does not define description." % plugin)
continue
if type(description) != str:
failures.append("Plugin '%s' defined description as a '%s' instead of a string!" % (plugin, type(description)))
continue
desc_lines = description.strip().split('\n')
if len(desc_lines) > 1:
if desc_lines[1].strip():
failures.append("Plugin '%s' does not have an empty second line in the description." % plugin)
if 'threshold' not in description.lower():
failures.append("Plugin '%s' does not describe the threshold conditions" % plugin)
if failures:
self.fail('\n'+'\n'.join(failures))
def test_09configured(self):
"All plugins define a configured function"
import glob
global project_root_dir
failures = []
for plugin_file in glob.glob(os.path.join(project_root_dir, 'src', 'p_*py')):
plugin = os.path.split(plugin_file)[1][:-3]
module = __import__(plugin)
configured = getattr(module, 'configured', None)
if configured == None:
failures.append("Plugin '%s' does not define configured." % plugin)
continue
if type(configured) != type(lambda x: x):
failures.append("Plugin '%s' defined configured as a '%s' instead of a function!" % (plugin, type(configured)))
continue
if failures:
self.fail('\n'+'\n'.join(failures))
| [
"[email protected]"
] | |
0b1abe5fdb7987e0b580bb34afc4c2743ed01d20 | e84c5b51e672f687ea4bbf74114c2cf5079188a9 | /Hamonsoft_project01/urls.py | 09eb94bf091d6643d5e07db5fb4010a9173c789e | [] | no_license | jisuuuu/Hamonsoft_project01 | 8620af8d3996cddec206d0d228cf0954fbdd4a45 | 13a1b9438efd30f5890da0994fc22d90ac920507 | refs/heads/master | 2021-05-17T00:39:15.591854 | 2020-03-27T14:06:23 | 2020-03-27T14:06:23 | 250,540,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | """project_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
from django.conf import settings
from . import views
from django.conf.urls.static import static
app_name = 'project_1'
urlpatterns = [
url(r'^$', views.Project_main.as_view(), name='project_1'),
url(r'^[0-9]+/dblist$', views.Show_list.as_view(), name='dblist'),
path('main', views.main, name='main'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
44b33e99e4d184d47d2dff12c7aa8a5883e26fff | b09147b584d6f19b4bb60fef782783ed3cd90648 | /ipconfig/ipvalid.py | 126474efd5f32f20d6dbdb47d7fb4bf1304ed2ba | [] | no_license | xiaobabyLu/myspider | 9d442f06262acb2e43d92df96a75d2684820a2eb | dc8300bf31dcb249112698875f99ca0df512da77 | refs/heads/master | 2021-07-01T13:40:02.512194 | 2017-09-22T03:20:38 | 2017-09-22T03:20:38 | 104,065,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | # -*- coding : utf-8 -*-
import urllib2
import time
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
xici_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Hosts': 'hm.baidu.com',
'Referer': 'http://www.xicidaili.com/nn',
'Connection': 'keep-alive'
}
baidu_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Connection':'keep-alive',
'Referer':'https://www.baidu.com/',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.8'
# 'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3'
}
req_timeout = 5
testUrl = 'https://www.douban.com/'
testStr = 'douban'
cookies = urllib2.HTTPCookieProcessor()
checked_num =0
grasp_num =0
file = open(r'douban_proxy.txt','a')
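# Added note: each scraped proxy is validated by fetching testUrl through it
# and checking that the response still contains testStr; proxies that time
# out, error, or return an altered page are skipped.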
for page in range(1,10):
req = urllib2.Request('http://www.xici.net.co/nn/'+str(page),None,xici_headers)
html_doc = urllib2.urlopen(req,timeout=req_timeout).read()
soup = BeautifulSoup(html_doc,'html.parser')
trs = soup.find('table',id = 'ip_list').find_all('tr')
# print trs[1:]
for tr in trs[1:]:
tds = tr.find_all('td')
ip = tds[1].text.strip()
port = tds[2].text.strip()
location = tds[3].text.strip()
ishidden = tds[4].text.strip()
protocol = tds[5].text.strip()
if protocol =='HTTP' or protocol == 'HTTPS':
print '%s = %s : %s' % (protocol,ip,port)
grasp_num += 1
proxyHandler = urllib2.ProxyHandler({'http':r'http://%s:%s' % (ip,port)})
opener = urllib2.build_opener(cookies,proxyHandler)
opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')]
try:
print 1111
req = opener.open(testUrl,timeout=req_timeout)
print 2222
result = req.read()
print 3333
print result
pos = result.find(testStr)
                if pos > -1:  # find() returns -1 when the marker string is absent
file.write(protocol+'\t'+ip+'\t'+port+'\n')
file.flush()
checked_num += 1
print checked_num,grasp_num
else:
print 'forbbidon'
continue
except Exception as e:
continue
file.close()
print checked_num,grasp_num | [
"[email protected]"
] | |
566f030c732e651fcf0627d3621b4e3301bb19d1 | 0263a4863b56d7b35f3d0b948dd946ff104083b6 | /part03/list_factory.py | 844aaa01ecdf996c8c006656bda68acc9cd712c7 | [] | no_license | who-you-me/design_pattern | 79208bf7de5b8d516b04c651d9d61a53f03beec2 | 338421901d21870be7a4c791e59cd495e38aff83 | refs/heads/master | 2021-01-15T23:30:43.311710 | 2015-02-11T14:50:45 | 2015-02-11T14:50:45 | 28,767,439 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | # coding: utf-8
from factory import Factory, Link, Tray, Page
class ListFactory(Factory):
    def create_link(self, caption, url):
        return ListLink(caption, url)
    def create_tray(self, caption):
        return ListTray(caption)
    def create_page(self, title, author):
        return ListPage(title, author)
class ListLink(Link):
def make_html(self):
return ' <li><a href="' + self._url + '">' + self._caption + '</a></li>\n'
class ListTray(Tray):
def make_html(self):
buffer = ""
buffer += "<li>\n"
buffer += self._caption + "\n"
buffer += "<ul>\n"
for item in self._tray:
buffer += item.make_html()
buffer += "</ul>\n"
buffer += "</li>\n"
return buffer
class ListPage(Page):
def make_html(self):
buffer = ""
buffer += "<html><head><title>" + self._title + "</title></head>\n"
buffer += "<body>\n"
buffer += "<h1>" + self._title + "</h1>\n"
buffer += "<ul>\n"
for item in self._content:
buffer += item.make_html()
buffer += "</ul>\n"
buffer += "<hr><address>" + self._author + "</address>\n"
buffer += "</body></html>\n"
return buffer
| [
"[email protected]"
] | |
81ae8ada7fd81464e17d0959e6d5cd34aea1928c | c69422b3b24928cb7f1bec375edf0edbb0eca791 | /test/rbdefects/rbdefects/resources.py | 15dc68bf1a2ca9186ee62de69cab36c3e2edf2a7 | [] | no_license | pcr/reviewboard_git_hooks | aff0101167fa343790a6235c8b50327db4abdce5 | 2e38cf3303f6699fb2653c492169da213059d719 | refs/heads/master | 2016-09-10T23:10:54.240772 | 2014-02-20T02:47:54 | 2014-02-20T02:47:54 | 15,449,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | from reviewboard.webapi.resources import WebAPIResource
| [
"[email protected]"
] | |
1c7f22e097b3a2d83172781b98634e53be397dc9 | 5a19bcfbdf57eff9863d3e958fd00674022c9065 | /Meritt_Josh_Midterm/Part_2/Meritt_Josh_Midterm_part_2.py | 613e1ee38d6e5c0d6b72c27a2a2ce5cf74246237 | [] | no_license | M3ritt/ISC_215 | 06f57109f284d805a11406fb0792a5b20c3358a9 | 06d68c9cf88d264f65e5af7e8e8e04c3a68dbf10 | refs/heads/main | 2023-03-19T23:38:49.096647 | 2021-03-11T21:28:03 | 2021-03-11T21:28:03 | 340,162,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | """
@Authors
Josh Meritt : 805157393
Midterm part 2:
Simple Linear Regression
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
def get_file():
cwd = os.getcwd()
file = 'Salary_Education_Data.csv'
path = cwd + "\\" + file
while True:
try:
dataset = pd.read_csv(path)
read_data(dataset)
return
except FileNotFoundError:
            file = input("[Error] File was not found. Please enter a correct file: ")
            path = cwd + "\\" + file  # rebuild the path, otherwise the loop retries the old file forever
def read_data(dataset):
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values
creating_sets(x,y)
def creating_sets(x, y):
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 0)
#print(x_train, x_test, y_train, y_test)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_predictions = regressor.predict(x_test)
print("[Y_prediction]:", y_predictions)
print("[Y_test]:", y_test)
create_training_set(x_train, y_train, regressor)
plt.clf() #Need to clear current plot or will be combined when plotting test set
create_test_set(x_test, y_test, x_train, regressor)
def create_training_set(x_train, y_train, regressor):
plt.scatter(x_train, y_train, color = 'red')
plt.plot(x_train, regressor.predict(x_train), color = 'blue')
plt.xlabel('Years of Education (Training set)')
plt.ylabel('Salary')
plt.savefig('TrainingSet.png')
plt.show()
print('[Process complete] TrainingSet.png')
def create_test_set(x_test, y_test, x_train, regressor):
plt.scatter(x_test, y_test, color = 'green')
plt.plot(x_train, regressor.predict(x_train), color = 'blue')
plt.xlabel('Years of Education (Test set)')
plt.ylabel('Salary')
plt.savefig('TestSet.png')
plt.show()
print('[Process complete] TestSet.png')
if __name__ == '__main__':
get_file();
print("[Exiting program]")
| [
"[email protected]"
] | |
9bc07e7f9b6af02f91aaf4a8526b71f89cb6eee9 | 76711371a37406353e8ddad84f234a1527013e1b | /SuperResolution/main.py | 0cc35a3337a17df64104228517440b7fa3131aa3 | [] | no_license | oosky9/MachineLearning | b3ac9349ee15bd18a9dc23e8128de276f3858881 | e89ab8a86ae59a2aee6ad5f1683be340e15daf29 | refs/heads/master | 2023-04-16T17:38:57.110069 | 2020-05-23T07:47:58 | 2020-05-23T07:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,452 | py | import torch
import torchvision
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from srgan import Generator, Discriminator, GeneratorLoss, DiscriminatorLoss
from utils import load_datasets, psnr, ssim
from tqdm import tqdm
import statistics
import os
import argparse
def get_psnr(fake, real):
cpu = torch.device("cpu")
psnr_list = []
for i in range(len(fake)):
np_fake = fake[i].to(cpu).detach().clone().numpy().transpose([1, 2, 0])
np_real = real[i].to(cpu).detach().clone().numpy().transpose([1, 2, 0])
psnr_list.append(psnr(np_fake, np_real))
return statistics.mean(psnr_list)
def get_ssim(fake, real):
cpu = torch.device("cpu")
ssim_list = []
for i in range(len(fake)):
np_fake = fake[i].to(cpu).detach().clone().numpy().transpose([1, 2, 0])
np_real = real[i].to(cpu).detach().clone().numpy().transpose([1, 2, 0])
ssim_list.append(ssim(np_fake, np_real))
return statistics.mean(ssim_list)
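# Both helpers above move the tensors to the CPU and transpose them from CHW
# to HWC before handing them to the numpy-based psnr/ssim metrics from utils.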
def pre_train(x_train, x_test, pre_epochs, upscale_factor, device, save_image_path, save_model_path):
writer = SummaryWriter()
model_G = Generator(upscale_factor).to(device)
optimizer_G = torch.optim.Adam(model_G.parameters(),
lr=0.0002, betas=(0.5, 0.999))
mae_loss = nn.L1Loss()
best_loss = 99999
best_model_name = ''
result = {}
result["pre_train/l1_loss_G"] = []
result["pre_test/l1_loss_G"] = []
result["pre_test/psnr"] = []
result["pre_test/ssim"] = []
for epoch in range(pre_epochs):
print("train step: epoch {}".format(epoch))
train_loss_G = []
for (input_img, real_img), _ in tqdm(x_train):
input_img, real_img = input_img.to(device), real_img.to(device)
optimizer_G.zero_grad()
fake_img = model_G(input_img)
g_loss = mae_loss(real_img, fake_img)
g_loss.backward()
optimizer_G.step()
train_loss_G.append(g_loss.item())
result["pre_train/l1_loss_G"].append(statistics.mean(train_loss_G))
writer.add_scalar('pre_train/l1_loss_G', result["pre_train/l1_loss_G"][-1], epoch)
if epoch % 5 == 0 or epoch == pre_epochs - 1:
with torch.no_grad():
print("test step")
test_loss_G = []
test_psnr = []
test_ssim = []
for (input_img, real_img), _ in tqdm(x_test):
input_img, real_img = input_img.to(device), real_img.to(device)
fake_img = model_G(input_img)
g_loss = mae_loss(real_img, fake_img)
test_loss_G.append(g_loss.item())
test_psnr.append(get_psnr(fake_img, real_img))
test_ssim.append(get_ssim(fake_img, real_img))
total_test_loss_G = statistics.mean(test_loss_G)
result["pre_test/l1_loss_G"].append(total_test_loss_G)
writer.add_scalar('pre_test/l1_loss_G', result["pre_test/l1_loss_G"][-1], epoch)
result["pre_test/psnr"].append(statistics.mean(test_psnr))
result["pre_test/ssim"].append(statistics.mean(test_ssim))
writer.add_scalar('pre_test/psnr', result["pre_test/psnr"][-1], epoch)
writer.add_scalar('pre_test/ssim', result["pre_test/ssim"][-1], epoch)
torchvision.utils.save_image(fake_img[:32],
os.path.join(save_image_path, f"fake_epoch_{epoch:03}.png"),
range=(-1.0, 1.0), normalize=True)
torchvision.utils.save_image(real_img[:32],
os.path.join(save_image_path, f"real_epoch_{epoch:03}.png"),
range=(-1.0, 1.0), normalize=True)
if best_loss > total_test_loss_G:
best_loss = total_test_loss_G
best_model_name = os.path.join(save_model_path, f"pre_gen_{epoch:03}.pytorch")
print("save model ==>> {}".format(best_model_name))
torch.save(model_G.state_dict(), best_model_name)
writer.close()
return best_model_name
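# Two-stage training: pre_train() above fits the generator alone with an L1
# objective and returns the best checkpoint; train() below loads that
# checkpoint and fine-tunes generator and discriminator adversarially.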
def train(x_train, x_test, epochs, upscale_factor, device, pre_model, save_image_path, save_model_path):
writer = SummaryWriter()
model_G = Generator(upscale_factor)
model_G.load_state_dict(torch.load(pre_model))
model_G = model_G.to(device)
optimizer_G = torch.optim.Adam(model_G.parameters(),
lr=0.0002, betas=(0.5, 0.999))
model_D = Discriminator()
model_D = model_D.to(device)
optimizer_D = torch.optim.Adam(model_D.parameters(),
lr=0.0002, betas=(0.5, 0.999))
loss_G = GeneratorLoss().to(device)
loss_D = DiscriminatorLoss().to(device)
result = {}
result["train/loss_G"] = []
result["train/loss_D"] = []
result["test/loss_G"] = []
result["test/loss_D"] = []
result["test/psnr"] = []
result["test/ssim"] = []
for epoch in range(epochs):
print("train step: epoch {}".format(epoch))
train_loss_G, train_loss_D = [], []
for (input_img, real_img), _ in tqdm(x_train):
input_img, real_img = input_img.to(device), real_img.to(device)
fake_img = model_G(input_img)
fake_img_tensor = fake_img.detach()
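            # Detached copy: the discriminator update below must not
            # backpropagate into the generator's graph.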
# Update D
optimizer_D.zero_grad()
D_out_real = model_D(real_img)
D_out_fake = model_D(fake_img_tensor)
d_loss = loss_D(D_out_real, D_out_fake)
d_loss.backward(retain_graph=True)
optimizer_D.step()
train_loss_D.append(d_loss.item())
# Update G
optimizer_G.zero_grad()
g_loss = loss_G(D_out_fake, real_img, fake_img)
g_loss.backward(retain_graph=True)
optimizer_G.step()
train_loss_G.append(g_loss.item())
result["train/loss_G"].append(statistics.mean(train_loss_G))
result["train/loss_D"].append(statistics.mean(train_loss_D))
writer.add_scalar('train/loss_G', result["train/loss_G"][-1], epoch)
writer.add_scalar('train/loss_D', result["train/loss_D"][-1], epoch)
if epoch % 5 == 0 or epoch == epochs - 1:
with torch.no_grad():
print("test step")
test_loss_G, test_loss_D = [], []
test_psnr, test_ssim = [], []
for (input_img, real_img), _ in tqdm(x_test):
input_img, real_img = input_img.to(device), real_img.to(device)
fake_img = model_G(input_img)
D_out_real = model_D(real_img)
D_out_fake = model_D(fake_img)
d_loss = loss_D(D_out_real, D_out_fake)
test_loss_D.append(d_loss.item())
g_loss = loss_G(D_out_fake, real_img, fake_img)
test_loss_G.append(g_loss.item())
test_psnr.append(get_psnr(fake_img, real_img))
test_ssim.append(get_ssim(fake_img, real_img))
result["test/loss_G"].append(statistics.mean(test_loss_G))
result["test/loss_D"].append(statistics.mean(test_loss_D))
result["test/psnr"].append(statistics.mean(test_psnr))
result["test/ssim"].append(statistics.mean(test_ssim))
writer.add_scalar('test/loss_G', result["test/loss_G"][-1], epoch)
writer.add_scalar('test/loss_D', result["test/loss_D"][-1], epoch)
writer.add_scalar('test/psnr', result["test/psnr"][-1], epoch)
writer.add_scalar('test/ssim', result["test/ssim"][-1], epoch)
torchvision.utils.save_image(fake_img[:32],
os.path.join(save_image_path, f"fake_epoch_{epoch:03}.png"),
range=(-1.0, 1.0), normalize=True)
torchvision.utils.save_image(real_img[:32],
os.path.join(save_image_path, f"real_epoch_{epoch:03}.png"),
range=(-1.0, 1.0), normalize=True)
if epoch % 10 == 0 or epoch == epochs - 1:
torch.save(model_G.state_dict(), os.path.join(save_model_path, f"gen_{epoch:03}.pytorch"))
torch.save(model_D.state_dict(), os.path.join(save_model_path, f"dis_{epoch:03}.pytorch"))
writer.close()
def check_dir(p):
if not os.path.isdir(p):
os.makedirs(p)
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--pre_model_path', type=str, default="sr_rnn/model")
parser.add_argument('--pre_image_path', type=str, default="sr_rnn/images")
parser.add_argument('--model_path', type=str, default="sr_gan/model")
parser.add_argument('--image_path', type=str, default="sr_gan/images")
parser.add_argument('--pre_epochs', type=int, default=50)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--upscale_factor', type=int, default=4)
parser.add_argument('--batch_size', type=int, default=64)
args = parser.parse_args()
return args
def main(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x_train, x_test = load_datasets(args.upscale_factor, args.batch_size)
check_dir(args.pre_image_path)
check_dir(args.pre_model_path)
pre_model_name = pre_train(
x_train=x_train,
x_test=x_test,
pre_epochs=args.pre_epochs,
upscale_factor=args.upscale_factor,
device=device,
save_image_path=args.pre_image_path,
save_model_path=args.pre_model_path,
)
check_dir(args.image_path)
check_dir(args.model_path)
train(
x_train=x_train,
x_test=x_test,
epochs=args.epochs,
upscale_factor=args.upscale_factor,
device=device,
pre_model=pre_model_name,
save_image_path=args.image_path,
save_model_path=args.model_path,
)
if __name__ == '__main__':
args = arg_parser()
main(args) | [
"[email protected]"
] | |
a508f1b21eb0f6780f7e25f0848a5d2a51ae29ab | 1edd52cf197e5ae67b5939a3beb3e70761334e62 | /Udemy/python/Session-1&2-Intro-print-indentation-comments/using_indent.py | dce3afaf2ce37572dcc99994e37f4ba78baff000 | [] | no_license | sandeepmchary/Devops_wordpress_Notes | bdcd85d526780d03c494ecb93e714e7ffe0a4d58 | ffd2092162073e1e7342c6066d023d04e6ca8c1c | refs/heads/master | 2022-06-18T21:33:02.471025 | 2022-06-12T11:14:47 | 2022-06-12T11:14:47 | 154,679,658 | 1 | 4 | null | 2022-05-19T16:59:57 | 2018-10-25T13:51:40 | HTML | UTF-8 | Python | false | false | 117 | py | print("this is for indentation")
if 3 > 1:
print("using if condition")
print("we are comparing 3 with 1")
| [
"[email protected]"
] | |
8a9561159e82907417c9e0b374b3f8f11bf078ac | 72e11a80587342b3f278d4df18406cd4ce7531e8 | /pip-10.0.0.dev0-py3.6.egg/pip/_internal/index.py | 7c1c210b697a85f199e819826dd94ed5eab4da72 | [] | no_license | EnjoyLifeFund/Debian_py36_packages | 740666f290cef73a4f634558ccf3fd4926addeda | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | refs/heads/master | 2021-08-24T02:17:24.349195 | 2017-12-06T06:18:35 | 2017-12-06T06:18:35 | 113,167,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,528 | py | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import cgi
import itertools
import logging
import mimetypes
import os
import posixpath
import re
import sys
import warnings
from collections import namedtuple
from pip._vendor import html5lib, requests, six
from pip._vendor.distlib.compat import unescape
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.compat import ipaddress
from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip._internal.exceptions import (
BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.models import PyPI
from pip._internal.pep425tags import get_supported
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, cached_property, normalize_path,
splitext,
)
from pip._internal.utils.packaging import check_requires_python
from pip._internal.wheel import Wheel, wheel_ext
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
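# Each (protocol, hostname, port) triple above is matched field-by-field
# against a candidate URL in _validate_secure_origin(); "*" acts as a
# wildcard, and network specs such as "127.0.0.0/8" match any address they
# contain.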
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None, platform=None,
versions=None, abi=None, implementation=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
:param platform: A string or None. If None, searches for packages
that are supported by the current system. Otherwise, will find
packages that can be built on the platform passed in. These
packages will only be downloaded for distribution: they will
not be built locally.
:param versions: A list of strings or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param abi: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param implementation: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# The valid tags to check potential found wheel candidates against
self.valid_tags = get_supported(
versions=versions,
platform=platform,
abi=abi,
impl=implementation,
)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def get_formatted_locations(self):
lines = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
lines.append(
"Looking in indexes: {}".format(", ".join(self.index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(", ".join(self.find_links))
)
return "\n".join(lines)
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip11Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self.valid_tags)
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(self.valid_tags)
build_tag = tuple()
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported(self.valid_tags):
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min(self.valid_tags))
if wheel.build_tag is not None:
match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
return (candidate.version, build_tag, pri)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def find_all_candidates(self, project_name):
"""Find all available InstallationCandidate for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self.find_all_candidates(req.name)
# Filter out anything which doesn't match our specifier
compatible_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
', '.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported(self.valid_tags):
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
try:
support_this_python = check_requires_python(link.requires_python)
except specifiers.InvalidSpecifier:
logger.debug("Package %s has an invalid Requires-Python entry: %s",
link.filename, link.requires_python)
support_this_python = True
if not support_this_python:
logger.debug("The package %s is incompatible with the python"
"version in use. Acceptable python versions are:%s",
link, link.requires_python)
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
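# Illustrative behaviour: egg_info_matches('pip-1.3.1', 'pip', link) returns
# '1.3.1', while egg_info_matches('pip-1.3.1', 'other', link) returns None.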
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
transport_encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip._internal.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yield Link(url, self, requires_python=pyrequire)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, requires_python=None):
"""
Object representing a parsed link from https://pypi.python.org/simple/*
url:
url of the resource pointed to (href of the link)
comes_from:
instance of HTMLPage where the link was found, or string.
requires_python:
String containing the `Requires-Python` metadata field, specified
in PEP 345. This may be specified by a data-requires-python
attribute in the HTML link tag, as described in PEP 503.
"""
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.requires_python = requires_python if requires_python else None
def __str__(self):
if self.requires_python:
rp = ' (requires-python:%s)' % self.requires_python
else:
rp = ''
if self.comes_from:
return '%s (from %s)%s' % (self.url, self.comes_from, rp)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip._internal.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = {"binary", "source"}
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| [
"[email protected]"
] | |
4161ab8e5b053e034447ffc09fd26f16745bd00c | a52c74a91f2de9ecf08559b1a4e6e8e07b34702d | /2_Loops_and_Lists/1_word_loop.py | 7791bd34c847def50c2d35dd772884270974bb3c | [] | no_license | Demdillypickles/Python_Demos | 63d5624817031437dba950d22e6a9149e0ae294d | 899c25d14a1310bca227057d77713cc93c2356b3 | refs/heads/main | 2023-04-15T13:30:35.095365 | 2021-04-22T12:56:10 | 2021-04-22T12:56:10 | 360,518,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from time import sleep
separator = "#---------------#\n\n"
word = input("Give me a word!\n>>> ")
for x in word:
print(x)
sleep(0.5)
print('\n' + separator)
for i in range(0, 20):
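    # The slice grows by one character each pass and wraps via the modulo,
    # so the word repeatedly "types itself out" from the first letter.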
print(word[0: i % len(word) + 1])
sleep(0.5)
| [
"[email protected]"
] | |
2b2ecf4b17dd2b31fbfbf57f46f019b2b1eb04ec | c903382b1c2d170ca5a00a4482ee23be94da76d8 | /quokka/core/admin/views.py | b7dff6308251262454ea8aa0e2499378eaebf24c | [
"MIT"
] | permissive | alyoung/quokka | 63c74ff913fe3d3b5ebdef38d9d267b149a6c9c1 | a38749379f01c01cc887838999efa364dea5de04 | refs/heads/master | 2021-01-17T22:56:34.760694 | 2013-11-22T21:42:50 | 2013-11-22T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,499 | py | # Create customized index view class
from flask import current_app
from quokka.core.models import Content
from quokka.utils.routing import expose
from quokka.core.widgets import TextEditor, PrepopulatedText
from .ajax import AjaxModelLoader
from .models import BaseIndexView, BaseView, ModelAdmin, BaseContentAdmin
class IndexView(BaseIndexView):
roles_accepted = ('admin', 'editor', 'moderator', 'writer', 'staff')
@expose('/')
def index(self):
return self.render('admin/index.html')
class InspectorView(BaseView):
roles_accepted = ('admin',)
@expose('/')
def index(self):
context = {
"app": current_app
}
return self.render('admin/inspector.html', **context)
###############################################################
# Admin model views
###############################################################
class LinkAdmin(BaseContentAdmin):
roles_accepted = ('admin', 'editor', 'writer', 'moderator')
column_list = ('title', 'channel', 'slug', 'published')
form_columns = ('title', 'slug', 'channel', 'link',
'content_format', 'summary', 'contents',
'values', 'available_at', 'available_until', 'published')
form_args = {
'summary': {'widget': TextEditor()}
}
class ConfigAdmin(ModelAdmin):
roles_accepted = ('admin', 'developer')
column_list = ("group", "description", "published",
"created_at", "updated_at")
column_filters = ("group", "description")
form_columns = ("group", "description", "published", "values")
class SubContentPurposeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ContentTemplateTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
column_list = ('title', 'long_slug', 'is_homepage',
'channel_type', 'created_at', 'available_at', 'published',
'view_on_site')
column_filters = ['published', 'is_homepage', 'include_in_rss',
'show_in_menu', 'indexable']
column_searchable_list = ('title', 'description')
form_columns = ['title', 'slug', 'content_format', 'description',
'parent', 'is_homepage',
'include_in_rss', 'indexable', 'show_in_menu', 'order',
'per_page', 'tags',
'published', 'canonical_url', 'values', 'channel_type',
'inherit_parent', 'content_filters', 'available_at',
'available_until', 'render_content', 'redirect_url']
column_formatters = {
'view_on_site': ModelAdmin.formatters.get('view_on_site'),
'created_at': ModelAdmin.formatters.get('datetime'),
'available_at': ModelAdmin.formatters.get('datetime')
}
form_subdocuments = {}
form_widget_args = {
'title': {'style': 'width: 400px'},
'slug': {'style': 'width: 400px'},
}
form_args = {
'description': {'widget': TextEditor()},
'slug': {'widget': PrepopulatedText(master='title')}
}
form_ajax_refs = {
'render_content': AjaxModelLoader('render_content',
Content,
fields=['title', 'slug']),
'parent': {'fields': ['title', 'slug', 'long_slug']},
}
| [
"[email protected]"
] | |
1dea255ed37b2dc4dc02999793cf9f877ff42627 | f5c7b5ccee346fa60fd3da4aa296de58b7a0cf2a | /catalyst_mlcomp/executors/valid.py | 0aea9d4e86c42a63be7207562706c6a505bb4314 | [] | no_license | CozyDoomer/steel-defect-detection | 8a1df7a418c68c1dea0446ba67178cdd87a06305 | bb1b803cde34cc5dbb5cae819791be00a62d5ad0 | refs/heads/master | 2022-02-25T08:49:46.693357 | 2019-10-21T18:18:39 | 2019-10-21T18:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | from collections import OrderedDict
from copy import deepcopy
import cv2
import numpy as np
from tqdm import tqdm
from mlcomp.contrib.dataset.segment import ImageWithMaskDataset
from mlcomp.contrib.metrics.dice import dice_numpy
from mlcomp.worker.executors import Executor
from mlcomp.worker.executors.valid import Valid
from mlcomp.worker.reports.segmenation import SegmentationReportBuilder
from catalyst_segment import Experiment
from utils.executor_mixin import ExecutorMixin
@Executor.register
class ValidSeverstal(Valid, ExecutorMixin):
def __init__(self, **kwargs):
cache_names = ['y', 'y_segment']
super().__init__(
layout='img_segment', cache_names=cache_names, **kwargs
)
self.x_source = ImageWithMaskDataset(
img_folder='data/train_images',
mask_folder='data/train_masks',
fold_csv='data/masks.csv',
fold_number=self.fold_number,
is_test=True,
transforms=Experiment.prepare_test_transforms(),
num_classes=4,
max_count=self.max_count
)
self.builder = None
self.x = None
self.dices = []
def create_base(self):
colors = [
(255, 255, 0), # yellow
(0, 155, 191), # light blue
(148, 0, 211), # purple
(255, 0, 0) # red
]
self.builder = SegmentationReportBuilder(
session=self.session,
layout=self.layout,
task=self.task,
name=self.name,
colors=colors,
plot_count=self.plot_count
) if self.layout else None
def adjust_part(self, part):
self.x = deepcopy(self.x_source)
self.x.data = self.x.data[part[0]:part[1]]
def count(self):
return len(self.x_source)
def score(self, preds):
dices_by_photo = []
for i, img_pred in tqdm(enumerate(preds)):
dice_by_photo = []
for j, c in enumerate(img_pred):
target = self.x[i]['targets'][j]
score = dice_numpy(target, c, empty_one=True)
self.dices.append(score)
dice_by_photo.append(score)
dices_by_photo.append(np.mean(dice_by_photo))
return {'dice': dices_by_photo}
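    # Note: dice_numpy is called with empty_one=True, which (presumably)
    # scores an empty prediction against an empty target as 1.0 -- important
    # here, since many (image, class) pairs in this dataset have no defect.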
def score_final(self):
return np.mean(self.dices)
def _plot_main(self, preds, scores):
imgs = []
attrs = []
targets = []
self.x.transforms = None
for i, img_pred in tqdm(enumerate(preds)):
if i >= self.builder.plot_count:
break
row = self.x[i]
attr = {}
for j, c in enumerate(img_pred):
contours, _ = cv2.findContours(
c, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
)
                attr[f'attr{j + 1}'] = len(contours)
imgs.append(row['features'])
targets.append(row['targets'].astype(np.uint8))
attr['attr5'] = sum(attr.values())
attrs.append(attr)
preds = OrderedDict({'y': preds})
if 'y_segment' in self.cache:
preds['y_segment'] = self.cache['y_segment']
self.builder.process_pred(
imgs=imgs,
preds=preds,
targets=targets,
attrs=attrs,
scores=scores
)
def plot(self, preds, scores):
self._plot_main(preds, scores)
def plot_final(self, score):
self.builder.process_scores({'dice': score})
| [
"[email protected]"
] | |
b76a0dfa887b2f0159fd967b61e4c40f5431f2a8 | 6729079d2ed02ebbca05adbb165d321dd37966a7 | /courses/tests.py | 353141a1272890080377ed5dc42f7c2dfdc29162 | [] | no_license | ivanprytula/yps_test_task | 0df9064d76cf9acfa9613da729d7ce531de35d75 | 6b6bf5ea8bb29b187bbbfc2ab8582a38906e4f72 | refs/heads/main | 2023-04-15T12:16:34.237140 | 2021-04-26T23:17:41 | 2021-04-26T23:17:41 | 361,918,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | import datetime
from http import HTTPStatus
from django.test import TestCase, Client
from .models import Course
class ModelTestCase(TestCase):
"""This class defines the test suite for the Course model."""
def setUp(self):
self.course_name = "Yalantis Python course"
self.start_date = datetime.datetime.now()
self.end_date = self.start_date + datetime.timedelta(days=30)
self.lectures_quantity = 1
self.course = Course(name=self.course_name,
start_date=self.start_date,
end_date=self.end_date,
lectures_quantity=self.lectures_quantity)
def test_model_can_create_a_course(self):
"""Test the Course model can create a course/instance."""
old_count = Course.objects.count()
self.course.save()
new_count = Course.objects.count()
self.assertNotEqual(old_count, new_count)
class APIViewTestCase(TestCase):
"""Test suite for the api views."""
def setUp(self):
"""Define the test client and other test variables."""
self.client = Client()
self.course_data = {
'id': '1',
'name': 'Python course',
'start_date': '2021-05-01',
'end_date': '2021-07-01',
'lectures_quantity': '20',
}
self.response = self.client.post(
'/api/courses/create/',
self.course_data,
content_type='application/json',
format='json')
def test_api_can_create_a_course(self):
"""Test the API has course creation capability."""
self.assertEqual(self.response.status_code, HTTPStatus.CREATED)
def test_api_can_retrieve_courses_list(self):
"""Test the API has courses list retrieving capability."""
response = self.client.get('/api/courses/', format='json')
self.assertEqual(response.status_code, HTTPStatus.OK)
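# These tests can be run with Django's test runner, e.g.:
#   python manage.py test courses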
| [
"[email protected]"
] | |
6665c52b545bfa40ef7b723af971b5ab3734efa2 | d58f9e650c4d8f1fe0379bb5c0a8d2f58ae697ec | /thorvald_penetrometer/scripts/penetrometer_driver.py | 5116ed52841c9316380b945b56beff444f675d8b | [] | no_license | mfkiwl/lcas_hardware-gnss-imu | 41a02418c3895463b0e22e36eb9669560d516e8a | 2aaed0601e2fd165cfb1f2a58b7c19df3b55ed2d | refs/heads/master | 2021-09-28T20:33:53.096301 | 2018-11-20T09:21:55 | 2018-11-20T09:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,844 | py | #!/usr/bin/env python
import rospy
import actionlib
import serial
import threading
import yaml
import std_srvs.srv
#import actionlib_msgs.msg
import dynamic_reconfigure.server # import Server
from thorvald_penetrometer.cfg import PenetrometerConfig
import std_msgs.msg #import String
import thorvald_penetrometer.msg
class PenetrometerServer(object):
"""
Class for Penetrometer Control
"""
_feedback = thorvald_penetrometer.msg.ProbeSoilFeedback()
_result = thorvald_penetrometer.msg.ProbeSoilResult()
_config_commands={ "ground_level":'g', "speed":'s', "top_speed":'t', "home_speed":'k',
"acceleration":'a', "deceleration":'d', "probe_depth":'l', "dist_per_reading": 'q',
"lfd_tolerance":'r', "max_force":'m', "min_force":'n', "max_force_delta":'x',
"min_force_delta":'y',"force_delta_abs":'v',"safe_disconnect":'o'}
_decimilimiters_configs=['ground_level','probe_depth','dist_per_reading']
_centiseconds_configs=['lfd_tolerance']
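    # Illustrative encoding: with probe_depth = 25.0 (mm), set_config() below
    # sends the command "l250" (deci-millimetres) over serial and waits for
    # the capitalised echo "L250" as confirmation.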
def __init__(self, name):
"""
Initialization for Class
"""
self.cancelled = False
self.running=True
self.reply_buf=[]
self.last_response = ''
self.config={}
self.serial_port = rospy.get_param('~serial_port', '/dev/ttyACM0')
rospy.loginfo("opening serial port")
self.ser = serial.Serial(self.serial_port, 57600, timeout=0, parity=serial.PARITY_NONE)
thread = threading.Thread(target=self.read_from_port)#, args=(serial_port,))
thread.start()
self.input_pub = rospy.Publisher('/penetrometer_raw_data', std_msgs.msg.String, queue_size=0)
#Creating Action Server
rospy.loginfo("Creating action server.")
self._as = actionlib.SimpleActionServer(name, thorvald_penetrometer.msg.ProbeSoilAction, execute_cb = self.executeCallback, auto_start = False)
self._as.register_preempt_callback(self.preemptCallback)
rospy.loginfo(" ...starting")
self._as.start()
rospy.loginfo(" ...done")
rospy.loginfo("Creating services")
sname=name+'/save_params'
s = rospy.Service(sname, std_srvs.srv.Trigger, self.save_params)
sname=name+'/clear_errors'
s1 = rospy.Service(sname, std_srvs.srv.Trigger, self.clear_errors_req)
rospy.loginfo("initialising device done ...")
self.initialise_penetrometer()
self.clear_errors()
self.set_e_stop(False)
self.set_power_enable(True)
#Creating Dyn reconf server
rospy.loginfo("Creating dynamic reconfigure server.")
self.dynsrv = dynamic_reconfigure.server.Server(PenetrometerConfig, self.dyn_reconf_callback)
self.send_home()
rospy.loginfo("ROS init done ...")
rospy.loginfo("ALL done ...")
rospy.spin()
#self.write_config_to_file()
self.running=False
self.ser.close()
def clear_errors_req(self, req):
self.clear_errors()
return True, "Errors cleared"
def save_params(self, req):
self.write_config_to_file()
return True, "saved to params.yaml"
def write_config_to_file(self):
config = dict(self.config)
del config['groups']
yml = yaml.safe_dump(config, default_flow_style=False)
fh = open("params.yaml", "w")
s_output = str(yml)
fh.write(s_output)
fh.close()
def read_from_port(self):
serial_buffer=[]
#response=[]
while self.running:
if (self.ser.inWaiting()>0): #if incoming bytes are waiting to be read from the serial input buffer
data_str = self.ser.read(self.ser.inWaiting())#.decode('ascii') #read the bytes and convert from binary array to ASCII
print "|"+data_str+"|"
for i in data_str:
serial_buffer.append(i)
while '\n' in serial_buffer:
#print "end found"
nind= serial_buffer.index('\n')
self.reply_buf.append(serial_buffer[0:nind])
pen_data = ''.join(serial_buffer[0:nind])
self.input_pub.publish(pen_data)
for i in reversed(range(nind+1)):
serial_buffer.pop(i)
print serial_buffer
# if len(self.reply_buf)>0:
# print(self.reply_buf)
            rospy.sleep(0.001)  # Optional: sleep 1 ms (0.001 sec) once per loop to let other threads on your PC run
def dyn_reconf_callback(self, config, level):
#rospy.loginfo("""Reconfigure Request: {counts}""".format(**config))
#self.max_counts = config['counts']
#print "reconfigure ", config
if self.config:
changed_dict = {x: self.config[x] != config[x] for x in self.config if x in config}
lk = [key for (key, value) in changed_dict.items() if value]
#print "config changed ", lk, config[lk[0]]
self.set_config(lk[0], config[lk[0]])
self.config = config
else:
#print "First config: ", config.items()
self.config = config
for i in config.items():
self.set_config(i[0], i[1])
rospy.sleep(0.1)
return config
def set_config(self, field, value):
if field in self._config_commands.keys():
print field, value
if isinstance(value,bool):
value=int(value)
if field in self._decimilimiters_configs:
value=int(value*10)
if field in self._centiseconds_configs:
value=int(value/10)
command = self._config_commands[field]+str(value)
reply = self._config_commands[field].capitalize()+str(value)
print command, reply
self.send_command(command)
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("%s set at %d" %(field, value))
else:
rospy.logerr("Something failed when setting %s set at %d, response code (%s)" %(field,value,self.last_response))
rospy.loginfo("Maybe, try again?")
def send_command(self, command):
for i in command:
self.ser.write(i)
rospy.sleep(0.001)
self.ser.write('\n')
def clear_reply_buf(self):
self.reply_buf=[]
def wait_for_reply(self, expected, timeout=10):
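        # Polls the reply buffer every 50 ms, so `timeout` seconds
        # corresponds to timeout * 20 loop iterations.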
time_count=0
response=''
replied=False
while not replied and time_count<= (timeout*20) :
if len(self.reply_buf)>0:
response = self.reply_buf.pop(0)
replied = True
else:
rospy.sleep(0.05)
time_count+=1
self.last_response = ''.join(response)
if self.last_response == expected:
return True
else:
if time_count > (timeout*20):
self.last_response = 'timeout'
return False
def initialise_penetrometer(self, retries=3):
self.clear_reply_buf()
self.send_command('@')
rospy.loginfo("waiting for initialisation confirmation")
response = self.wait_for_reply("@1")
if response:
rospy.loginfo("initialisation correct!")
else:
if retries > 0:
rospy.logwarn("wrong response try again")
self.initialise_penetrometer(retries=retries-1)
else:
rospy.logerr("too many fails!!")
def clear_errors(self):
self.clear_reply_buf()
self.send_command('f0')
rospy.loginfo("clearing errors")
response = self.wait_for_reply('F0 ')
if response:
rospy.loginfo("Errors cleared")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try clear error service again?")
def set_e_stop(self, enable):
self.clear_reply_buf()
if enable:
command = 'e1'
reply = 'E1'
else:
command = 'e0'
reply = 'E0'
self.send_command(command)
rospy.loginfo("clearing estop")
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("estop reset")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try set estop service again?")
def set_power_enable(self, enable):
self.clear_reply_buf()
if enable:
command = 'p1'
reply = 'P1'
else:
command = 'p0'
reply = 'P0'
self.send_command(command)
rospy.loginfo("Enabling")
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("Enabled")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try set power service again?")
def send_home(self):
self.clear_reply_buf()
self.send_command('z')
rospy.loginfo("Homing")
response = self.wait_for_reply('Z1', timeout=120)
if response:
rospy.loginfo("Homed")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try send home service again?")
def get_data(self, timeout=10):
print "."
time_count=0
response=''
replied=False
while (not replied) and time_count <= (timeout*20) :
#print (self.reply_buf)
if len(self.reply_buf)>0:
response = self.reply_buf.pop(0)
response = ''.join(response)
#print "data in ->", response
replied = True
else:
rospy.sleep(0.05)
time_count+=1
if time_count > (timeout*20):
#print "timed out"
return None
else:
return response
def executeCallback(self, goal):
self.cancelled=False
finished=False
self.depth_data=[]
self.force_data=[]
self.clear_reply_buf()
rospy.loginfo("Probing")
self.send_command('!1')
while not self.cancelled and not finished:
print "++"
data_str = self.get_data(timeout=15)
print data_str
if data_str:
if data_str == '!1':
finished=True
else:
print "appending"
cd=data_str.lstrip('*').split(',')
self.depth_data.append(int(cd[0]))
self.force_data.append(int(cd[1]))
else:
self.cancelled=True
rospy.loginfo("Probe finished")
self.send_home()
if not self.cancelled:
self._result.result = True
self._result.depth = self.depth_data
self._result.force = self.force_data
rospy.loginfo('Succeeded')
self._as.set_succeeded(self._result)
else:
self._as.set_preempted()
def preemptCallback(self):
self.cancelled=True
if __name__ == '__main__':
rospy.init_node('thorvald_penetrometer')
server = PenetrometerServer(rospy.get_name())
| [
"[email protected]"
] | |
0b1fe5dd3b5144549478b281e02f43cbacf5bb45 | ae6622afe8364de9fe7464d20228dff80f1f13d5 | /cours_POO/Projet/Plateau.py~ | b7e6bccdadb4433065a5b4cf603e8175e3a4254d | [] | no_license | porigonop/code_v2 | eafb2adcbbb2d88dc607fb54d87ef301faeca776 | 147773cc8871d74f1ec1d6bd03e3cce95e9490d1 | refs/heads/master | 2021-06-25T20:39:37.819172 | 2021-01-11T13:31:45 | 2021-01-11T13:31:45 | 97,024,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | #!/usr/bin/env python3
from Moteur import Moteur
class Plateau:
    def __init__(self, max_x, max_y):
        """Plateau constructor; takes the maximum x and y positions it can move to."""
self.moteur_x = Moteur(0, max_x)
self.moteur_y = Moteur(0, max_y)
    def deplace_x(self, x):
x_init = self.moteur_x.position()
if x_init < x:
if self.moteur_x.direction():
self.moteur_x.avance(x - x_init)
else:
self.moteur_x.change_direction()
self.moteur_x.avance(x - x_init)
else:
if not self.moteur_x.direction():
self.moteur_x.avance(x_init - x)
def aller_a(self, x, y):
x_init = self.moteur_x.position()
        y_init = self.moteur_y.position()
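# Minimal usage sketch (assumes the Moteur class exposes the position(), direction(),
# change_direction() and avance() methods used above):
#     plateau = Plateau(100, 100)
#     plateau.deplace_x(42)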
| [
"Antoine Lievre"
] | Antoine Lievre |
|
1f4cbb30733a343b0ca5263a94e3f50c0079b49c | 6f9c652eda2a9fad43031d3321e3b77add8737fd | /app/api/views/version.py | eaa2aa610592940a5ea6a1fd823ba829eff73204 | [] | no_license | Y16v/intelect | eddd6a8f47d71226287d0c27f63995468950929a | 0542ae3a7300c366d77284fcb1a991dd0316f97a | refs/heads/main | 2023-08-15T17:05:33.627560 | 2021-10-14T16:16:49 | 2021-10-14T16:16:49 | 417,198,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from django.http import JsonResponse
import os
from intelect_kg.settings import FRONTEND_DIR
def get_version(request):
try:
version_path = os.path.join(os.path.dirname(FRONTEND_DIR), 'version')
version_file = open(version_path, 'r')
version = version_file.readline()
except FileNotFoundError:
version = ""
return JsonResponse({
'version': version
}) | [
"[email protected]"
] | |
39c5075ed44cd2213f1a886f5908e0a89c393c13 | f886c00fa022cc997f18e8d1228510624e274af8 | /BlackJack.py | 6ee9eda15954c7ceb8a820b76fecc59a6746e41f | [] | no_license | manugangadhar/Python-Bootcamp-Zero-to-Hero | 9eaecfd557dca62c404cd9ed0e83fcf77d2ee58b | d7d75bc4789e20c4e655100257422f7bd9bd4183 | refs/heads/master | 2020-04-16T23:35:16.007948 | 2019-02-18T10:23:03 | 2019-02-18T10:23:03 | 166,017,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,056 | py | import random
card_types = ['s', 'h', 'd', 'c']
global players_list
players_list = {}
global dealer_cards
dealer_cards = []
global fiftytwo_cards
fiftytwo_cards = set()
class Blackjack:
def __init__(self, name, bet, player_cards, gain_loss):
self.name = name
self.bet = bet
self.player_cards = player_cards
self.gain_loss = gain_loss
def card_distribute(self, set_of_cards):
self.player_cards.append(set_of_cards.pop())
def player_stand(self):
pass
def player_hit(self, set_of_cards):
print(f"*****Player {self.name} will take a hit*****")
self.player_cards.append(set_of_cards.pop())
def player_split(self):
pass
def calculate_wins(self):
self.gain_loss = self.gain_loss + self.bet
return self.gain_loss
def calculate_loss(self):
self.gain_loss = self.gain_loss - self.bet
return self.gain_loss
def value_of_cards(self, player_cards):
card_values = []
for i in player_cards:
if i[1:] in ['J','Q','K']:
card_values.append(10)
elif i[1:] in ['1','2','3','4','5','6','7','8','9','10']:
card_values.append(int(i[1:]))
else:
if (21 - sum(card_values)) >= 11:
card_values.append(11)
else:
card_values.append(1)
return sum(card_values)
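    # Worked example of the ace rule above (cards are valued in order; an ace already
    # counted as 11 is not demoted later): ['sA', 'h9'] gives 11 + 9 = 20, while
    # ['h9', 'dK', 'sA'] reaches 19 before the ace, so the ace counts as 1 for a total of 20.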
def show_cards(self, player_cards):
for i in self.player_cards:
if i[0] == 's':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Spade - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'h':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Heart - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'd':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Diamond -|')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'c':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| clubs - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
def card_shuffle(fiftytwo_cards):
random.shuffle(fiftytwo_cards)
random.shuffle(fiftytwo_cards)
return fiftytwo_cards
def card_distribute_dealer(set_cards):
    dealer_cards.append(set_cards.pop())
def show_cards_dealer(dealer_cards):
for i in dealer_cards:
if i[0] == 's':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Spade - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'h':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Heart - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'd':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| Diamond -|')
print('| |')
print(f'| {i[1:]}|')
print('------------')
if i[0] == 'c':
print('------------')
print(f'|{i[1:]} |')
print('| |')
print('| clubs - |')
print('| |')
print(f'| {i[1:]}|')
print('------------')
def check_value_of_dealercards(dealer_cards):
card_values = []
for i in dealer_cards:
if i[1:] in ['J', 'Q', 'K']:
card_values.append(10)
elif i[1:] in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']:
card_values.append(int(i[1:]))
else:
if (21 - sum(card_values)) >= 11:
card_values.append(11)
else:
card_values.append(1)
return sum(card_values)
def populate_cards(card_types):
cards_fiftytwo = []
for i in card_types:
for j in range(1, 11):
if j == 1:
cards_fiftytwo.append(i + 'A')
else:
cards_fiftytwo.append(i+str(j))
cards_fiftytwo.append(i+'J')
cards_fiftytwo.append(i+'Q')
cards_fiftytwo.append(i+'K')
return cards_fiftytwo
def card_distribution(players_list, set_of_cards):
for i in players_list.values():
i.card_distribute(set_of_cards)
card_distribute_dealer(set_of_cards)
for i in players_list.values():
i.card_distribute(set_of_cards)
print("#######################################")
print("#Player {} cards are displayed below#".format(i.name))
print("#######################################")
i.show_cards(i.player_cards)
print("Total value of your cards is {}".format(i.value_of_cards(i.player_cards)))
card_distribute_dealer(set_of_cards)
print("#######################################")
print("#Dealer first card is displayed below#")
print("#######################################")
show_cards_dealer([dealer_cards[0]])
temp_cards = populate_cards(card_types)
set_of_cards = card_shuffle(temp_cards)
print(set_of_cards)
set_of_cards.reverse()
number_players = int(input("Choose number of players who wish to play BlackJack\n"))
for i in range(1, number_players + 1):
    name = input("Enter Player {} name: ".format(str(i)))
bet = int(input("Enter your betting amount: "))
player_cards = []
players_list[str(i)] = Blackjack(name, bet, player_cards,0)
while 1:
card_distribution(players_list, set_of_cards)
if check_value_of_dealercards(dealer_cards) == 21:
print("JackPot for Dealer he got 21")
print("#######################################")
print("#Dealer cards are displayed below#")
print("#######################################")
show_cards_dealer(dealer_cards)
for i in players_list.values():
print(f"Player {i.name} loss or gain is {i.calculate_loss()}")
if int(input("Do you wish to continue? Enter 0 or 1")):
            dealer_cards = []
for i in players_list.values():
i.player_cards = []
continue
else:
print("*********Bye***** Bye*****")
quit()
else:
print("Dealer doesn't have 21")
count_number_players_greater_21 = 0
players_on_stay = []
for i in players_list.values():
while 1:
if i.value_of_cards(i.player_cards) == 21:
print(f"Jackpot for player {i.name}")
print("#######################################")
print("#Player {} cards are displayed below#".format(i.name))
print("#######################################")
i.show_cards(i.player_cards)
print(f"Player {i.name} Total gains or loss is {i.calculate_wins()}")
i.player_cards = []
break
if i.value_of_cards(i.player_cards) < 21:
hit_stand_split = int(input(f"Player {i.name}, press 1 for hit, press 2 for stand, press 3 for split"))
if hit_stand_split == 1:
i.card_distribute(set_of_cards)
print("#######################################")
print("#Player {} cards are displayed below#".format(i.name))
print("#######################################")
i.show_cards(i.player_cards)
print("Total value of your cards is {}".format(i.value_of_cards(i.player_cards)))
continue
if hit_stand_split == 2:
players_on_stay.append(i)
print(f"Player {i.name} has opted for stay on value {i.value_of_cards(i.player_cards)}")
print("#######################################")
print("#Player {} cards are displayed below#".format(i.name))
print("#######################################")
i.show_cards(i.player_cards)
break
if hit_stand_split == 3:
pass
if i.value_of_cards(i.player_cards) > 21:
count_number_players_greater_21 = count_number_players_greater_21 + 1
print(f"Player {i.name} cards value is more than 21")
print("#######################################")
print("#Player {} cards are displayed below#".format(i.name))
print("#######################################")
i.show_cards(i.player_cards)
print(f"Player {i.name} Total gains or loss is {i.calculate_loss()}")
i.player_cards = []
break
if count_number_players_greater_21 == number_players:
print("All players lost and Dealer Won")
            dealer_cards = []
for i in players_list.values():
i.player_cards = []
if int(input("Do you wish to continue? Enter 0 or 1")):
continue
else:
print("*********Bye***** Bye*****")
quit()
else:
print("#######################################")
print("#Dealer cards are displayed below######")
print("#######################################")
show_cards_dealer(dealer_cards)
while 1:
if check_value_of_dealercards(dealer_cards) > 21:
print("Dealer lost the game")
print("#######################################")
print("#Dealer cards are displayed below######")
print("#######################################")
show_cards_dealer(dealer_cards)
for i in players_on_stay:
print(f"Player {i.name} Total Gains or Loss is {i.calculate_wins()}")
i.player_cards = []
players_on_stay = []
                    dealer_cards = []
if int(input("Do you wish to continue? Enter 0 or 1")):
break
else:
print("*********Bye***** Bye*****")
quit()
if check_value_of_dealercards(dealer_cards) == 21:
print("JackPot for Dealer he got 21")
print("#######################################")
print("#Dealer cards are displayed below#")
print("#######################################")
show_cards_dealer(dealer_cards)
                    dealer_cards = []
for i in players_on_stay:
print(f"Player {i.name} loss or gain is {i.calculate_loss()}")
i.player_cards = []
players_on_stay = []
if int(input("Do you wish to continue? Enter 0 or 1")):
break
else:
print("*********Bye***** Bye*****")
quit()
if check_value_of_dealercards(dealer_cards) < 21:
count_negative = 0
count_positive = 0
for i in players_on_stay:
if check_value_of_dealercards(dealer_cards) - i.value_of_cards(i.player_cards) < 0:
count_negative = count_negative + 1
if check_value_of_dealercards(dealer_cards) - i.value_of_cards(i.player_cards) > 0:
count_positive = count_positive + 1
if (count_positive < count_negative) and (21 - check_value_of_dealercards(dealer_cards)) >= 6:
card_distribute_dealer(set_of_cards)
print("Dealer went for hit as he is having less than 21")
print("#######################################")
print("#Dealer cards are displayed below#")
print("#######################################")
show_cards_dealer(dealer_cards)
continue
if (count_positive < count_negative) and (21 - check_value_of_dealercards(dealer_cards)) < 6:
print("Dealer decided to stay and no more hits")
for i in players_on_stay:
                            if i.value_of_cards(i.player_cards) > check_value_of_dealercards(dealer_cards):
                                print(f"Player {i.name} loss or gain is {i.calculate_wins()}")
                            elif i.value_of_cards(i.player_cards) == check_value_of_dealercards(dealer_cards):
print(f"No Loss or Gain for player {i.name}")
else:
print(f"Player {i.name} loss or gain is {i.calculate_loss()}")
                        dealer_cards = []
                        for i in players_on_stay:
                            i.player_cards = []
                        players_on_stay = []
if int(input("Do you wish to continue? Enter 0 or 1")):
break
else:
print("*********Bye***** Bye*****")
quit()
if (count_positive > count_negative) and (21 - check_value_of_dealercards(dealer_cards)) < 6:
print("Dealer decided to stay and no more hits")
for i in players_on_stay:
                            if i.value_of_cards(i.player_cards) > check_value_of_dealercards(dealer_cards):
                                print(f"Player {i.name} loss or gain is {i.calculate_wins()}")
                            elif i.value_of_cards(i.player_cards) == check_value_of_dealercards(dealer_cards):
print(f"No Loss or Gain for player {i.name}")
else:
print(f"Player {i.name} loss or gain is {i.calculate_loss()}")
                        dealer_cards = []
                        for i in players_on_stay:
                            i.player_cards = []
                        players_on_stay = []
if int(input("Do you wish to continue? Enter 0 or 1")):
break
else:
print("*********Bye***** Bye*****")
quit()
if count_positive > count_negative and (21 - check_value_of_dealercards(dealer_cards)) >= 6:
card_distribute_dealer(set_of_cards)
print("Dealer went for hit as he is having less than 21")
print("#######################################")
print("#Dealer cards are displayed below#")
print("#######################################")
show_cards_dealer(dealer_cards)
continue
if count_positive == count_negative and (21 - check_value_of_dealercards(dealer_cards)) >= 6:
card_distribute_dealer(set_of_cards)
print("Dealer went for hit as he is having less than 21")
print("#######################################")
print("#Dealer cards are displayed below#")
print("#######################################")
show_cards_dealer(dealer_cards)
continue
if count_positive == count_negative and (21 - check_value_of_dealercards(dealer_cards)) < 6:
print("Dealer decided to stay and no loss no gain for rest of the players")
                        dealer_cards = []
                        for i in players_on_stay:
                            print(f"No Loss or Gain for player {i.name}")
                            i.player_cards = []
players_on_stay = []
if int(input("Do you wish to continue? Enter 0 or 1")):
break
else:
print("*********Bye***** Bye*****")
quit() | [
"[email protected]"
] | |
eae25c446f1476173e50c81ee59f60f158f41104 | c6ceceb555759b7aa59599fbd46adadc73f5ae2d | /forms.py | 4342b181de359bc477eb83a9955a385009244c0a | [] | no_license | DruSerkes/Adopt-a-Pet | 989ca9db1b152b5964aab651581465ac37628886 | 91120ef0c3b34a670d2108925fe10884e78cc660 | refs/heads/master | 2023-05-11T04:47:31.502430 | 2023-05-02T12:04:03 | 2023-05-02T12:04:03 | 270,837,220 | 0 | 0 | null | 2023-05-02T12:05:08 | 2020-06-08T21:45:23 | HTML | UTF-8 | Python | false | false | 1,070 | py | from flask_wtf import FlaskForm
from wtforms import StringField, FloatField, BooleanField, IntegerField, RadioField, SelectField
from wtforms.fields.html5 import URLField
from wtforms.validators import InputRequired, Optional, AnyOf, NumberRange, DataRequired
class PetForm(FlaskForm):
""" Form for Pet data """
name = StringField("Pet Name", validators=[
DataRequired(message="Enter a name for your pet")])
species = StringField("Species", validators=[
DataRequired(message="Enter a species for your pet"), AnyOf(['Dog', 'Cat', 'Porcupine'], message="Species must be either 'Dog' 'Cat' or 'Porcupine'")])
photo_url = URLField('Image URL', validators=[Optional()])
age = IntegerField("Pet Age", validators=[NumberRange(
min=0, max=30, message="Age must be between 0 and 30"), Optional()])
notes = StringField("Anything else we should know?",
validators=[Optional()])
available = BooleanField(
"Is this pet available for adoption?", default='checked')
| [
"[email protected]"
] | |
5c958bbe5102f43324270833aeb0498212e1dfa1 | d05cd4d5963de73323cb5e86b7a832992649091d | /python/Lab13/Lab13_6.py | 928b539d47b2c8e2d5f08aa6dc9489648fe8d09f | [] | no_license | bhelga/university | c46eeed000f08b8faa9e5e7556816b5ee3deb67e | 0169d245e9b9b459de2dfb4433653e609cb1e927 | refs/heads/main | 2023-07-02T20:22:18.372432 | 2021-08-04T16:03:41 | 2021-08-04T16:03:41 | 343,385,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | from collections import namedtuple
import random
def good_insurers(insurers):
    average = 0.0
    for i in insurers:
        average += i.number_of_agreements
    average /= len(insurers)
    ary = []
    print("Insurers", end=" ")
    for i in insurers:
        if i.number_of_agreements > average:
            ary.append(i)
            print(i.last_name, end=", ")
    print("are the best this month!")
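# Example: with agreement counts [15, 20, 13, 18, 10, 17, 9] the average is 102/7 ~ 14.57,
# so only the insurers with more than 14.57 agreements are printed.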
Insurer = namedtuple('Insurer', 'last_name mobile_phone address number_of_agreements')
insurer1 = Insurer("Melnuk", "0685453956", "6 Beatty Rise", 15)
insurer2 = Insurer("Shevchencko", "0539563407", "Fuglsangshaven 19, 1. sal. th.", 20)
insurer3 = Insurer("Voiko", "0995674967", "Kjeldsen Vej 55", 13)
insurer4 = Insurer("Kovalenko", "0675754965", "Kaivolakaarto 7", 18)
insurer5 = Insurer("Bondarenko", "0985354956", "12454 Dwight Keys", 10)
insurer6 = Insurer("Tkachenko", "0976754978", "696 Pete Avenue Apt. 151", 17)
insurer7 = Insurer("Oliynuk", "0986768967", "18572 Wunsch Keys", 9)
insurers = [insurer1, insurer2, insurer3, insurer4, insurer5, insurer6, insurer7]
good_insurers(insurers)
new = []
#temp = list(insurers)
for i in insurers:
new.append(i._replace(number_of_agreements=random.randint(1, 30)))
# insurers[i]._replace(number_of_agreements=random.randint(1, 30))
print(*i, sep="\n", end="\n\n")
#insurers.__add__(i._replace(number_of_agreements=random.randint(1, 30)))
print(*insurers)
good_insurers(new) | [
"[email protected]"
] | |
2b2b7d8dae3230411d0c99c98d1108263429d8e4 | 720c106a3e5e13b58154070e530574f110b1c138 | /01.tictactoe/sample.py | e077c485cf0a90b2eb073131d4283bdd8ce374a5 | [] | no_license | thanpolas/hackerrank | 37c8be4b3f0f96ed4211e6a93eab043828a7e347 | 7aaf3805c8e1811b912dee849b304f1e1d46c892 | refs/heads/master | 2020-12-24T13:36:07.891519 | 2013-01-09T17:50:07 | 2013-01-09T17:50:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #!/bin/python
import random
# Complete the function below to print 2 integers separated by a single space which will be your next move
def nextMove(player,board):
    print(1, 0)
#If player is X, I'm the first player.
#If player is O, I'm the second player.
player = input()
#Read the board now. The board is a 3x3 array filled with X, O or _.
board = []
for i in range(0, 3):
    board.append(input())
nextMove(player,board) | [
"[email protected]"
] | |
5046ab134fa58280291b52068b117f161cc3c023 | 69cc3a2d036f5a7d26844913a8ae6167a26c33c4 | /univ_softplus.py | ad4aeb9711d3d7f39805910a2f127a9e0ee39323 | [] | no_license | JakeSigwart/Curve_fitting | c6758ce69b1e22973940418f4ccc5333046038f8 | 67adbd2d51aff023379143e17fe154cb7e21b6ce | refs/heads/master | 2020-03-22T01:59:03.302925 | 2018-07-01T15:57:29 | 2018-07-01T15:57:29 | 139,342,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #The purpose of this program is to show that a neural network is a universal approximator
#In order to be a universal approximator, the network must have non-linearities, e.g. the softplus function
#I am training the network to approximate sin(x) on [-2*pi, 2*pi]
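# For reference, softplus(x) = ln(1 + e^x): a smooth, strictly positive non-linearity that
# approaches ReLU for large |x| (e.g. softplus(0) = ln 2 ~ 0.693 and softplus(5) ~ 5.007).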
import os
import math
import time
import random
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
path = os.path.dirname(__file__)
save_path = path + "\\univ_softplus\\classifier.ckpt"
sess = tf.Session()
inputs = tf.placeholder(tf.float32, shape=[None, 1], name='inputs')
labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')
inputs_1 = tf.map_fn(lambda x_val: tf.fill([64], 1.000)*x_val, inputs)
W_1 = tf.Variable(tf.truncated_normal([64, 64],mean=0.0,stddev=0.088), name='W_1')
b_1 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_1')
h_1 = tf.matmul(inputs_1, W_1) + b_1
W_2 = tf.Variable(tf.truncated_normal([64, 64],mean=0.0,stddev=0.088), name='W_2')
b_2 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_2')
h_2 = tf.matmul(h_1, W_2) + b_2
h_actv_2 = tf.nn.softplus(h_2, name='h_actv_2')
W_3 = tf.Variable(tf.truncated_normal([64, 64],mean=0.0,stddev=0.088), name='W_3')
b_3 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_3')
h_3 = tf.matmul(h_actv_2, W_3) + b_3
h_actv_3 = tf.nn.softplus(h_3, name='h_actv_3')
W_4 = tf.Variable(tf.truncated_normal([64,1],mean=0.0,stddev=0.088), name='W_4')
h_output = tf.matmul(h_actv_3, W_4)
loss = tf.reduce_mean(tf.square(h_output - labels))
Optimize = tf.train.AdamOptimizer(5e-3).minimize(loss)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if os.path.isfile(path + "\\univ_softplus\\checkpoint"):
saver.restore(sess, save_path)
print("Model restored.")
else:
print('Building new model...')
#TRAINING##
num_batches = 1024
batch_size = 1024
for i in range(num_batches):
#get random sine values
x = 4*math.pi*np.random.sample(batch_size) - 2*math.pi
x = x.reshape([batch_size,1])
y = np.zeros(shape=[batch_size, 1], dtype=float)
#y = sin of x[]
for index in range(batch_size):
y[index,0] = math.sin(x[index])
gradients, loss_out, output = sess.run([Optimize, loss, h_output], feed_dict={inputs: x, labels: y})
print(str(x[0]) + ', ' + str(output[0]))
print("Batch: " + str(i) + " Cross Entropy: " + str(loss_out))
save_path = saver.save(sess, path + "\\univ_softplus\\classifier.ckpt")
plt.scatter(x, output)
plt.xlabel('Independent Var x')
plt.ylabel('NN Approximation of sin(x)')
plt.show()
| [
"[email protected]"
] | |
e1e6b4a1acf4098929d38b140640b4ebe4c39c3d | c5447b9d51b8135a79b39bdb9f19806de860cf00 | /quick_train.py | dc7ee4799629ad278cb2b0a14c7f15bd9aff8247 | [
"MIT"
] | permissive | HarlanHW/My_YOLOv3 | d85c75a9e6eb02277b8923c09d28adbfe14cb4e4 | 03ebdd564f77dd42cb389460efba111df3c516ea | refs/heads/master | 2020-05-02T05:38:19.455680 | 2019-03-26T12:11:39 | 2019-03-26T12:11:39 | 177,774,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,402 | py | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : quick_train.py
# Author : YunYang1994
# Created date: 2019-01-21 14:46:26
# Description :
#
#================================================================
import tensorflow as tf
from core import utils, yolov3
from core.dataset import dataset, Parser
sess = tf.Session()
IMAGE_H, IMAGE_W = 416, 416 # input image size
BATCH_SIZE = 8 # batch size
EPOCHS = 20
LR = 0.001 # learning rate; if the loss becomes NaN, try 0.0005 or 0.0001
DECAY_STEPS = 100
DECAY_RATE = 0.9
SHUFFLE_SIZE = 200
CLASSES = utils.read_coco_names('./data/raccoon.names') # class names
ANCHORS = utils.get_anchors('./data/raccoon_anchors.txt', IMAGE_H, IMAGE_W) # anchor boxes
NUM_CLASSES = len(CLASSES) # number of classes
EVAL_INTERNAL = 100
SAVE_INTERNAL = 500
train_tfrecord = "./raccoon_dataset/raccoon_train.tfrecords"
test_tfrecord = "./raccoon_dataset/raccoon_test.tfrecords"
parser = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=SHUFFLE_SIZE)
testset = dataset(parser, test_tfrecord , BATCH_SIZE, shuffle=None)
is_training = tf.placeholder(tf.bool)
example = tf.cond(is_training, lambda: trainset.get_next(), lambda: testset.get_next())
images, *y_true = example
model = yolov3.yolov3(NUM_CLASSES, ANCHORS)
with tf.variable_scope('yolov3'):
pred_feature_map = model.forward(images, is_training=is_training)
loss = model.compute_loss(pred_feature_map, y_true)
y_pred = model.predict(pred_feature_map)
tf.summary.scalar("loss/coord_loss", loss[1])
tf.summary.scalar("loss/sizes_loss", loss[2])
tf.summary.scalar("loss/confs_loss", loss[3])
tf.summary.scalar("loss/class_loss", loss[4])
global_step = tf.Variable(0, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
write_op = tf.summary.merge_all()
writer_train = tf.summary.FileWriter("./data/train")
writer_test = tf.summary.FileWriter("./data/test")
saver_to_restore = tf.train.Saver(var_list=tf.contrib.framework.get_variables_to_restore(include=["yolov3/darknet-53"]))
update_vars = tf.contrib.framework.get_variables_to_restore(include=["yolov3/yolo-v3"])
learning_rate = tf.train.exponential_decay(LR, global_step, decay_steps=DECAY_STEPS, decay_rate=DECAY_RATE, staircase=True)
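# With staircase=True the rate decays in discrete steps: LR * DECAY_RATE**(global_step // DECAY_STEPS),
# e.g. at global_step = 200 the effective learning rate is 0.001 * 0.9**2 = 0.00081.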
optimizer = tf.train.AdamOptimizer(learning_rate)
# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss[0], var_list=update_vars, global_step=global_step)
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
saver_to_restore.restore(sess, "./checkpoint/yolov3.ckpt")
saver = tf.train.Saver(max_to_keep=2)
for epoch in range(EPOCHS):
run_items = sess.run([train_op, write_op, y_pred, y_true] + loss, feed_dict={is_training:True})
if (epoch+1) % EVAL_INTERNAL == 0:
train_rec_value, train_prec_value = utils.evaluate(run_items[2], run_items[3])
writer_train.add_summary(run_items[1], global_step=epoch)
writer_train.flush() # Flushes the event file to disk
if (epoch+1) % SAVE_INTERNAL == 0: saver.save(sess, save_path="./checkpoint/yolov3.ckpt", global_step=epoch+1)
print("=> EPOCH %10d [TRAIN]:\tloss_xy:%7.4f \tloss_wh:%7.4f \tloss_conf:%7.4f \tloss_class:%7.4f"
%(epoch+1, run_items[5], run_items[6], run_items[7], run_items[8]))
run_items = sess.run([write_op, y_pred, y_true] + loss, feed_dict={is_training:False})
if (epoch+1) % EVAL_INTERNAL == 0:
test_rec_value, test_prec_value = utils.evaluate(run_items[1], run_items[2])
print("\n=======================> evaluation result <================================\n")
print("=> EPOCH %10d [TRAIN]:\trecall:%7.4f \tprecision:%7.4f" %(epoch+1, train_rec_value, train_prec_value))
print("=> EPOCH %10d [VALID]:\trecall:%7.4f \tprecision:%7.4f" %(epoch+1, test_rec_value, test_prec_value))
print("\n=======================> evaluation result <================================\n")
writer_test.add_summary(run_items[0], global_step=epoch)
writer_test.flush() # Flushes the event file to disk
| [
"[email protected]"
] | |
fc309d02b0fd046fac2d2d289f27fba3c46c9ed1 | 766865913811b66ff8b3cfd2b1d1588a0b34cd52 | /app.py | 695e2b6fbe3adee25787a9a17bd3810dead6bc4b | [] | no_license | waizhen/Recruitment_Scam_Prediction | a80e0428128a648bb0ad5fe6ef953a015700f7b2 | ff0bb7554945797f11063254fbb35cdc784bf185 | refs/heads/main | 2023-06-27T08:37:44.706868 | 2021-07-19T14:52:15 | 2021-07-19T14:52:15 | 387,481,272 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,571 | py | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import gradio as gr
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import joblib
import lightgbm as lgb
from lightgbm import LGBMClassifier
# Create a country abbreviation dict
country_dict = {
'United States': 'US', 'New Zealand': 'NZ', 'Germany': 'DE', 'Great Britain': 'GB', 'Australia': 'AU',
'Singapore': 'SG', 'Israel': 'IL', 'United Arab Emirates': 'AE', 'Canada': 'CA', 'India': 'IN', 'Egypt': 'EG',
'Poland': 'PL', 'Greece': 'GR', 'Undefined': 'Undefined', 'Pakistan': 'PK', 'Belgium': 'BE', 'Brazil': 'BR',
'Saudi Arabia': 'SA', 'Denmark': 'DK', 'Russia': 'RU', 'South Africa': 'ZA', 'Cyprus': 'CY', 'Hong Kong': 'HK',
'Turkey': 'TR', 'Ireland': 'IE', 'Lithuania': 'LT', 'Japan': 'JP', 'Netherlands': 'NL', 'Austria': 'AT',
    'South Korea': 'KR', 'France': 'FR', 'Estonia': 'EE', 'Thailand': 'TH', 'Panama': 'PA', 'Kenya': 'KE',
'Mauritius': 'MU', 'Mexico': 'MX', 'Romania': 'RO', 'Malaysia': 'MY', 'Finland': 'FI', 'China': 'CN', 'Spain': 'ES',
'Sweden': 'SE', 'Chile': 'CL', 'Ukraine': 'UA', 'Qatar': 'QA', 'Italy': 'IT', 'Latvia': 'LV', 'Iraq': 'IQ',
'Bulgaria': 'BG', 'Philippines': 'PH', 'Czech Republic': 'CZ', 'U.S. Virgin Islands': 'VI', 'Malta': 'MT',
'Hungary': 'HU', 'Non-US countries': 'Non-US', 'Bangladesh': 'BD', 'Kuwait': 'KW', 'Luxembourg': 'LU',
'Nigeria': 'NG', 'Serbia': 'RS', 'Belarus': 'BY', 'Vietnam': 'VN', 'Indonesia': 'ID', 'Zambia': 'ZM', 'Norway': 'NO',
'Bahrain': 'BH', 'Uganda': 'UG', 'Switzerland': 'CH', 'U.S. & Other countries': 'US+other', 'Trinidad & Tobago': 'TT',
'Sudan': 'SD', 'Slovakia': 'SK', 'Argentina': 'AR', 'Taiwan': 'TW', 'Portugal': 'PT', 'Peru': 'PE', 'Colombia': 'CO',
'Iceland': 'IS', 'Slovenia': 'SI', 'Morocco': 'MA', 'Armenia': 'AM', 'Tunisia': 'TN', 'Ghana': 'GH', 'Albania': 'AL',
'Croatia': 'HR', 'Cameroon': 'CM', 'El Salvador': 'SV', 'Nicaragua': 'NI', 'Sri Lanka': 'LK', 'Jamaica': 'JM',
'Kazakhstan': 'KZ', 'Cambodia': 'KH'
}
# Create feature list
feature_list = [
"telecommuting", "has_company_logo", "has_questions", "raw_word_count", "employment_type_Contract", "employment_type_Full-time",
"employment_type_Full-time & Part-time", "employment_type_Other", "employment_type_Part-time", "employment_type_Temporary",
"required_experience_Associate", "required_experience_Director", "required_experience_Entry level", "required_experience_Executive",
"required_experience_Internship", "required_experience_Mid-Senior level", "required_experience_Not Applicable",
"required_education_Associate Degree", "required_education_Bachelor's Degree", "required_education_Certification",
"required_education_Doctorate", "required_education_Empty requirements", "required_education_High School or equivalent",
"required_education_Master's Degree", "required_education_Professional", "required_education_Some College Coursework Completed",
"required_education_Some High School Coursework", "required_education_Unspecified", "required_education_Vocational",
"required_education_Vocational - Degree", "required_education_Vocational - HS Diploma", "function_Accounting/Auditing",
"function_Administrative", "function_Advertising", "function_Art/Creative", "function_Business Analyst", "function_Business Development",
"function_Consulting", "function_Customer Service", "function_Data Analyst", "function_Design", "function_Distribution",
"function_Education", "function_Engineering", "function_Finance", "function_Financial Analyst", "function_General Business",
"function_Health Care Provider", "function_Human Resources", "function_Information Technology", "function_Legal",
"function_Management", "function_Manufacturing", "function_Marketing", "function_Other", "function_Product Management",
"function_Production", "function_Project Management", "function_Public Relations", "function_Purchasing", "function_Quality Assurance",
"function_Research", "function_Sales", "function_Science", "function_Strategy/Planning", "function_Supply Chain",
"function_Training", "function_Writing/Editing", "country_AE", "country_AL", "country_AM", "country_AR", "country_AT",
"country_AU", "country_BD", "country_BE", "country_BG", "country_BH", "country_BR", "country_BY", "country_CA", "country_CH",
"country_CL", "country_CM", "country_CN", "country_CO", "country_CY", "country_CZ", "country_DE", "country_DK", "country_EE",
"country_EG", "country_ES", "country_FI", "country_FR", "country_GB", "country_GH", "country_GR", "country_HK", "country_HR",
"country_HU", "country_ID", "country_IE", "country_IL", "country_IN", "country_IQ", "country_IS", "country_IT", "country_JM",
"country_JP", "country_KE", "country_KH", "country_KR", "country_KW", "country_KZ", "country_LK", "country_LT", "country_LU",
"country_LV", "country_MA", "country_MT", "country_MU", "country_MX", "country_MY", "country_NG", "country_NI", "country_NL",
"country_NO", "country_NZ", "country_Non-US", "country_PA", "country_PE", "country_PH", "country_PK", "country_PL",
"country_PT", "country_QA", "country_RO", "country_RS", "country_RU", "country_SA", "country_SD", "country_SE", "country_SG",
"country_SI", "country_SK", "country_SV", "country_TH", "country_TN", "country_TR", "country_TT", "country_TW", "country_UA",
"country_UG", "country_US", "country_US+other", "country_Undefined", "country_VI", "country_VN", "country_ZA", "country_ZM"
]
# Create a predict function with input parameters from user inputs
def predict(job_title, employment_type, required_experience, required_education, function, country,
telecommuting, has_company_logo, has_questions, location, company_profile, job_description,
requirements, benefits, plot_option):
    # Convert the country name to its 2-letter abbreviation
country = country_dict[country]
# Create structured df with all int zeros
df_structured = pd.DataFrame(int(0), index=np.arange(1), columns=feature_list)
    # Fill the structured df features according to the user inputs
df_structured.loc[0, 'telecommuting'] = int(np.where(telecommuting == 'Yes', 1, 0))
df_structured.loc[0, 'has_company_logo'] = int(np.where(has_company_logo == 'Yes', 1, 0))
df_structured.loc[0, 'has_questions'] = int(np.where(has_questions == 'Yes', 1, 0))
df_structured.loc[0, 'employment_type' + '_' + str(employment_type)] = 1
df_structured.loc[0, 'required_experience' + '_' + str(required_experience)] = 1
df_structured.loc[0, 'required_education' + '_' + str(required_education)] = 1
df_structured.loc[0, 'function' + '_' + str(function)] = 1
df_structured.loc[0, 'country' + '_' + str(country)] = 1
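    # Each categorical input switches on exactly one dummy column, e.g. employment_type
    # 'Full-time' sets 'employment_type_Full-time' to 1 while the other dummies stay 0.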
# Text df
df_text = pd.DataFrame({
'text': [job_title + ' ' + location + ' ' + company_profile + ' ' + job_description + ' ' + \
requirements + ' ' + benefits]
})
# Get the raw word count
df_structured['raw_word_count'] = df_text['text'].apply(lambda x: len(x.split()))
# For plotting
filtered_val = ''
if plot_option == 'function':
filtered_val = function
elif plot_option == 'country':
filtered_val = country
df_plot = pd.read_csv("recruitment_plotting.csv")
df_plotted = df_plot[
df_plot[str(plot_option)] == str(filtered_val)
].groupby('fraudulent').size().reset_index().rename(columns={0: 'count'})
df_plotted['percentage_count'] = df_plotted['count'] / df_plotted['count'].sum() * 100
# Plot graph based on user input (function or country, count or percent count)
fig = plt.figure(figsize=(9.5, 7))
sns.set(font_scale=1)
ax = sns.barplot(data=df_plotted, x='fraudulent', y='percentage_count', palette='Oranges_r')
plt.title('Percentage count of real and fraud jobs from ' + str(plot_option) + ' ' + str(filtered_val), fontsize=13)
if (len(df_plotted) == 1) & (df_plotted['fraudulent'][0] == 0):
ax.set_xticklabels(['Real job'])
elif (len(df_plotted) == 1) & (df_plotted['fraudulent'][0] == 1):
ax.set_xticklabels(['Fraud job'])
elif len(df_plotted) > 1:
ax.set_xticklabels(['Real job', 'Fraud job'])
plt.ylabel("Percentage count", fontsize=12.25)
plt.xlabel("Fraudulent", fontsize=12.25)
plt.close()
model = joblib.load('RS_lgbm_over_55-45_18072021.pkl')
X = df_structured.values
pred = model.predict(X)[0]
prob = model.predict_proba(X)[0]
message = str(np.where(pred == 1, '\nThe model identifies the job post as a fraud employment posting.\nBe careful of this job!\n ',
"\nThe model identifies the job post as a real employment posting.\nYou can proceed with the job!\n "))
return {'Real Job Post': prob[0], 'Fraud Job Post': prob[1]}, message, fig
# Define input variables
job_title = gr.inputs.Textbox(placeholder="Enter job title here", label='Job Title')
employment_type = gr.inputs.Radio(['Full-time', 'Part-time', 'Contract', 'Temporary', 'Full-time & Part-time', 'Other'],
label="Employment Type", default='Full-time')
required_experience = gr.inputs.Dropdown(['Internship', 'Entry level', 'Associate', 'Mid-Senior level', 'Executive',
'Director', 'Not Applicable'], label="Required Experience Category",
default = 'Entry level')
required_education = gr.inputs.Dropdown([
"Bachelor's Degree", "Master's Degree", 'High School or equivalent', 'Empty requirements', 'Associate Degree',
'Some College Coursework Completed', 'Vocational', 'Certification', 'Doctorate', 'Professional',
'Some High School Coursework', 'Vocational - Degree', 'Vocational - HS Diploma', 'Unspecified'
], label="Required Education", default="Bachelor's Degree")
function = gr.inputs.Dropdown(sorted([
'Marketing', 'Customer Service', 'Administrative', 'Sales', 'Health Care Provider', 'Management',
'Information Technology', 'Human Resources', 'Other', 'Engineering', 'Design', 'Education', 'Production',
'Supply Chain', 'Business Development', 'Product Management', 'Financial Analyst', 'Consulting', 'Project Management',
'Manufacturing', 'Public Relations', 'Writing/Editing', 'Strategy/Planning', 'General Business', 'Advertising',
'Finance', 'Research', 'Accounting/Auditing', 'Art/Creative', 'Quality Assurance', 'Training', 'Data Analyst',
'Business Analyst', 'Distribution', 'Legal', 'Science', 'Purchasing'
]), label="Job Function")
country = gr.inputs.Dropdown(sorted([
'United States', 'New Zealand', 'Germany', 'Great Britain', 'Australia', 'Singapore', 'Israel',
'United Arab Emirates', 'Canada', 'India', 'Egypt', 'Poland', 'Greece', 'Undefined', 'Pakistan', 'Belgium', 'Brazil',
'Saudi Arabia', 'Denmark', 'Russia', 'South Africa', 'Cyprus', 'Hong Kong', 'Turkey', 'Ireland', 'Lithuania', 'Japan',
'Netherlands', 'Austria', 'South Korea', 'France', 'Estonia', 'Thailand', 'Panama', 'Kenya', 'Mauritius', 'Mexico',
'Romania', 'Malaysia', 'Finland', 'China', 'Spain', 'Sweden', 'Chile', 'Ukraine', 'Qatar', 'Italy', 'Latvia', 'Iraq',
'Bulgaria', 'Philippines', 'Czech Republic', 'U.S. Virgin Islands', 'Malta', 'Hungary', 'Non-US countries',
'Bangladesh', 'Kuwait', 'Luxembourg', 'Nigeria', 'Serbia', 'Belarus', 'Vietnam', 'Indonesia', 'Zambia', 'Norway',
'Bahrain', 'Uganda', 'Switzerland', 'U.S. & Other countries', 'Trinidad & Tobago', 'Sudan', 'Slovakia', 'Argentina',
'Taiwan', 'Portugal', 'Peru', 'Colombia', 'Iceland', 'Slovenia', 'Morocco', 'Armenia', 'Tunisia', 'Ghana', 'Albania',
'Croatia', 'Cameroon', 'El Salvador', 'Nicaragua', 'Sri Lanka', 'Jamaica', 'Kazakhstan', 'Cambodia'
]), label="Country of Job Post", default="United States")
telecommuting = gr.inputs.Radio(['Yes', 'No'], label='Are there any telecommuting options available?')
has_company_logo = gr.inputs.Radio(['Yes', 'No'], label='Does the company have any company logo?')
has_questions = gr.inputs.Radio(['Yes', 'No'], label='Does the job post contain any questions?')
location = gr.inputs.Textbox(placeholder="Enter location here: eg. US, CA, Los Angeles",
label='Location in the form of country, state, city')
company_profile = gr.inputs.Textbox(lines=4, placeholder="Enter company profile here", label='Company Profile (if any)')
job_description = gr.inputs.Textbox(lines=4, placeholder="Enter job description here", label='Full Job Description')
requirements = gr.inputs.Textbox(lines=4, placeholder="Enter job requirements here", label='Full Job Requirements (if any)')
benefits = gr.inputs.Textbox(lines=4, placeholder="Enter job benefits here", label='Benefits of the Job (if any)')
plot_option = gr.inputs.Radio(['function', 'country'], label='Statistical plotting option')
# Output textbox
output_label = gr.outputs.Label(num_top_classes=2, label="Predicted Label")
output_text = gr.outputs.Textbox(label="Output Message")
output_plot = gr.outputs.Image(plot=True, label="Statistical Plot", type='plot')
# Combine the inputs and outputs into Interface function
io = gr.Interface(predict, [
job_title, employment_type, required_experience, required_education, function, country, telecommuting,
has_company_logo, has_questions, location, company_profile, job_description, requirements, benefits, plot_option
], [output_label, output_text, output_plot], verbose=True, theme='default', title="Job Posting Fraud Prediction Web App",
layout = "unaligned",
description="""
This is a simple web app that helps users detect whether a job posting is real or fraudulent. Fill in the inputs and enjoy exploring the app!
""",
article="""
Nowadays, job scams are on the rise due to advances in technology and the internet. There have been many reports of victims
being scammed by fake jobs that claim to offer attractive salaries and bright futures. To counter this, we apply
data science and machine learning in a web app that lets users screen job postings and identify potentially fraudulent ones.
Data source: Employment Scam Aegean Dataset (EMSCAD)
""")
if __name__ == "__main__":
io.launch()
| [
"[email protected]"
] | |
11f252b09bcb7de7f6dbdee53b3882b0b0a61b75 | f200822cc6d970ad5b1bfc17cd0a01c4104719ac | /functions/greet_users.py | 104e1499d255e049c2bd985bf7752577607e6837 | [] | no_license | iepisas/python-crash-course | 5da7b48d96e4a73f9f9675bb241bd0e1f1d54f96 | cf20911ea5c4c2ae47c314f2900f483ab86c4bd1 | refs/heads/master | 2022-11-23T04:17:47.089911 | 2020-07-25T16:04:52 | 2020-07-25T16:04:52 | 280,761,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | def greet_users(names):
    for name in names:
        if isinstance(name, str):
            msg = f"Hello, {name.title()}"
        else:
            msg = f"Hello, {name}"
        print(msg)
usernames = ['adhfa','adf','qw','awe',12,45,33]
greet_users(usernames) | [
"[email protected]"
] | |
36811db9fc562b21f2ab8e14d862f543801cf47e | 56ca3845607c8fed8fac81a07cdfa4d48139b7ef | /deep_translator/google_trans.py | 89f6ed38a37dcc5beeb0414d583a0fd7af889b30 | [
"MIT"
] | permissive | trucnguyenlam/deep-translator | 0db420de6d2de699db8aa26d5f32e4d0d41fc187 | 6cfdf620c7e27a839183ea854b52b01c51687d65 | refs/heads/master | 2023-03-20T16:52:09.878932 | 2021-03-08T18:08:08 | 2021-03-08T18:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,303 | py | """
google translator API
"""
from deep_translator.constants import BASE_URLS, GOOGLE_LANGUAGES_TO_CODES
from deep_translator.exceptions import TooManyRequests, LanguageNotSupportedException, TranslationNotFound, NotValidPayload, RequestError
from deep_translator.parent import BaseTranslator
from bs4 import BeautifulSoup
import requests
from time import sleep
import warnings
import logging
class GoogleTranslator(BaseTranslator):
"""
class that wraps functions, which use google translate under the hood to translate text(s)
"""
_languages = GOOGLE_LANGUAGES_TO_CODES
supported_languages = list(_languages.keys())
def __init__(self, source="auto", target="en"):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self.__base_url = BASE_URLS.get("GOOGLE_TRANSLATE")
if self.is_language_supported(source, target):
self._source, self._target = self._map_language_to_code(source.lower(), target.lower())
super(GoogleTranslator, self).__init__(base_url=self.__base_url,
source=self._source,
target=self._target,
element_tag='div',
element_query={"class": "t0"},
payload_key='q', # key of text in the url
hl=self._target,
sl=self._source)
self._alt_element_query = {"class": "result-container"}
@staticmethod
def get_supported_languages(as_dict=False):
"""
return the supported languages by the google translator
@param as_dict: if True, the languages will be returned as a dictionary mapping languages to their abbreviations
@return: list or dict
"""
return GoogleTranslator.supported_languages if not as_dict else GoogleTranslator._languages
def _map_language_to_code(self, *languages):
"""
map language to its corresponding code (abbreviation) if the language was passed by its full name by the user
@param languages: list of languages
@return: mapped value of the language or raise an exception if the language is not supported
"""
for language in languages:
if language in self._languages.values() or language == 'auto':
yield language
elif language in self._languages.keys():
yield self._languages[language]
else:
raise LanguageNotSupportedException(language)
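    # For example, assuming 'german' is a key of GOOGLE_LANGUAGES_TO_CODES mapping to 'de',
    # list(self._map_language_to_code('german', 'en')) would yield ['de', 'en'].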
def is_language_supported(self, *languages):
"""
check if the language is supported by the translator
@param languages: list of languages
@return: bool or raise an Exception
"""
for lang in languages:
if lang != 'auto' and lang not in self._languages.keys():
if lang != 'auto' and lang not in self._languages.values():
raise LanguageNotSupportedException(lang)
return True
def translate(self, text, **kwargs):
"""
function that uses google translate to translate a text
@param text: desired text to translate
@return: str: translated text
"""
if self._validate_payload(text):
text = text.strip()
if self.payload_key:
self._url_params[self.payload_key] = text
response = requests.get(self.__base_url,
params=self._url_params, headers ={'User-agent': 'your bot 0.1'})
if response.status_code == 429:
raise TooManyRequests()
if response.status_code != 200:
raise RequestError()
soup = BeautifulSoup(response.text, 'html.parser')
element = soup.find(self._element_tag, self._element_query)
if not element:
element = soup.find(self._element_tag, self._alt_element_query)
if not element:
raise TranslationNotFound(text)
if element.get_text(strip=True) == text.strip():
to_translate_alpha = ''.join(ch for ch in text.strip() if ch.isalnum())
translated_alpha = ''.join(ch for ch in element.get_text(strip=True) if ch.isalnum())
if to_translate_alpha and translated_alpha and to_translate_alpha == translated_alpha:
self._url_params["tl"] = self._target
if "hl" not in self._url_params:
return text.strip()
del self._url_params["hl"]
return self.translate(text)
else:
return element.get_text(strip=True)
def translate_file(self, path, **kwargs):
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
try:
with open(path) as f:
text = f.read().strip()
return self.translate(text)
except Exception as e:
raise e
def translate_sentences(self, sentences=None, **kwargs):
"""
        translate many sentences together. This makes sense if you have sentences in different
        languages and want to translate them all into one unified language. It automatically
        detects the language of each sentence and then translates it.
@param sentences: list of sentences to translate
@return: list of all translated sentences
"""
warnings.warn("deprecated. Use the translate_batch function instead", DeprecationWarning, stacklevel=2)
logging.warning("deprecated. Use the translate_batch function instead")
if not sentences:
raise NotValidPayload(sentences)
translated_sentences = []
try:
for sentence in sentences:
translated = self.translate(text=sentence)
translated_sentences.append(translated)
return translated_sentences
except Exception as e:
raise e
def translate_batch(self, batch=None):
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
if not batch:
raise Exception("Enter your text list that you want to translate")
print("Please wait.. This may take a couple of seconds because deep_translator sleeps "
"for two seconds after each request in order to not spam the google server.")
arr = []
for i, text in enumerate(batch):
translated = self.translate(text)
arr.append(translated)
print("sentence number ", i+1, " has been translated successfully")
sleep(2)
return arr
# if __name__ == '__main__':
# batch = ["tude", "fleck", "incongruous"]
# text = GoogleTranslator(source="en", target="de").translate_batch(batch)
# print(text)
| [
"[email protected]"
] | |
32cce0cbf7ef28210c580969ba28b3dc75ae60e1 | 15353c80ea6e594ea0a994f634bdc0731b8908dd | /env/bin/fixup_firestore_v1_keywords.py | 0e25ae65b410682c50fb128f4cb178cec38ad785 | [] | no_license | Yasinymous/CoronaMap-py-vue | 45294c88bf5ead536fc7d6da81bc7b3b76da74b6 | 62abd84e4ef799217e5ebb0112ee749fd8d9dc91 | refs/heads/main | 2023-04-22T17:37:59.368803 | 2021-05-08T03:06:31 | 2021-05-08T03:06:31 | 364,439,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,273 | py | #!/Users/yasinymous/Desktop/Projects/CmapQuasar/Cmap/env/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
try:
import libcst as cst
except ImportError:
raise ImportError('Run `python -m pip install "libcst >= 0.2.5"` to install libcst.')
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
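# For instance, partition(lambda x: x > 1, [0, 1, 2, 3]) returns ([2, 3], [0, 1]):
# elements satisfying the predicate first, the rest second, order preserved within each list.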
class firestoreCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'batch_get_documents': ('database', 'documents', 'mask', 'transaction', 'new_transaction', 'read_time', ),
'batch_write': ('database', 'writes', 'labels', ),
'begin_transaction': ('database', 'options', ),
'commit': ('database', 'writes', 'transaction', ),
'create_document': ('parent', 'collection_id', 'document', 'document_id', 'mask', ),
'delete_document': ('name', 'current_document', ),
'get_document': ('name', 'mask', 'transaction', 'read_time', ),
'list_collection_ids': ('parent', 'page_size', 'page_token', ),
'list_documents': ('parent', 'collection_id', 'page_size', 'page_token', 'order_by', 'mask', 'transaction', 'read_time', 'show_missing', ),
'listen': ('database', 'add_target', 'remove_target', 'labels', ),
'partition_query': ('parent', 'structured_query', 'partition_count', 'page_token', 'page_size', ),
'rollback': ('database', 'transaction', ),
'run_query': ('parent', 'structured_query', 'transaction', 'new_transaction', 'read_time', ),
'update_document': ('document', 'update_mask', 'mask', 'current_document', ),
'write': ('database', 'stream_id', 'writes', 'stream_token', 'labels', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
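# Example of the rewrite performed above (hypothetical variables): a call such as
#     client.get_document(name, mask, retry=retry)
# becomes
#     client.get_document(request={'name': name, 'mask': mask}, retry=retry)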
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=firestoreCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the firestore client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| [
"[email protected]"
] | |
3a19230ff45e21637cf52aff6917cd3099d7b236 | 08aa57ca17363db0b73e4703d835564ecdbef7e8 | /Django/finalproject/finalproject/settings.py | d469e817213ef58a4defaa6694d8893ea69d2d07 | [] | no_license | y2k6302/ITskillMaster | 3009b7b165c2fe271fe6f930689ec4c05317c5f9 | 50f4678ed2cc739163a92e0c40f157b0412f90d7 | refs/heads/master | 2020-05-26T00:35:45.946567 | 2016-03-22T16:46:51 | 2016-03-22T16:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | """
Django settings for finalproject project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l3kvr=r085#3i24fmk&%_q@#0lic6ug9fatl_=4%h-o3#gmppr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jobs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'finalproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finalproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'project',
'USER': 'root',
'PASSWORD': 'chunyi',
'HOST':'',
'PORT':'3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-TW'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'./static/',
)
| [
"Jimmy Yang"
] | Jimmy Yang |
371cc9f0dc52fef0024cbd20662008e67b8ba5b3 | fbaef3fca70c200316583f4ec1247995604efeab | /venv/bin/waitress-serve | 6b0f25bd844f966a21ee8cfc50439b30efa8a21c | [] | no_license | thevivotran/flask_tutorial | aedffa7ea8ae968f978110b7fc5152656037360d | 9820ae544a9e98295ee2f051fe10f91ba8e021ff | refs/heads/master | 2023-01-01T16:17:28.861590 | 2020-10-25T04:26:07 | 2020-10-25T04:26:07 | 306,787,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/vivo/flask_tutorial/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from waitress.runner import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
1208b40ce0c798e530cea20672f8b392f0158c00 | 5821d864fb40417184cd37a3ee3c889895d39efb | /src/ch6/3-caricature/mesh.py | 1dad28a1503f6e6857e5eb6ebcdb79d0a699128d | [
"WTFPL"
] | permissive | ssloy/least-squares-course | 9c86d8c54894248440fba78206ce253559f4257b | 13692cdfd40a8005893fd33887d6cc743c5f01ec | refs/heads/master | 2022-08-18T15:53:15.313071 | 2021-12-01T12:44:59 | 2021-12-01T12:44:59 | 222,901,933 | 162 | 18 | WTFPL | 2022-07-28T21:16:03 | 2019-11-20T09:38:37 | TeX | UTF-8 | Python | false | false | 3,119 | py | import numpy as np
class Mesh():
def __init__(self, filename):
# parse the .obj file
V, T = [], []
with open(filename) as f:
for line in f.readlines():
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
V.append([float(x) for x in values[1:4]])
elif values[0] == 'f':
T.append([int(x) for x in values[1:4]])
self.V, self.T = np.array(V), np.array(T)-1
# compute the adjacency
self.v2c = np.array([-1]*self.nverts) # vertex-to-corner map
self.c2c = np.array([-1]*self.ncorners) # corner-to-corner unordered circular lists
for c in range(self.ncorners):
v = self.T[c//3][c%3]
self.v2c[v] = c
for c in range(self.ncorners):
v = self.T[c//3][c%3]
self.c2c[c] = self.v2c[v]
self.v2c[v] = c
# speed up the computations: precompute the opposites and the boundary flags
self.opp = np.array([-1]*self.ncorners)
for c in range(self.ncorners):
c_org = self.T[c//3][c%3]
c_dst = self.T[c//3][(c+1)%3]
cir = c
opp = -1
while True:
cand = (cir//3)*3 + (cir+2)%3
cand_org = self.T[cand//3][cand%3]
cand_dst = self.T[cand//3][(cand+1)%3]
if (cand_org == c_dst and cand_dst == c_org):
opp = cand # we suppose manifold input
cir = self.c2c[cir]
if (cir==c): break
self.opp[c] = opp
self.boundary = np.array([False]*self.nverts)
for v in range(self.nverts):
cir = self.v2c[v]
if cir<0: continue
while (True):
if self.opp[cir]<0:
self.boundary[v] = True
break
cir = self.c2c[cir]
if (cir==self.v2c[v]): break
@property
def nverts(self):
return len(self.V)
@property
def ntriangles(self):
return len(self.T)
@property
def ncorners(self):
        return self.ntriangles*3
def org(self, c):
return self.T[c//3][c%3]
def dst(self, c):
return self.T[c//3][(c+1)%3]
def prev(self, c):
return (c//3)*3 + (c+2)%3
def opposite(self, c):
return self.opp[c]
def on_border(self, v):
return self.boundary[v]
def neighbors(self,v):
out = []
cir = self.v2c[v]
if cir<0: return []
cnt = 0
while True:
neigh = self.T[cir//3][(cir+1)%3]
out.append(neigh)
cir = self.c2c[cir]
if (cir==self.v2c[v]): break
cnt = cnt + 1
return out
def __str__(self):
ret = ""
for v in self.V:
ret = ret + ("v %f %f %f\n" % (v[0], v[1], v[2]))
for t in self.T:
ret = ret + ("f %d %d %d\n" % (t[0]+1, t[1]+1, t[2]+1))
return ret
| [
"[email protected]"
] | |
bc3fb289dd1d8667dc0ee7fdf9dabd0bcf56c95e | bb80b2d1d7b51f50d000294df4b68cde053fe288 | /proyectos/10/src/jack_listener.py | a6c18a2c319f3cfeac257e4684586bae1bd03da8 | [] | no_license | imurielr/Computer-Achitecture | f76f44a700f5da6ac847de0b23192710f55c060b | cd941f725b328a3f9bac85b7fc6f73c932d7129f | refs/heads/master | 2021-01-15T05:44:28.559781 | 2020-02-25T02:46:53 | 2020-02-25T02:46:53 | 242,893,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,851 | py | """ Module containing Listener class which inherit from the JackListener class generated by Antlr """
from JackListener import JackListener
from JackParser import JackParser
class Listener(JackListener):
keyword = ["class","constructor","function","method","field","static","var","int","char","boolean","void","true","false","null","this","let","do","if","else","while","return"]
symbol = ['{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~']
non_terminals = ['class', 'classVarDec', 'subroutineDec', 'parameterList', 'subroutineBody', 'varDec', 'statements', 'whileStatement', 'ifStatement', 'returnStatement', 'letStatement', 'doStatement', 'expression', 'term', 'expressionList']
def __init__(self, parser, output_file):
""" Set the parser and the output file. Count the ident for each line """
self.parser = parser
self.output_file = open(output_file, 'w')
self.ident = " "
self.num_ident = 0
def __del__(self):
""" Closes the output file """
self.output_file.close()
def enterEveryRule(self, ctx):
""" Enter each rule and write the open tag if the rule name is in the non-terminals list """
open_tag = "{}<{}>\n".format(self.get_current_ident(), self.parser.ruleNames[ctx.getRuleIndex()][:-2]) # Remove the last 2 letters from the rule name (NT)
if self.parser.ruleNames[ctx.getRuleIndex()][:-2] in Listener.non_terminals:
self.output_file.write(open_tag)
self.increase_ident()
return super().enterEveryRule(ctx)
def exitEveryRule(self, ctx):
""" Exit each rule and write the closing tag if the rule name is in the non-terminals list """
close_tag = "{}</{}>\n".format(self.get_current_ident(), self.parser.ruleNames[ctx.getRuleIndex()][:-2]) # Remove the last 2 letters from the rule name (NT)
if self.parser.ruleNames[ctx.getRuleIndex()][:-2] in Listener.non_terminals:
self.decrease_ident()
self.output_file.write(close_tag)
return super().exitEveryRule(ctx)
def visitTerminal(self, node):
""" Get the value for each terminal and check if it is a keyword, symbol, interge constant, string constant or identifier """
terminal_value = node.getText()
if terminal_value in Listener.keyword:
terminal = "keyword"
elif terminal_value in Listener.symbol:
terminal = "symbol"
# If the symbol is '<', '>', '"' or '&' change its value so that the xml file doesn't have syntax errors
            if terminal_value == '<':
                terminal_value = "&lt;"
            elif terminal_value == '>':
                terminal_value = "&gt;"
            elif terminal_value == '"':
                terminal_value = "&quot;"
            elif terminal_value == '&':
                terminal_value = "&amp;"
elif terminal_value.isdigit():
terminal = "integerConstant"
elif '"' in terminal_value:
terminal = "stringConstant"
terminal_value = terminal_value.strip('"') # Remove the quotes from the string
else:
terminal = "identifier"
# Write the tag on the output file depending on the terminal value and its class
self.output_file.write("{}<{}> {} </{}>\n".format(self.get_current_ident(), terminal, terminal_value, terminal))
return super().visitTerminal(node)
def get_current_ident(self):
""" Get the ident for the current line in the xml file """
return self.num_ident * self.ident
def increase_ident(self):
""" Increase the ident in the xml file """
self.num_ident += 1
def decrease_ident(self):
""" Decrease the ident in the xml file """
self.num_ident -= 1 | [
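
# Minimal driver sketch -- hedged assumptions: the ANTLR-generated JackLexer
# module sits next to JackParser/JackListener, and the grammar's entry rule is
# named classNT (inferred from the 'NT' suffix stripped above); adjust both to
# the real generated names if they differ.
if __name__ == '__main__':
    import sys
    from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
    from JackLexer import JackLexer

    parser = JackParser(CommonTokenStream(JackLexer(FileStream(sys.argv[1]))))
    tree = parser.classNT()  # entry rule (assumed name)
    walker = ParseTreeWalker()
    walker.walk(Listener(parser, sys.argv[2]), tree)  # writes the XML to argv[2]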
"[email protected]"
] | |
43904990078a40f1f4a6b35bf7f6e392a966f1e9 | 1bb0690ab2750aae7485a011d6a176fca100bf31 | /openacademy/model/openacademy_course.py | 1ca2c294fd9501abc778e7c683d0a8db050c99d7 | [
"Apache-2.0"
] | permissive | gylmargarcia/openacademy-project | 5a4d3a619c8d06ec36e2e97ae06051d4a268f021 | 3657080f929a380df66063d6b522cd8d8ddbc917 | refs/heads/master | 2020-05-25T09:26:59.605268 | 2014-12-12T05:54:15 | 2014-12-12T05:54:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from openerp import models, fields, api
class Course(models.Model):
'''
    This class creates the course model.
'''
    _name = 'openacademy.course'  # Odoo model name
    name = fields.Char(string='Title', required=True)  # reserved field that identifies the record's name
description = fields.Text(string='Description')
| [
"[email protected]"
] | |
5ba3eb2c99cb4886c0d71494e016a22abad98aee | acbe6bd6cefaf8b12070d7258dab30e4f7fcebed | /ui/style.py | c5b1d5ee83c7a5fb029f0f3becf8dba8c57a3b3b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | RogueScholar/debreate | 02c98c5a78d33041798410f0e3b99e80fda65d00 | dfe9bcac7333a53082b3a2ae169806cf604d59f6 | refs/heads/master | 2023-06-07T11:49:03.821969 | 2023-04-28T02:14:25 | 2023-04-28T02:14:25 | 253,707,766 | 0 | 0 | MIT | 2023-05-28T15:24:17 | 2020-04-07T06:34:47 | Python | UTF-8 | Python | false | false | 1,267 | py |
# ******************************************************
# * Copyright © 2016-2023 - Jordan Irwin (AntumDeluge) *
# ******************************************************
# * This software is licensed under the MIT license. *
# * See: LICENSE.txt for details. *
# ******************************************************
## @module ui.style
import wx
# FIXME: legacy wx version no longer supported
if wx.MAJOR_VERSION > 2:
PANEL_BORDER = wx.BORDER_THEME
else:
PANEL_BORDER = wx.BORDER_MASK
## Layout styles for sizers.
class layout:
ALGN_T = wx.ALIGN_TOP
ALGN_B = wx.ALIGN_BOTTOM
ALGN_L = wx.ALIGN_LEFT
ALGN_LT = ALGN_L|ALGN_T
ALGN_LB = ALGN_L|ALGN_B
ALGN_R = wx.ALIGN_RIGHT
ALGN_RT = ALGN_R|ALGN_T
ALGN_RB = ALGN_R|ALGN_B
ALGN_C = wx.ALIGN_CENTER
ALGN_CH = wx.ALIGN_CENTER_HORIZONTAL
ALGN_CV = wx.ALIGN_CENTER_VERTICAL
ALGN_CL = ALGN_CV|ALGN_L
ALGN_CR = ALGN_CV|ALGN_R
ALGN_CT = ALGN_CH|ALGN_T
ALGN_CB = ALGN_CH|ALGN_B
PAD_LT = wx.LEFT|wx.TOP
PAD_LB = wx.LEFT|wx.BOTTOM
PAD_LTB = PAD_LT|wx.BOTTOM
PAD_RT = wx.RIGHT|wx.TOP
PAD_RB = wx.RIGHT|wx.BOTTOM
PAD_RTB = PAD_RT|wx.BOTTOM
PAD_LR = wx.LEFT|wx.RIGHT
PAD_LRB = PAD_LR|wx.BOTTOM
PAD_LRT = PAD_LR|wx.TOP
PAD_TB = wx.TOP|wx.BOTTOM
| [
"[email protected]"
] | |
c140e474ec324763c62376fce310ab79a289d686 | 7bf0becb36d56808e7d2ee0d278325ec86e2a465 | /PyLibrary/BinarySearch/binarySearchIterative.py | 4d88ba408b4f0d13673ee16ef3aaa0b5324f02b3 | [] | no_license | adrianna/DSandA | afb49225b7f910c6c98b9bbe809563efa1a484c9 | 0087dcfd02f4b9617f3eb05e7f1ce73fc3ca386e | refs/heads/master | 2020-05-22T02:07:21.811682 | 2019-08-31T19:19:33 | 2019-08-31T19:19:33 | 186,191,976 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | #!/usr/bin/env python
# coding: utf-8
# # Binary search practice
#
# Let's get some practice doing binary search on an array of integers. We'll solve the problem two different ways—
# both iteratively and resursively.
#
# Here is a reminder of how the algorithm works:
#
# 1. Find the center of the list (try setting an upper and lower bound to find the center)
# 2. Check to see if the element at the center is your target.
# 3. If it is, return the index.
# 4. If not, is the target greater or less than that element?
# 5. If greater, move the lower bound to just above the current center
# 6. If less, move the upper bound to just below the current center
# 7. Repeat steps 1-6 until you find the target or until the bounds are the same or cross (the upper bound is less
# than the lower bound).
#
#
# ## Problem statement:
# Given a sorted array of integers, and a target value, find the index of the target value in the array. If the
# target value is not present in the array, return -1.
# ## Iterative solution
def binary_search(array, target):
start_index = 0
end_index = len(array) - 1
while start_index <= end_index:
mid_index = (start_index + end_index)//2 # integer division in Python 3
mid_element = array[mid_index]
if target == mid_element: # we have found the element
return mid_index
elif target < mid_element: # the target is less than mid element
end_index = mid_index - 1 # we will only search in the left half
else: # the target is greater than mid element
            start_index = mid_index + 1  # we will search only in the right half
return -1
def test_function(test_case):
answer = binary_search(test_case[0], test_case[1])
if answer == test_case[2]:
print("Pass!")
else:
print("Fail!")
## Test
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
target = 6
index = 6
test_case = [array, target, index]
test_function(test_case)
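
# ## Recursive solution
# The intro above promises both an iterative and a recursive version; here is
# a minimal sketch of the recursive one. It narrows the same bounds on each
# call instead of looping.
def binary_search_recursive(array, target, start_index, end_index):
    if start_index > end_index:  # bounds crossed: target is absent
        return -1
    mid_index = (start_index + end_index)//2
    mid_element = array[mid_index]
    if target == mid_element:
        return mid_index
    elif target < mid_element:  # recurse into the left half
        return binary_search_recursive(array, target, start_index, mid_index - 1)
    else:  # recurse into the right half
        return binary_search_recursive(array, target, mid_index + 1, end_index)

# reuse the same test case with explicit initial bounds
print("Pass!" if binary_search_recursive(array, target, 0, len(array) - 1) == index else "Fail!")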
| [
"[email protected]"
] | |
6439b3fa39579b18cc7fe52d0293ff002b66b28e | 5da5415e27383731e3d141430a051d9a99c97c78 | /ex_11/ex_11_01.py | 509683e7fa6722a431b29c102ed6329e5a211e7b | [] | no_license | Harshith-S/Python-4-Everybody | ea89cda84d117168ebfc9a8da0f668eb7c3f355e | 6a62da9f5c0159e7a8a3eb46c42562380fe6df85 | refs/heads/master | 2020-05-19T12:24:08.873275 | 2019-05-05T10:05:09 | 2019-05-05T10:05:09 | 184,626,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | """
Finding Numbers in a Haystack
In this assignment you will read through and parse a file with text and numbers. You will extract all the numbers in the file and compute the sum of the numbers.
Data Files
We provide two files for this assignment.
One is a sample file where we give you the sum for your testing and the other is the actual data you need to process for the assignment.
Sample data: http://py4e-data.dr-chuck.net/regex_sum_42.txt (There are 90 values with a sum=445833)
Actual data: http://py4e-data.dr-chuck.net/regex_sum_208835.txt (There are 94 values and the sum ends with 448)
"""
import re
name = input("Enter the file : ")
handle = open(name)
result = list()  # empty list; findall() returns a list, so matches are easy to append
for line in handle:
    number = re.findall('[0-9]+', line)  # extract the numbers on this line
    result = result + number  # append the regex matches to the running list
sum=0
for i in result:
sum=sum+int(i) #find the sum
print(sum) | [
"[email protected]"
] | |
5d831e4185fbab06635d9f25266e662f0c8d44d7 | a178885065966a14eebd0ab1ff4f9e9ca6598987 | /cdf/practice/a1.py | fd8cf688833c7e258c6800091fd67425813fffdc | [] | no_license | Abhik1998/CP | 34fa16330a408007d5643adf7830e17e08a2e4f8 | c836a96d8275e7e756f9d18153174c215b1263f9 | refs/heads/master | 2023-01-02T14:11:59.370497 | 2020-10-12T19:21:13 | 2020-10-12T19:21:13 | 292,824,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | MOD = 998244353
p = [1] * 200005  # p[k] = 10**k mod MOD, precomputed
for i in range(1, 200005):
p[i] = (p[i - 1] * 10) % MOD
n = int(input())
for i in range(1, n):
    # blocks of length exactly i touching either end: 2 ends * 10 block digits * 9 choices for the adjacent digit
    res = 2 * 10 * 9 * p[n - i - 1]
    # blocks of length exactly i strictly inside: (n - 1 - i) start positions, 9 choices on each side
    res += (n - 1 - i) * 10 * 9 * 9 * p[n - i - 2]
    print(res % MOD, end=' ')
print(10)  # length-n blocks: the ten repdigit strings
"[email protected]"
] | |
ba623f7039cb4e93da77833f6adaa48a96c55ae1 | 21c2c8d3a71328f8aabf95b492e1907b7aff2608 | /testerapp/views.py | c3f937695cf3695083530089cfbc956876ef1c0f | [] | no_license | kushalp13/Issue-Management-System | 89891b31b684f1873b863feb67c1f6fe65b39b99 | 3aa5d239a47a4a69869625ac647587f61f95c657 | refs/heads/master | 2022-11-29T00:48:43.644523 | 2020-08-05T11:17:14 | 2020-08-05T11:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from django.shortcuts import render
from .models import BugInfo
# Create your views here.
def reportBug(request):
return(render(request,'loginapp/report_bug.html'))
def addBugInfo(request):
appName = request.POST["application_name"]
moduleName = request.POST["modulename"]
bugID = appName + "@" + moduleName
bugDesc = request.POST["debug_info"]
currBuild = request.POST["current_build"]
dateRaised = request.POST["dateRaised"]
detectedBy = request.POST["testerID"]
bugPriority = request.POST["bug_severity"]
    bugInfo = BugInfo(bugId=bugID, bugDesc=bugDesc, bugStatus="ACTIVE", currBuild=currBuild, dateRaised=dateRaised, appName=appName, detectedBy=detectedBy, bugPriority=bugPriority)
bugInfo.save()
return(render(request,'testerapp/reportedBug.html',{'bugID':bugID}))
def viewBug(request):
allbug = BugInfo.objects.all()
return(render(request,'testerapp/viewBug.html',{'allbug':allbug}))
def fixedBug(request):
allbug = BugInfo.objects.all()
return(render(request,'testerapp/fixedBug.html',{'allbug':allbug})) | [
"[email protected]"
] | |
6d66a2db8f11ffd28ada75f1f0b889f531bb5f03 | c223a73c09f9f384ff964012be51873b8f997c8f | /manticora/controllers/modules/register.py | ad408f6e6eebc18a102675f4d40f99aaecc92537 | [] | no_license | WhiteSummeRK/manticora | c4d2a9c6a71ad6e96009887b0ee892662aaf0ce6 | b0e44f3b02451030d7f282102a60872d1153b1e0 | refs/heads/master | 2020-03-24T19:01:57.457597 | 2019-04-22T22:57:35 | 2019-04-22T22:57:35 | 142,907,269 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | from manticora.models.database_functions.usuario import (
insert_new_user_account,
check_for_existing_mail,
check_for_existing_name,
update_user_from_db
)
from manticora.models.database_functions.restaurante import (
insert_new_rest,
get_actual_rest,
update_rest_from_db
)
from manticora.models.database.tables import db
from datetime import datetime
def register_user(nome, senha,
email, bairro,
cidade, rua,
numero, comp, is_adm=False,
return_entity=False):
"""Faz as verificações adequadas e insere os dados no banco."""
if check_for_existing_name(nome):
return 'Esse nome já existe.'
if check_for_existing_mail(email):
return 'Esse email já existe.'
try:
act = insert_new_user_account(nome, senha,
email, bairro,
cidade, rua,
numero, comp, is_adm)
except Exception:
return 'Algo deu errado, tente novamente.'
if return_entity:
return act
return 'Usuário inserido com sucesso.'
def register_rest(phone, num_phone, img, open, closed, adm):
"""Faz as verificações e insere novo adm."""
openned = datetime.strptime(open, '%H:%M').time()
closed = datetime.strptime(closed, '%H:%M').time()
if openned == closed:
db.session.rollback()
db.session.remove()
return "Desculpe, não é possivel utilizar um estabelecimento 24hrs"
try:
insert_new_rest(phone, num_phone, img, openned, closed, adm)
except Exception:
db.session.rollback()
db.session.remove()
return 'Algo deu errado, tente novamente.'
return 'ok'
def update_user(city, neigh, street, num, complement, current_user):
try:
update_user_from_db(city, neigh, street, num, complement, current_user)
return "ok"
except Exception:
return "Algo deu errado, tente novamente."
def update_rest(phone, hora_aber, hora_fech, current_user):
rest = get_actual_rest(current_user)
db_hora_aber = datetime.strptime(hora_aber, "%H:%M").time()
db_hora_fech = datetime.strptime(hora_fech, "%H:%M").time()
try:
update_rest_from_db(phone, db_hora_aber, db_hora_fech, None, rest)
return "ok"
except Exception:
return "Algo deu errado, tente novamente."
def update_rest_img(imagem, current_user):
rest = get_actual_rest(current_user)
try:
update_rest_from_db(False, False, False, imagem, rest)
return "ok"
except Exception:
return "Algo deu errado, tente novamente."
| [
"[email protected]"
] | |
e892918ab46f8e39c2e19115b98da50e402eff2c | 72b8cedcfeb3b98abf2965efa5f5ba7b02563647 | /train.py | 4d917ecf1f2ae99e6a3d49be9bd8166ed2c77d53 | [] | no_license | 18205097282/DBFC-Net | 48f0c1139d719c3769658dec16303a1bfafe5702 | 5166ffe45651595eb81fee6be0879173e12cec9b | refs/heads/master | 2022-11-27T03:11:33.487739 | 2020-07-12T14:20:29 | 2020-07-12T14:20:29 | 260,202,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,738 | py | import os
import time
import torch
from torch.autograd import Variable
from util import AverageMeter, Log
from rankingloss import *
def train(train_loader, train_loader1, train_loader2, train_loader3, args, model, criterion, center_loss, optimizer,
epoch,
num_epochs):
print(len(train_loader), len(train_loader1), len(train_loader2), len(train_loader3))
""" (图片)1499 (视频)1499 (音频)1500 (文本文档)1000"""
count = 0
since = time.time()
running_loss0 = AverageMeter()
running_loss1 = AverageMeter()
running_loss2 = AverageMeter()
running_loss3 = AverageMeter()
running_loss4 = AverageMeter()
running_loss5 = AverageMeter()
running_loss6 = AverageMeter()
running_loss7 = AverageMeter()
running_loss = AverageMeter()
log = Log()
model.train()
image_acc = 0
text_acc = 0
video_acc = 0
audio_acc = 0
for (i, (input, target)), (j, (input1, target1)), (k, (input2, target2)), (p, (input3, target3)) in zip(
enumerate(train_loader), enumerate(train_loader1), enumerate(train_loader2), enumerate(train_loader3)):
"""(i,j,k,p) 是 (n,n,n,n) n从0到999结束,故,共只迭代1000次! 有问题!"""
input_var = Variable(input.cuda())
input_var1 = Variable(input1.cuda())
input_var2 = Variable(input2.cuda())
input_var3 = Variable(input3.cuda())
targets = torch.cat((target, target1, target2, target3), 0)
targets = Variable(targets.cuda())
target_var = Variable(target.cuda())
target_var1 = Variable(target1.cuda())
target_var2 = Variable(target2.cuda())
target_var3 = Variable(target3.cuda())
outputs, feature = model(input_var, input_var1, input_var2, input_var3)
size = int(outputs.size(0) / 4)
img = outputs.narrow(0, 0, size)
vid = outputs.narrow(0, size, size)
aud = outputs.narrow(0, 2 * size, size)
txt = outputs.narrow(0, 3 * size, size)
        _, predict1 = torch.max(img, 1)  # dim=1: row-wise argmax (per-sample predicted class)
        _, predict2 = torch.max(vid, 1)
        _, predict3 = torch.max(txt, 1)
        _, predict4 = torch.max(aud, 1)
image_acc += torch.sum(torch.squeeze(predict1.float() == target_var.float())).item() / float(
target_var.size()[0])
video_acc += torch.sum(torch.squeeze(predict2.float() == target_var1.float())).item() / float(
target_var1.size()[0])
audio_acc += torch.sum(torch.squeeze(predict4.float() == target_var2.float())).item() / float(
target_var2.size()[0])
text_acc += torch.sum(torch.squeeze(predict3.float() == target_var3.float())).item() / float(
target_var3.size()[0])
loss0 = criterion(img, target_var)
loss1 = criterion(vid, target_var1)
loss2 = criterion(aud, target_var2)
loss3 = criterion(txt, target_var3)
loss4 = loss0 + loss1 + loss2 + loss3
loss5 = center_loss(feature, targets) * 0.001
if (args.loss_choose == 'r'):
loss6, _ = ranking_loss(targets, feature, margin=1, margin2=0.5, squared=False)
loss6 = loss6 * 0.1
else:
loss6 = 0.0
loss = loss4 + loss5 + loss6 # +loss7
# print(loss)
batchsize = input_var.size(0)
running_loss0.update(loss0.item(), batchsize)
running_loss1.update(loss1.item(), batchsize)
running_loss2.update(loss2.item(), batchsize)
running_loss3.update(loss3.item(), batchsize)
running_loss4.update(loss4.item(), batchsize)
running_loss5.update(loss5.item(), batchsize)
# running_loss7.update(loss7.item(), batchsize)
if (args.loss_choose == 'r'):
running_loss6.update(loss6.item(), batchsize)
running_loss.update(loss.item(), batchsize)
optimizer.zero_grad()
loss.backward()
        for param in center_loss.parameters():
            param.grad.data *= (1. / 0.001)  # undo the 0.001 weighting so the class centers update at full rate
optimizer.step()
count += 1
if (i % args.print_freq == 0):
print('-' * 20)
print('Epoch [{0}/{1}][{2}/{3}]'.format(epoch, num_epochs, i, len(train_loader)))
print('Image Loss: {loss.avg:.5f}'.format(loss=running_loss0))
print('Video Loss: {loss.avg:.5f}'.format(loss=running_loss1))
print('Audio Loss: {loss.avg:.5f}'.format(loss=running_loss2))
print('Text Loss: {loss.avg:.5f}'.format(loss=running_loss3))
print('AllMedia Loss: {loss.avg:.5f}'.format(loss=running_loss4))
print('Center Loss: {loss.avg:.5f}'.format(loss=running_loss5))
# print('separate Loss: {loss.avg:.5f}'.format(loss=running_loss7))
if (args.loss_choose == 'r'):
print('Ranking Loss: {loss.avg:.5f}'.format(loss=running_loss6))
print('All Loss: {loss.avg:.5f}'.format(loss=running_loss))
# log.save_train_info(epoch, i, len(train_loader), running_loss)
print("训练第%d个epoch:" % epoch)
print("image:", image_acc / len(train_loader3))
print("text:", text_acc / len(train_loader3))
print("video:", video_acc / len(train_loader3))
print("audio:", audio_acc / len(train_loader3))
time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60), "trained %d batches" % count)
| [
"[email protected]"
] | |
e0e16019e1c7c31f775be698bcb15dd511f10ebe | 1cd3177c88a9b15264aec1b3738c55b28cbc88e6 | /lib/pirates.py | b855410bf86a75ff45537329c9de59694c01bbab | [
"MIT"
] | permissive | maurogaravello/pirati | 3685f7d7ac164dd3ea6baf97ba434761a2b38993 | 27c07a368f026a43efc1cd5df9368004689bb04a | refs/heads/master | 2020-05-21T20:08:46.711754 | 2017-02-07T09:46:46 | 2017-02-07T09:46:46 | 64,026,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,926 | py | #!/usr/bin/env python
### pirates.py
### class containing all the relevant data
import numpy as np
import logging
class pirates(object):
def __init__(self, x_1, x_2, y_1, y_2, n_x, n_y, M, tMax, d_o,
InitialDatum_rho, InitialDatum_A, speed_ships, nu, DirName,
mathcal_K, cut_off_C_pirates, kappa, a, cut_off_C_ships, cut_off_C_police, controls, pictures = 90):
"""
Initializatium function for the class.
:param x_1: float. Lower bound for x-coordinate of the domain
:param x_2: float. Upper bound for x-coordinate of the domain
:param y_1: float. Lower bound for y-coordinate of the domain
:param y_2: float. Upper bound for y-coordinate of the domain
:param n_x: int. Number of points for the discretization of the x-component
of the domain.
:param n_y: int. Number of points for the discretization of the y-component
of the domain.
:param M: int. Number of police vessels.
:param tMax: float. Time of the simulation
:param d_o: tuple of lenght M. Each element is a tuple of 2 floats,
describing the initial position of the police vessels
:param InitialDatum_rho: function. It gives the initial density of the
pirates.
:param InitialDatum_A: function. It gives the initial density of the
commercial ships.
:param speed_ships: function. It gives the speed of ship when density is A
:param nu: tuple of two functions. They give the geometrical direction of
the ships.
:param DirName: string. It gives the name of the working directory, i.e. of
the directory containing the simulation
:param mathcal_K: function describing the kernel in the equation for pirates
:param cut_off_C_pirates: function describing the kernel in the equation for pirates
:param kappa: function. It gives the normalization for the
direction in the equation for pirates
:param cut_off_C_ships: function describing the kernel in the equation for ships
:param cut_off_C_police: function describing the kernel in the equation for police
:param a: array of floats. Coefficients a in the source term f for the eq
:param controls: function giving a list of controls.
:param pictures: int. Approximate number of pictures.
"""
# 2d domains
self.x_1 = x_1
self.x_2 = x_2
self.y_1 = y_1
self.y_2 = y_2
self.domain = ((x_1, x_2), (y_1, y_2))
self.n_x = n_x
self.n_y = n_y
self.check_domain()
self.create_mesh()
self.create_initial_datum(InitialDatum_rho, InitialDatum_A)
# police
self.police_vessels = M
self.police_initial_positions = d_o
self.check_positions()
self.controls = controls
# ships' velocity
self.ships_speed = speed_ships
self.ships_direction = nu
self.ships_direction_mesh = nu(self.x, self.y)
# time
self.time_of_simulation = tMax
self.create_time_mesh()
# printing mesh
self.pictures = pictures
self.create_print_mesh()
# base directory
self.base_directory = DirName
# kernels and cut-off
self.mathcal_K = mathcal_K
# self.cut_off_C = cut_off_C
self.cut_off_C_pirates = cut_off_C_pirates
self.cut_off_C_ships = cut_off_C_ships
self.cut_off_C_police = cut_off_C_police
self.create_kernels()
# normalization function kappa
self.kappa = kappa
# coefficient a for the source term f in the equation for pirates
self.a = a
#
# Function for creating the space mesh
#
def create_mesh(self):
"""
This function creates the grid for the domain Omega.
self.x = numpy vector starting from self.x_1, ending to self.x_2, with
self.n_x points, i.e. its length = self.n_x
self.dx = the horizontal step
self.y = numpy vector starting from self.y_1, ending to self.y_2, with
self.n_y points, i.e. its length = self.n_y
self.dy = the vertical step
self.x_mesh and self.y_mesh are the corresponding meshgrids. Their shape
is (self.n_y, self.n_x)
"""
(self.x, self.dx) = np.linspace(self.x_1, self.x_2, self.n_x, retstep=True)
(self.y, self.dy) = np.linspace(self.y_1, self.y_2, self.n_y, retstep=True)
self.x_mesh, self.y_mesh = np.meshgrid(self.x, self.y)
#
# Function for creating the time mesh
#
def create_time_mesh(self):
"""
This function creates the time mesh.
self.time = numpy vector starting from 0, ending to self.time_of_simulation
self.dt = the time step
"""
dxy = min(self.dx, self.dy)
dt = 0.25* min(dxy**2, dxy/self.ships_speed(0))
N = 2 + int(self.time_of_simulation / dt)
(self.time, self.dt) = np.linspace(0., self.time_of_simulation, N, retstep = True)
assert (self.dt <= dt)
#
# Function for creating the printing mesh
#
def create_print_mesh(self):
"""
This function creates the print mesh.
self.time = numpy vector starting from 0, ending to self.time_of_simulation
self.dt = the time step
"""
K = int(float(len(self.time))/self.pictures) + 1
self.printing = np.zeros_like(self.time, dtype= bool)
self.printing[::K] = True
self.printing[-1] = True
#
# Function for creating the kernels
#
def create_kernels(self):
"""
This function creates 2d vectors for the kernels
self.kernel_x = numpy vector centered at x=0 with same size as self.x
and mesh size self.dx
self.kernel_y = numpy vector centered at x=0 with same size as self.y
and mesh size self.dy
self.kernel_mathcal_K = numpy 2d vector generated by the function
self.mathcal_K
"""
self.kernel_x = self.x - (self.x_1 + self.x_2)/2.
self.kernel_y = self.y - (self.y_1 + self.y_2)/2.
self.kernel_mathcal_K = self.mathcal_K(self.kernel_x, self.kernel_y)
#
# Function for creating 2d vectors for intial data
#
def create_initial_datum(self, InitialDatum_rho, InitialDatum_A):
"""
This function creates two 2d-arrays for the initial data for ship and
pirates.
:param InitialDatum_rho: function. It gives the initial datum for pirates.
:param InitialDatum_A: function. It gives the initial datum for ships.
It creates self.initial_density_pirates and self.initial_density_ships.
They are two 2d-numpy array of shapes (n_y, n_x)
"""
self.initial_density_pirates = InitialDatum_rho(self.x_mesh, self.y_mesh)
self.initial_density_ships = InitialDatum_A(self.x_mesh, self.y_mesh)
#
# Projection into the domain
#
def project(self, position_police):
new_pos = []
for i in xrange(len(position_police)):
position_x = min(max(position_police[i][0], self.x_1), self.x_2)
position_y = min(max(position_police[i][1], self.y_1), self.y_2)
new_pos.append((position_x, position_y))
return new_pos
#
# Function for checking the domain is feasible.
#
def check_domain(self):
if self.x_1 >= self.x_2:
print 'Error: x_1 should be less than x_2'
logging.info('Error: x_1 should be less than x_2')
exit()
if self.y_1 >= self.y_2:
print 'Error: y_1 should be less than y_2'
logging.info('Error: y_1 should be less than y_2')
exit()
if not isinstance(self.n_x, int) or not isinstance(self.n_y, int):
print 'Error: both n_x and n_y should be integers'
logging.info('Error: both n_x and n_y should be integers')
exit()
if self.n_x == 0 or self.n_y == 0:
print 'Error: both n_x and n_y should be strictly positive'
logging.info('Error: both n_x and n_y should be strictly positive')
exit()
#
# Function for checking the initial position of the police vessels.
def check_positions(self):
if not isinstance(self.police_vessels, int):
print 'Error: the number M of vessels should be integer'
logging.info('Error: the number M of vessels should be integer')
exit()
if len(self.police_initial_positions) != self.police_vessels:
print 'Error: the number of police vessels does not coincide with their initial position'
logging.info('Error: the number of police vessels does not coincide with their initial position')
exit()
for i in xrange(len(self.police_initial_positions)):
if self.police_initial_positions[i][0] < self.x_1 or self.police_initial_positions[i][0] > self.x_2:
print 'Error: the x position of the ' + str(i+1) + '-th vessel is not correct'
logging.info('Error: the x position of the ' + str(i+1) + '-th vessel is not correct')
exit()
if self.police_initial_positions[i][1] < self.y_1 or self.police_initial_positions[i][1] > self.y_2:
print 'Error: the y position of the ' + str(i+1) + '-th vessel is not correct'
logging.info('Error: the y position of the ' + str(i+1) + '-th vessel is not correct')
exit()
| [
"[email protected]"
] | |
00584c9346dc4deaf86a7dc7b1447a77f268d162 | 370985396544a6c2601300b7802b16082017cc97 | /TULER/RNN/rnn_model.py | b84cf3b5e5f2ba2289d9a45bcc9374ea1b89e411 | [] | no_license | runnerxin/Paper-Reproduce | 038d6e3ea33d373bfea2bc28fc40110fbdfbc3dd | 2ad45ca791e45e8e0fe4d34e3bae10b05e0267b6 | refs/heads/master | 2020-03-31T05:19:03.456280 | 2018-12-11T10:40:12 | 2018-12-11T10:40:12 | 151,941,391 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | # =============================================
# @Author : runnerxin
# @File : rnn_model.py
# @Software : PyCharm
# @Time : 2018/11/22 14:43
# =============================================
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from RNN.model import RNN
from RNN.data_read import Data
import torch
from torch import nn
import numpy as np
def run():
data = Data()
model = RNN()
# print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.MSELoss()
for epoch in range(200):
for sentence in data.sentences:
x = np.array(sentence[:-1], dtype=np.float32)
y = np.array(sentence[1:], dtype=np.float32)
x = torch.from_numpy(x[:, np.newaxis, np.newaxis]) # shape (time_step, batch, input_size)
y = torch.from_numpy(y[:, np.newaxis, np.newaxis])
prediction, state = model(x) # rnn output
# print(prediction.shape)
# print(prediction.shape)
# print(h_n.shape)
# print(h_c.shape)
loss = loss_func(prediction, y) # calculate loss
print(loss)
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # back propagation, compute gradients
optimizer.step() # apply gradients
torch.save(model, 'model.pkl')
# y = np.array([4, 5, 6], dtype=np.float32)
# y = torch.from_numpy(y[:, np.newaxis, np.newaxis]) # shape (time_step, batch, input_size)
# loss = loss_func(prediction, y)
# print(loss)
# print(prediction)
def test():
model = torch.load('model.pkl')
x = np.array([3, 4, 5], dtype=np.float32)
x = torch.from_numpy(x[:, np.newaxis, np.newaxis]) # shape (time_step, batch, input_size)
prediction, state = model(x)
print(prediction)
    # autoregressive rollout: feed the last prediction back in as the next input
    for i in range(3):
        in_x = prediction[-1, :, :]
        in_x = in_x.view(-1, in_x.size(0), in_x.size(1))  # reshape to (time_step=1, batch, input_size)
# state = (state[0].data, state[1].data)
prediction, state = model(in_x, state)
print(prediction)
if __name__ == "__main__":
# run()
test()
| [
"[email protected]"
] | |
dd50d872377ade072ed8611c0266bc038e3ffa75 | bf418efc6dfa4a1702b6360265607c4f008f399d | /rgb/train_rgb.py | 6d7d5d47024f37538ad2cf4538a316aa616fc508 | [] | no_license | klnavaneet/differ | 4775f390e8f62755d23fb1a70315dc9e24b54d73 | 3aba1bd8fa58a8e3b1b8cc22abeb95cb6c65ea7e | refs/heads/master | 2020-05-20T06:16:36.673477 | 2019-05-09T19:38:36 | 2019-05-09T19:38:36 | 185,425,397 | 8 | 2 | null | 2019-05-09T19:38:37 | 2019-05-07T15:03:56 | Python | UTF-8 | Python | false | false | 16,499 | py | ###############################################################################
# Training code for reconstructing colored point clouds
# Use run_train_rgb.sh for running the code
###############################################################################
import os, sys
sys.path.append('../src')
from train_utils import *
from utils_train import *
# to hide scipy deprecation warnings while saving outputs
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
category_id = shapenet_category_to_id[args.category]
def create_feed_dict(models, indices, models_pcl, b, args):
batch = get_feed_dict(models, indices, models_pcl, b, args)
batch_ip, batch_gt_rgb, batch_gt_mask, batch_pcl_gt, batch_x, batch_y, batch_names = batch
feed_dict = {img_inp: batch_ip, proj_gt_mask: batch_gt_mask,
proj_gt_rgb: batch_gt_rgb, pcl_gt: batch_pcl_gt,
view_x:batch_x, view_y: batch_y}
return feed_dict, batch_names
def get_epoch_loss(models, indices, models_pcl, args):
batches = len(indices)/VAL_BATCH_SIZE
val_loss, val_fwd, val_bwd = [0.]*3
L_rgb_val, L_bce_val, L_fwd_val, L_bwd_val = [0.]*4
for b in xrange(0,batches,10):
feed_dict, _ = create_feed_dict(models, indices, models_pcl, b, args)
L,F,B,_L_rgb_val, _L_bce_val,_L_fwd_val,_L_bwd_val = sess.run([chamfer_distance_scaled,
dists_forward_scaled, dists_backward_scaled, loss_rgb, loss_bce,
loss_fwd, loss_bwd], feed_dict)
val_loss += L/batches
val_fwd += F/batches
val_bwd += B/batches
batch_out = [_L_rgb_val, _L_bce_val, _L_fwd_val, _L_bwd_val]
L_rgb_val, L_bce_val, L_fwd_val, L_bwd_val = get_average_from_dict(batch_out)
return val_loss[0], val_fwd[0], val_bwd[0], L_rgb_val, L_bce_val, L_fwd_val, L_bwd_val
def save_outputs(models, pair_indices, models_pcl, global_step, args):
sample_pair_indices = pair_indices[:10]
out_dir_imgs = join(proj_images_folder, str(global_step))
out_dir_pcl = join(proj_pcl_folder, str(global_step))
create_folder([out_dir_imgs, out_dir_pcl])
batches = len(sample_pair_indices)//VAL_BATCH_SIZE
for b in xrange(batches):
fd, model_names = create_feed_dict(models, pair_indices, models_pcl, b, args)
# save projections
_proj_mask, _proj_rgb = sess.run([proj_pred_mask, proj_pred_rgb],
feed_dict=fd)
for k in range(1): # view num
for l in range(1): # batch num
_proj_mask[k][l][_proj_mask[k][l]>=0.5] = 1.
_proj_mask[k][l][_proj_mask[k][l]<0.5] = 0.
sc.imsave('%s/%s_%s_input.png'%(out_dir_imgs,model_names[l],k),
fd[img_inp][l])
sc.imsave('%s/%s_%s_gt_mask.png'%(out_dir_imgs,model_names[l],k),
fd[proj_gt_mask][l][k])
sc.imsave('%s/%s_%s_pred_mask.png'%(out_dir_imgs,model_names[l],k),
_proj_mask[k][l])
batch_gt_rgb = (fd[proj_gt_rgb][k][l]*255.).astype(np.uint8)
batch_gt_rgb = cv2.cvtColor(batch_gt_rgb, cv2.COLOR_RGB2BGR)
batch_pred_rgb = (_proj_rgb[k][l]*255.).astype(np.uint8)
batch_pred_rgb = cv2.cvtColor(batch_pred_rgb, cv2.COLOR_RGB2BGR)
cv2.imwrite('%s/%s_%s_gt_rgb.png'%(out_dir_imgs,model_names[l],k), batch_gt_rgb)
cv2.imwrite('%s/%s_%s_pred_rgb.png'%(out_dir_imgs,model_names[l],k), batch_pred_rgb)
if args.save_pcl:
# save pointclouds
            _pcl_out = sess.run(pcl_out, feed_dict=fd)
            for k in range(1): #range(len(_pcl_out)):
                np.savetxt('%s/%s_%s_pred.xyz'%(out_dir_pcl,model_names[k],k),_pcl_out[k])
                np.savetxt('%s/%s_%s_gt.xyz'%(out_dir_pcl,model_names[k],k),fd[pcl_gt][k])
if __name__=='__main__':
EXP_DIR = args.exp
create_folder([EXP_DIR])
filename = basename(__file__)
os.system('cp %s %s'%(filename, EXP_DIR))
# Define Logs Directories
snapshot_folder = join(EXP_DIR, 'snapshots')
best_folder = join(EXP_DIR, 'best')
logs_folder = join(EXP_DIR, 'logs')
log_file = join(EXP_DIR, 'logs.txt')
proj_images_folder = join(EXP_DIR, 'log_proj_images')
proj_pcl_folder = join(EXP_DIR, 'log_proj_pcl')
# Create log directories
create_folder([snapshot_folder, logs_folder, best_folder,
proj_images_folder, proj_pcl_folder, join(snapshot_folder,'best')])
args_file = join(logs_folder, 'args.json')
with open(args_file, 'w') as f:
json.dump(vars(args), f, ensure_ascii=False, indent=2, sort_keys=True)
train_models, val_models, train_pair_indices, val_pair_indices = get_shapenet_drc_models(data_dir, categs=[category_id])
train_models_pcl, val_models_pcl, _, _ = get_shapenet_drc_models(data_dir_pcl, categs=[category_id])
random.shuffle(val_pair_indices)
batches = len(train_pair_indices) / args.batch_size
# Create placeholders
img_inp = tf.placeholder(tf.float32, shape=(args.batch_size, args.IMG_H, args.IMG_W, 3),
name='img_inp')
proj_gt_mask = tf.placeholder(tf.float32, shape=(args.batch_size,
args.N_VIEWS, args.IMG_H, args.IMG_W), name='proj_gt_mask')
proj_gt_rgb = tf.placeholder(tf.float32, shape=(args.batch_size,
args.N_VIEWS, args.IMG_H, args.IMG_W, 3), name='proj_gt_rgb')
pcl_gt = tf.placeholder(tf.float32, shape=(args.batch_size, args.N_PTS, 3),
name='pcl_gt_2K')
view_x = tf.placeholder(tf.float32, shape=(args.batch_size,args.N_VIEWS),
name='view_x')
view_y = tf.placeholder(tf.float32, shape=(args.batch_size,args.N_VIEWS),
name='view_y')
# Tensorboard summary placeholders
train_loss_summ = []
loss_names = ['loss_total', 'loss_rgb', 'loss_bce', 'loss_aff_fwd', 'loss_aff_bwd']
for idx, name in enumerate(loss_names):
train_loss_summ.append(tf.placeholder(tf.float32, shape=(), name=name))
val_loss_summ = []
val_loss_names = ['chamfer_dist', 'val_loss_rgb', 'val_loss_bce',
'val_aff_fwd', 'val_aff_bwd', 'chamf_fwd', 'chamf_bwd']
for idx, name in enumerate(val_loss_names):
val_loss_summ.append(tf.placeholder(tf.float32, shape=(), name=name))
# Build graph
with tf.variable_scope('recon_net'):
if args.skipconn:
out = recon_net_rgb_skipconn(img_inp, args) # (B,N,3+C)
else:
out = recon_net_rgb(img_inp, args) # (B,N,3+C)
pcl_out = out[:,:,:3] # (B,N,3)
rgb_out = out[:,:,3:] # (B,N,3)
'''
pcl_out_rot --> dict of camera coordinate rotated pcls {(B,N,3)}
pcl_out_persp --> dict of pcls after perspective_transform {(B,N,3)}
proj_pred_mask --> dict of silhouette projection maps {(B,64,64)}
proj_pred_rgb --> dict of rgb projection maps {(B,C,64,64)}
proj_gt --> depth projection, placeholder of shape (B,V,64,64)
proj_gt_mask --> mask projection, placeholder of shape (B,V,64,64)
proj_gt_rgb --> placeholder of shape (B,V,64,64,3)
loss --> final loss to optimize on
loss_depth --> {V:(B,64,64)} --> l1 map for depth maps
loss_bce --> {V:(B,64,64)} --> bce map for silhouette mask
loss_rgb --> {V:(B,64,64)} --> bce map for rgb maps
fwd --> {V:(B,64,64)} --> affinity loss fwd
bwd --> {V:(B,64,64)} --> affinity loss bwd
loss_fwd --> {()} --> reduce_mean values for each view
loss_bwd --> {()} --> reduce_mean values for each view
'''
pcl_out_rot = {}; proj_pred_mask={}; proj_pred_rgb={}; pcl_out_persp = {}; loss = 0.;
loss_bce = {}; fwd = {}; bwd = {}; loss_rgb = {}; prob = {}; prob_mask = {};
loss_fwd = {}; loss_bwd = {};
grid_dist_tensor = grid_dist(args.grid_h, args.grid_w)
for idx in range(0,args.N_VIEWS):
# 3D to 2D Projection
pcl_out_rot[idx] = world2cam(pcl_out, view_x[:,idx], view_y[:,idx], args.batch_size) ### for batch size 1
pcl_out_persp[idx] = perspective_transform(pcl_out_rot[idx], args.batch_size)
proj_pred_mask[idx] = cont_proj(pcl_out_persp[idx], args.IMG_H, args.IMG_W, args.SIGMA_SQ_MASK)
proj_pred_rgb[idx], prob[idx], prob_mask[idx] = rgb_cont_proj(pcl_out_persp[idx], rgb_out, args.N_PTS, args.IMG_H, args.IMG_W, args.WELL_RADIUS, args.BETA, 'rgb')
# Loss
# mask
loss_bce[idx], fwd[idx], bwd[idx] = get_loss_proj(proj_pred_mask[idx],
proj_gt_mask[:,idx], 'bce_prob', 1.0, True, grid_dist_tensor)
loss_bce[idx] = tf.reduce_mean(loss_bce[idx])
loss_fwd[idx] = 1e-4*tf.reduce_mean(fwd[idx])
loss_bwd[idx] = 1e-4*tf.reduce_mean(bwd[idx])
loss += args.wt_bce*loss_bce[idx] # add mask loss to main loss
# add affinity loss if necessary
loss += (args.wt_aff_fwd * loss_fwd[idx]) + \
(args.wt_aff_bwd * loss_bwd[idx])
# RGB reconstruction loss
loss_rgb[idx], _, _ = get_loss_proj(proj_pred_rgb[idx],
proj_gt_rgb[:,idx], 'l2_sq')
loss_rgb[idx] = tf.reduce_mean(loss_rgb[idx])
loss += args.wt_rgb*tf.reduce_mean(loss_rgb[idx])
loss = (loss / args.N_VIEWS)
pcl_out_scaled, pcl_gt_scaled = scale(pcl_gt, pcl_out)
dists_forward_scaled, dists_backward_scaled, chamfer_distance_scaled = get_chamfer_metrics(pcl_gt_scaled, pcl_out_scaled)
train_vars = [var for var in tf.global_variables() if 'recon_net' in var.name]
load_vars = [var for var in tf.global_variables() if 'Variable' not in var.name]
# Optimizer
opt = tf.train.AdamOptimizer(args.lr, beta1=0.9)
optim = opt.minimize(loss, var_list=train_vars)
# Training params
start_epoch = 0
max_epoch = args.max_epoch
# Define savers to load and store models
saver = tf.train.Saver(max_to_keep=2)
saver_load = tf.train.Saver(load_vars)
# Add Tensorboard summaries
loss_summ = []
for idx, name in enumerate(loss_names):
loss_summ.append(tf.summary.scalar(name, train_loss_summ[idx]))
train_summ = tf.summary.merge(loss_summ)
# Add Tensorboard summaries
loss_summ_val = []
for idx, name in enumerate(val_loss_names):
loss_summ_val.append(tf.summary.scalar(name, val_loss_summ[idx]))
val_summ = tf.summary.merge(loss_summ_val)
# GPU configurations
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Run session
with tf.Session(config=config) as sess:
print 'Session started'
train_writer = tf.summary.FileWriter(logs_folder+'/train', sess.graph_def)
val_writer = tf.summary.FileWriter(logs_folder+'/val', sess.graph_def)
print 'running initializer'
sess.run(tf.global_variables_initializer())
print 'done'
# Load previous checkpoint
init_flag = True
ckpt = tf.train.get_checkpoint_state(snapshot_folder)
if ckpt is not None:
print ('loading '+os.path.abspath(ckpt.model_checkpoint_path) + ' ....')
saver_load.restore(sess, os.path.abspath(ckpt.model_checkpoint_path))
st_iters = int(re.match('.*-(\d*)$', ckpt.model_checkpoint_path).group(1))
start_epoch = int(st_iters/batches)
st_batches = st_iters % batches
init_flag = False
since = time.time()
print '*'*30,'\n','Training Started !!!\n', '*'*30
if start_epoch == 0:
with open(log_file, 'w') as f:
f.write(' '.join(['Epoch','Train_loss','Val_loss','Minutes','Seconds','\n']))
train_loss_N, L_rgb_N, L_bce_N, L_fwd_N, L_bwd_N = [0.]*5
batch_out_mean = [0.]*5
epoch_out = [0.]*5
best_val_loss = 1e5
for i in xrange(start_epoch, max_epoch+1):
random.shuffle(train_pair_indices)
train_epoch_loss, train_epoch_rgb, train_epoch_bce, train_epoch_fwd, train_epoch_bwd = [0.]*5
if init_flag:
st_batches = 0
for b in xrange(st_batches, batches):
global_step = i*batches + b + 1
if global_step >= args.N_ITERS:
sys.exit('Finished Training')
# Load data
feed_dict, _ = create_feed_dict(train_models, train_pair_indices,
train_models_pcl, b, args)
# Calculate loss and run optimizer
L, _L_rgb, _L_bce, _L_fwd, _L_bwd, _ = sess.run([loss, loss_rgb,
loss_bce, loss_fwd, loss_bwd, optim], feed_dict)
L_rgb, L_bce, L_fwd, L_bwd = get_average_from_dict([_L_rgb,
_L_bce, _L_fwd, _L_bwd])
batch_out = [L, L_rgb, L_bce, L_fwd, L_bwd]
# Use loss values averaged over N batches for logging
batch_out_mean = average_stats(batch_out_mean, batch_out,
b%args.print_n)
train_loss_N, L_rgb_N, L_bce_N, L_fwd_N, L_bwd_N = batch_out_mean
epoch_out = average_stats(epoch_out, batch_out,
global_step%batches)
if global_step % args.print_n == 0:
feed_dict_summ = {}
for idx, item in enumerate(batch_out_mean):
feed_dict_summ[train_loss_summ[idx]] = item
_summ = sess.run(train_summ, feed_dict_summ)
# Add to tensorboard summary
train_writer.add_summary(_summ, global_step)
time_elapsed = time.time() - since
_pcl_out = sess.run(pcl_out, feed_dict)
print 'Iter = {} Loss = {:.5f} RGB = {:.5f} BCE = {:.5f} FWD = {:.5f} BWD = {:.5f} Time = {:.0f}m {:.0f}s'.format(global_step, train_loss_N, L_rgb_N, L_bce_N, L_fwd_N, L_bwd_N, time_elapsed//60, time_elapsed%60)
if (global_step-1) % args.save_n == 0:
save_outputs(val_models, val_pair_indices, val_models_pcl,
global_step, args)
if global_step % args.save_model_n == 0 and i != -1:
print 'Saving Model ....................'
saver.save(sess, join(snapshot_folder, 'model'), global_step=global_step)
print '..................... Model Saved'
# Val metrics
if global_step % args.save_n == 0:
val_t_st = time.time()
val_out = get_epoch_loss(val_models, val_pair_indices,
val_models_pcl, args)
feed_dict_summ = {}
for idx, item in enumerate(val_out):
feed_dict_summ[val_loss_summ[idx]] = item
_summ = sess.run(val_summ, feed_dict_summ)
val_epoch_loss = val_out[0]
val_writer.add_summary(_summ, global_step)
val_t_sp = time.time()
print 'Val Epoch Loss: {:.4f}'.format(val_epoch_loss*10000)
# Update best model if necessary
if (val_epoch_loss < best_val_loss):
saver.save(sess, join(snapshot_folder, 'best', 'best'))
os.system('cp %s %s'%(join(snapshot_folder, 'best/*'), best_folder))
best_val_loss = val_epoch_loss
print 'Best model at iter %s saved, loss = %.4f' %(global_step, best_val_loss*10000)
            time_elapsed = time.time() - since
            train_epoch_loss, train_epoch_rgb, train_epoch_bce, train_epoch_fwd, train_epoch_bwd = epoch_out
with open(log_file, 'a') as f:
epoch_str = '{} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.0f} {:.0f}'.format(i,
train_epoch_loss, train_epoch_rgb,
train_epoch_bce, train_epoch_fwd, train_epoch_bwd,
time_elapsed//60, time_elapsed%60)
f.write(epoch_str+'\n')
print '-'*65 + ' EPOCH ' + str(i) + ' ' + '-'*65
epoch_str = 'TRAIN Loss: {:.6f} RGB: {:.6f} BCE: {:.6f} FWD: {:.6f} BWD: {:.6f} Time:{:.0f}m {:.0f}s'.format(\
train_epoch_loss, train_epoch_rgb,
train_epoch_bce, train_epoch_fwd, train_epoch_bwd,
time_elapsed//60, time_elapsed%60)
print epoch_str
print '-'*140
print
| [
"[email protected]"
] | |
1f750de145ff1d1760c2e589a0d0a0c59f1a2da7 | 4f0b846a4cfb36803690480e78104072d28bcf9e | /week6Python/pset6/readability/readability.py | 863e53525facf8c895a95cd8cc9be39ffe469368 | [] | no_license | Jordhan-Carvalho/cs50-examples | 0c6f1b2b5b7f9078d827f1ee417713b1c9840681 | 2b51b805f89c325744d553aa9acab63e8c9a6d6f | refs/heads/master | 2022-10-06T10:48:23.114773 | 2020-04-03T14:03:53 | 2020-04-03T14:03:53 | 245,007,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | from cs50 import get_string
text = get_string("Text: ")
countWords = len(text.split())
# count the letters: keep only the alphabetic characters and take the length
countLetter = len(list(filter(lambda x: x.isalpha(), list(text))))
countSentences = 0
for c in text:
if c == "." or c == "!" or c == "?":
countSentences += 1
# L and S are the averages of letters and sentences per 100 words
L = (countLetter * 100) / countWords
S = (countSentences * 100) / countWords
colemanIndex = 0.0588 * L - 0.296 * S - 15.8
if colemanIndex < 1:
print("Before Grade 1")
elif colemanIndex > 16:
print("Grade 16+")
else:
print(f"Grade {round(colemanIndex)}")
| [
"[email protected]"
] | |
baeac5bf1cea541d8480d0f20f6e5a423da871c2 | 409625b6cff358da4074b9c095f663bee9c027e4 | /01-Introductions/02-While_loops/syntax.py | ed972146c32f0b620a3a8f9247e948f930aa965b | [
"MIT"
] | permissive | francisrod01/udacity_python_foundations | f610b683d85eaffa211ca07010187ebb1a0df62e | 2a384cf35ce7eff547c88097cdc45cc4e8fc6041 | refs/heads/master | 2021-01-24T06:42:26.181582 | 2017-07-29T18:52:44 | 2017-07-29T18:52:44 | 93,314,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | #!/usr/bin/python3.4
count = 0
while count < 9:
print("The count is: ", count)
count = count + 1
print("Good bye")
| [
"[email protected]"
] | |
0464801decb99dbb953fb042822a2cbb7de6d889 | 5e6af0801f40e2dfd8dddff100236822ecca2093 | /Calculations/separationArm.py | 3a5f9a01fa335f2f2f3bb1bd646241b643763301 | [] | no_license | jcchin/ANGEL | 40430afeff50aaaa51ce954b1840ac2804300442 | 3b9deca878f05b82216862b71601fbe95b747f51 | refs/heads/master | 2021-01-02T08:19:26.142272 | 2015-11-15T04:15:26 | 2015-11-15T04:15:26 | 22,022,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,598 | py | import time
import numpy as np
from scipy.interpolate import interp1d
from math import log, pi, sqrt, sin, cos
from numpy import arctan
#from matplotlib import pylab as plt
#from matplotlib import mlab as mlab
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float, Bool, Str
from openmdao.lib.drivers.api import CaseIteratorDriver, BroydenSolver, NewtonSolver
from openmdao.lib.casehandlers.api import BSONCaseRecorder, CaseDataset
class pendulum(Component):
""" Calculate final angular velocity and tilt after t_diff """
#--Inputs--
bfs_height = Float(0.3556, units='m', iotype='in', desc="BFS height (y direction, pin to bottom of BFS)")
bfs_width = Float(0.635, units='m', iotype='in', desc="BFS width (x direction)")
# inputs
t_diff = Float( units='s', iotype='in', desc="time between pin releases")
g = Float(9.81, units='m/s**2', iotype='in', desc="gravity")
n = Float(10000, iotype='in', desc="number of time steps")
t_0 = Float(0, iotype='in', desc="number of time steps")
alpha_0 = Float(0, units='rad/s**2', iotype='in', desc="initial angular acceleration")
    omega_0 = Float(0, units='rad/s', iotype='in', desc="initial angular velocity")
# outputs
inertia = Float(units='m**2',iotype='out',desc="moment of inertia divided by mass in kg")
theta_0 = Float(units='rad', iotype='out', desc="initial angle", low=0, high=pi)
R_pendulum = Float(units='m', iotype='out', desc="distance from CG to sep pin", low=0, high=4)
t_step = Float(iotype='out', desc='time interval', low=0, high=0.1)
alpha = Float(units='rad/s**2', iotype='out', desc="release angular acceleration")
    omega = Float(units='rad/s', iotype='out', desc="release angular velocity")
theta = Float(units='rad', iotype='out', desc="release angle", low=-pi, high=pi)
def execute(self):
"""Calculate Various Paramters"""
#geom calcs
self.theta_0 = arctan((self.bfs_width/2.)/self.bfs_height)
self.R_pendulum = sqrt(self.bfs_width/2**2+self.bfs_height**2) #pythag theorem
#dynamics calcs
#self.inertia = self.R_pendulum**2 #idealized pendulum
#self.inertia = ((self.bfs_width**2 + self.bfs_height**2)/12) + ((self.bfs_width/2)+self.bfs_height) #rough calc
self.inertia = 0.25 #see Tom Cressman's excel spreadsheet
self.alpha = self.alpha_0 #initial conditions = 0
self.omega = self.omega_0 #initial conditions = 0
self.theta = self.theta_0 #initial conditions = 0
self.t_step = (self.t_diff - self.t_0)/self.n
#crude time marching, rather than RK4, 10000 time steps.
while(self.t_0 < self.t_diff): #march slowly in time. alpha = (-gR/I)sin(theta)
self.omega = self.omega + self.t_step*(-self.g*self.R_pendulum/self.inertia)*sin(self.theta) # update velocity
self.theta = self.theta + self.omega*self.t_step # update position
self.t_0 = self.t_0 + self.t_step # update time
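
def rk4_pendulum_step(theta, omega, coeff, dt):
    """One classical RK4 step for the pendulum ODE theta'' = coeff*sin(theta),
    with coeff = -g*R_pendulum/inertia as in pendulum.execute() above.
    A sketch of a higher-order alternative to the crude Euler marching; it is
    not wired into the component, just provided for comparison."""
    def f(th, om):
        # state derivative: (theta', omega')
        return (om, coeff * sin(th))
    k1 = f(theta, omega)
    k2 = f(theta + 0.5 * dt * k1[0], omega + 0.5 * dt * k1[1])
    k3 = f(theta + 0.5 * dt * k2[0], omega + 0.5 * dt * k2[1])
    k4 = f(theta + dt * k3[0], omega + dt * k3[1])
    theta_new = theta + (dt / 6.) * (k1[0] + 2 * k2[0] + 2 * k3[0] + k4[0])
    omega_new = omega + (dt / 6.) * (k1[1] + 2 * k2[1] + 2 * k3[1] + k4[1])
    return theta_new, omega_new
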
class swing(Component):
#geometry
agu_height = Float(0.4826, units='m', iotype='in', desc="AGU Height (y direction, pin to top of drogue)")
agu_width = Float(0.4064, units='m', iotype='in', desc="AGU width (x direction)")
#sep_length = Float(0.0381, units='m', iotype='in', desc="x-distance from pin to AGU")
bfs_width = Float(0.635, units='m', iotype='in', desc="BFS width (x direction)")
#dynamic
    omega = Float(units='rad/s', iotype='in', desc="release angular velocity")
theta = Float(units='rad', iotype='in', desc="release angle")
R_pendulum = Float(units='m', iotype='in', desc="distance from CG to sep pin")
g = Float(9.81, units='m/s**2', iotype='in', desc="gravity")
theta_0 = Float(pi/4, units='rad', iotype='in', desc="initial angle")
#outputs
y_margin = Float(units='m', iotype='out', desc="y distance from drogue corner to sep at pin release", low=0, high=10)
x_margin = Float(units='m', iotype='out', desc="x distance from drogue corner to sep at pin release", low=-1, high=1)
k_theta_0 = Float(units='rad', iotype='out', desc="initial top angle", low=0, high=pi)
k = Float(units='m', iotype='out', desc="x distance from drogue corner to pivot point",low=0, high=10)
k_theta = Float(units='rad', iotype='out', desc="rotated top angle", low=-pi, high=pi)
velocity = Float(units='m/s', iotype='out', desc="tangential velocity")
x_veloc = Float( units='m/s', iotype='out', desc="tangential velocity")
t_drop = Float(units='s', iotype='out', desc="time to fully clear Y height (with just Y movement)")
x_dist = Float(units='m', iotype='out', desc="x distance from drogue corner to sep at clearance point")
def execute(self):
"""Calculate Various Paramters"""
self.velocity = self.omega * self.R_pendulum
self.x_veloc = self.velocity * sin(self.theta)
self.y_veloc = self.velocity * cos(self.theta)
v = self.y_veloc #vertical velocity
#distance BFS pivot corner to opposite drogue corner
#recalulate corner point relative to separation arm
self.k_theta_0 = arctan(((self.bfs_width/2) + (self.agu_width/2)) / self.agu_height)
self.k = sqrt(((self.bfs_width/2) + (self.agu_width/2))**2 + (self.agu_height**2))
self.k_theta = self.k_theta_0 + (self.theta_0-self.theta)
self.x_margin = self.bfs_width - self.k*sin(self.k_theta)
self.y_margin = self.k*cos(self.k_theta)
# free fall h = 1/2*g*t^2 + v_0t, solve for t, quadratic formula
#self.t_drop = v-sqrt(v**2-(2*self.g*self.y_margin)/self.g)
self.t_drop = sqrt(2*self.y_margin/self.g)
self.x_dist = (self.x_veloc * self.t_drop) + self.x_margin
class system(Assembly): #not currently used, could be used if we exand the calcs...
def configure(self):
#Add Components
self.add('swing', swing())
self.add('pendulum', pendulum())
driver = self.add('driver', BroydenSolver())
#driver.add_parameter('pendulum.t_diff',low=0.,high=10.)
#driver.add_constraint('swing.x_dist=0')
driver.workflow.add(['pendulum', 'swing'])
#Boundary Input Connections
self.connect('pendulum.theta_0','swing.theta_0')
self.connect('pendulum.theta','swing.theta')
self.connect('pendulum.omega','swing.omega')
#self.connect('pendulum.alpha','swing.alpha')
self.connect('pendulum.R_pendulum','swing.R_pendulum')
if __name__ == '__main__' and __package__ is None:
#top = system()
pen = pendulum()
pen.t_diff = 0.3 #modify here 0.068 original
bfs_width = 0.635 #modify here 0.635 original
pen.bfs_width = bfs_width
#initial run to converge things
pen.run()
print "------- -------"
print "t_diff: ", pen.t_diff , "seconds"
print "theta_0: ", pen.theta_0 , "rad"
print "theta : ", pen.theta , "rad"
print "omega : ", pen.omega , "rad/s"
print "inertia/m:", pen.inertia
sw = swing()
#sw.sep_length = 0.03
sw.theta_0 = pen.theta_0
sw.theta = pen.theta
sw.omega = pen.omega
sw.R_pendulum = pen.R_pendulum
sw.bfs_width = bfs_width
sw.run()
print "k_theta_0", sw.k_theta_0, 'rad'
print "y_margin: ", sw.y_margin, 'm'
print "x_margin: " , sw.x_margin, 'm'
print "x_veloc: " , sw.x_veloc, 'm/s'
print "t_drop: " , sw.t_drop, 's'
print "x_Dist: " , sw.x_dist, 'm'
actuator_speed = 0.00889 #m/s
print
print "pin margin: ", actuator_speed*pen.t_diff*1000, "mm"
| [
"[email protected]"
] | |
ae796b3097dabfd106f7fd7c8273d327130a63dc | 64181e8126bf77125e2a55df0d739643a8cc2ded | /transformada.py | 15ef06690c9bc07eb6909c98aee5a60523d6e274 | [] | no_license | pereznator/IPC2_Proyecto2_201900810 | ddac37439713d09a3a5f66dfb1a2a539caad1ff8 | 92f9a955d8fc469380d71ad494475a277568f42b | refs/heads/master | 2023-04-04T23:18:43.481296 | 2021-04-06T01:34:58 | 2021-04-06T01:34:58 | 352,747,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | class Transformada:
resultado = []
def __init__(self, matriz):
self.matriz = matriz
self.verMatriz()
def verMatriz(self):
self.arreglo = []
faux = self.matriz.listaFilas.primero
for f in range(self.matriz.listaFilas.cuenta):
caux = faux.primero
fila = []
for c in range(faux.cuenta):
fila.append({'dato': caux.dato, 'x': caux.x, 'y': caux.y})
caux = caux.siguiente
self.arreglo.append(fila)
faux = faux.siguiente
    def rotacionHorizontal(self):
        # flip the matrix top-to-bottom (reverse the row order)
        self.resultado = self.arreglo[::-1]
    def rotacionVertical(self):
        # flip the matrix left-to-right (reverse each row)
        self.resultado = [fila[::-1] for fila in self.arreglo]
    def transpuesta(self):
        # one bucket per column of the source matrix
        columnas = []
        for c in range(len(self.arreglo[0])):
            columnas.append([])
        for f in self.arreglo:
            for c in f:
                columnas[c['x'] - 1].append(c)
        # the columns of the original become the rows of the transpose;
        # assigning columnas directly also avoids the IndexError the old
        # code hit on non-square matrices
        self.resultado = columnas
def limpiarZona(self, coordenadas):
for y in range(int(coordenadas['y1'])-1, int(coordenadas['y2'])):
for x in range(int(coordenadas['x1'])-1, int(coordenadas['x2'])):
self.arreglo[y][x]['dato'] = '-'
self.resultado = self.arreglo
def agregarHorizontal(self, datos):
for dato in self.arreglo[int(datos['fila'])-1]:
if int(datos['x1']) <= dato['x'] <= int(datos['x2']):
dato['dato'] = '*'
self.resultado = self.arreglo
def agregarVertical(self, datos):
f = 1
for fila in self.arreglo:
if int(datos['y1']) <= f <= int(datos['y2']):
for celda in fila:
if celda['x'] == int(datos['columna']):
celda['dato'] = '*'
f+=1
self.resultado = self.arreglo
def agregarRectangulo(self, datos):
limy = datos['y'] + datos['filas']
limx = datos['x'] + datos['columnas']
celdas = []
for y in range(datos['y'], limy):
for x in range(datos['x'], limx):
celdas.append({'x': x-1, 'y': y-1})
for c in celdas:
self.arreglo[c['y']][c['x']]['dato'] = '*'
self.resultado = self.arreglo
    def agregarTriangulo(self, datos):
        # The original version built a `triangulo` scaffold (row r holding
        # r+1 empty cells) but never wrote anything into the matrix.
        # Completed here under that assumption: row r of the triangle fills
        # r+1 cells from the anchor column, clipped to the requested span.
        for r in range(datos['filas']):
            y = datos['y'] - 1 + r
            for c in range(min(r + 1, datos['columnas'])):
                x = datos['x'] - 1 + c
                self.arreglo[y][x]['dato'] = '*'
        self.resultado = self.arreglo
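# Assumed usage sketch (the real `matriz` argument is the project's
# linked-list matrix, whose construction is not shown in this file):
#   t = Transformada(matriz)      # snapshots the matrix into t.arreglo
#   t.rotacionHorizontal()        # flip rows top-to-bottom
#   t.limpiarZona({'x1': '1', 'y1': '1', 'x2': '2', 'y2': '2'})
#   resultado = t.resultado       # list of rows of {'dato', 'x', 'y'} cells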
| [
"[email protected]"
] | |
b9a1445acb587e66c1aae0c2b015dd133b28be69 | 810d3856f8c1bb5364ea20917690a8f3b5e83e66 | /other/实验报告/NCM/three_spline_interpolation.py | 45541a7b6a9066d508a0503beb8f992a27f0b8b0 | [] | no_license | Saber-f/code | 6c36bff79a6590bf8eb4f740cf5c403d31ddb17e | 9e8646a320d91c0019493854f879bd6aefa6521d | refs/heads/master | 2021-07-15T21:53:54.440183 | 2019-05-20T07:18:37 | 2019-05-20T07:18:37 | 129,101,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,677 | py | '''三次样条插值,矩阵解'''
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
# 将字符列表转换为浮点数列表
def str2float(L):
i = 0;
while i < len(L):
L[i] = float(L[i])
i += 1
# 得到m x n 的零矩阵
def get_mat(m,n):
A = []
i = 0
while i < m:
j = 0
A.append([])
while j < n:
A[i].append(0)
j += 1
i += 1
A = mat(A)
return A
# 加入f(xi)=yi
def F1(A, B, X, Y):
i = 1
while i < len(X):
A[2*i-2,4*i-4] = X[i-1]**3
A[2*i-2,4*i-3] = X[i-1]**2
A[2*i-2,4*i-2] = X[i-1]
A[2*i-2,4*i-1] = 1
B[2*i-2,0] = Y[i-1]
A[2*i-1,4*i-4] = X[i]**3
A[2*i-1,4*i-3] = X[i]**2
A[2*i-1,4*i-2] = X[i]
A[2*i-1,4*i-1] = 1
B[2*i-1,0] = Y[i]
i += 1
# 加入边界条件一二阶导数相等条件
def F2(A,X):
i = 2*(len(X)-1)
j = 1
while j < len(X) - 1:
A[i,4*j-4] = 3*X[j]**2
A[i,4*j-3] = 2*X[j]
A[i,4*j-2] = 1
A[i,4*j] = -3*X[j]**2
A[i,4*j+1] = -2*X[j]
A[i,4*j+2] = -1
A[i+len(X)-2,4*j-4] = 6 * X[j]
A[i+len(X)-2,4*j-3] = 2
A[i+len(X)-2,4*j] = -6 * X[j]
A[i+len(X)-2,4*j+1] = -2
j += 1
i += 1
# 加入边界条件
def F3(A, B, X):
print('边界条件::')
f10 = input('f\'('+str(X[0])+') = ')
f20 = input('f\'\'('+str(X[0])+') = ')
f1n = input('f\'('+str(X[len(X)-1])+') = ')
f2n = input('f\'\'('+str(X[len(X)-1])+') = ')
i = 1;
if f10 != '':
A[len(A)-i,0] = 3*X[0]**2
A[len(A)-i,1] = 2*X[0]
A[len(A)-i,2] = 1
B[len(A)-i,0] = f10
i += 1
if f1n != '':
A[len(A)-i,len(A)-4] = 3*X[len(X)-1]**2
A[len(A)-i,len(A)-3] = 2*X[len(X)-1]
A[len(A)-i,len(A)-2] = 1
B[len(A)-i,0] = f1n
i += 1
if f20 != '' and i < 3:
A[len(A)-i,0] = 6*X[0]
A[len(A)-i,1] = 2
B[len(A)-i,0] = f20
i += 1
if f2n != '' and i < 3:
A[len(A)-i,len(A)-4] = 6*X[len(X)-1]
A[len(A)-i,len(A)-3] = 2
B[len(A)-i,0] = f2n
i += 1
# 得到答案(系数矩阵)
def get_ans(A, B):
temp = A.I*B;
ANS = []
i = 0
while i < len(B) / 4:
ANS.append([]);
ANS[i].append(temp[i*4,0])
ANS[i].append(temp[i*4+1,0])
ANS[i].append(temp[i*4+2,0])
ANS[i].append(temp[i*4+3,0])
i += 1
return mat(ANS)
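# Optional sanity check (assumed helper, not in the original script): every
# solved segment i covers [X[i], X[i+1]] and should pass exactly through its
# two knots.
def check_interpolation(ANS, X, Y, tol=1e-6):
    for i in range(len(X) - 1):
        for x, y in ((X[i], Y[i]), (X[i + 1], Y[i + 1])):
            s = ANS[i, 0]*x**3 + ANS[i, 1]*x**2 + ANS[i, 2]*x + ANS[i, 3]
            assert abs(s - y) < tol, (i, x, s, y)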
# 画图
def plot_ans(A,X):
i = 1
while i < len(X):
px = np.arange(min(X[i-1],X[i]),max(X[i-1],X[i])+abs(X[i]-X[i-1])/1000,abs(X[i]-X[i-1])/100)
j = 0
py = []
while j < len(px):
py.append(A[i-1,0]*px[j]**3+A[i-1,1]*px[j]**2+A[i-1,2]*px[j]+A[i-1,3])
j += 1
plt.plot(px,py)
i += 1
plt.show()
# 求导
def deriva(ANS):
i = 0
while i < len(ANS):
j = 3
while j > 0:
ANS[i,j] = ANS[i,j-1]*(4-j)
j -= 1
ANS[i,0] = 0;
i += 1
while True:
    tX = input('Enter x values (space separated)::').split()
    tY = input('Enter y values (space separated)::').split()
    if len(tX) == len(tY):
        str2float(tX)
        str2float(tY)
        # sort the points by x as floats (string argsort would put '10'
        # before '2'); the original assigned X = tX after sorting, which
        # silently discarded the sorted order
        order = np.argsort(tX)
        X = [tX[t] for t in order]
        Y = [tY[t] for t in order]
        break
A = get_mat(4*(len(X)-1),4*(len(X)-1))
B = get_mat(4*(len(X)-1),1)
F1(A, B, X, Y)
F2(A, X)
F3(A, B, X)
ANS = get_ans(A, B)
print(ANS)
plt.scatter(X,Y,marker = 'x')
plot_ans(ANS,X)
deriva(ANS)
plot_ans(ANS,X)
deriva(ANS)
plot_ans(ANS,X)
deriva(ANS)
plot_ans(ANS,X)
| [
"[email protected]"
] | |
9b655a2950c9d31a015a40bfc9dd87f0b5641102 | 9c47be5ddab07802e5812b8f1a2f6830b9323f10 | /untitled/modules/arr_procedures.py | eef9193968c70b9d41514f0724e83fe31f4609c7 | [] | no_license | hotriluc/pyth_diploma | db2c8d41dd20f69ac0028f8eb6dc08ab8efb9202 | e2a92802487cd4d67d7ab05f3f19254e08ed746b | refs/heads/master | 2021-07-10T06:05:34.582939 | 2020-10-30T20:24:04 | 2020-10-30T20:24:04 | 210,030,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,223 | py | import copy
import itertools
import pickle
import numpy as np
from modules.calculations import coprimes, printFullStat, getPair, getMax
#==============================BASIC MANIPULATION WITH ARRAYS==============================
# write string
def writeListInFile(aList: list, filepath):
    with open(filepath, "w") as f:
        for i in range(0, len(aList)):
            f.write(str(i) + ") " + str(aList[i]) + "\n")
# write binary mode
def writeListInBinFile(aList:list,file_name):
with open(file_name, 'wb') as F:
# Dump the list to file
pickle.dump(aList, F)
F.close()
def loadFromBinFile(file_name):
with open(file_name, 'rb') as F:
aList = pickle.load(F)
F.close()
return aList
def print_2d_arr(aList):
for row in aList:
for item in row:
print("{0:5d}".format(item), end="")
print("")
# print list of list(signals)
def print_sig_in_list(aList):
i = 0
for item in aList:
print(i, ") ", item)
i += 1
# using simple pop and insert we created Shifting list with zeroing
def ShiftRight(aList: list, steps: int):
# for negative steps
if steps < 0:
steps = abs(steps)
for i in range(steps):
# pop first element
# everything is shifted to the left after we pop
aList.pop(0)
# adding to the end 0
aList.append(0)
else:
for i in range(steps):
# insert zero to the 0 position
aList.insert(0, 0)
# poping last el
# everything is shifted to right
aList.pop()
def CyclicShiftRight(aList: list, steps: int):
# for negative steps
if steps < 0:
steps = abs(steps)
for i in range(steps):
# adding to the end popped 0th el
aList.append(aList.pop(0))
else:
for i in range(steps):
# adding to the beginning popped(last) el
aList.insert(0, aList.pop())
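# Quick illustration (assumed example, not part of the module):
#   a = [1, 2, 3, 4]
#   ShiftRight(a, 1)        # a -> [0, 1, 2, 3]  (shift with zero fill)
#   b = [1, 2, 3, 4]
#   CyclicShiftRight(b, 1)  # b -> [4, 1, 2, 3]  (wrap around)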
# def ShiftRight(aList:list,pos:int):
# return aList[pos:len(aList):]*0 + aList[0:pos:]
# for i in range(1,len(aList)-1):
#============================== ADVANCED MANIPULATION WITH ARRAYS(SIGNALS==============================
#============================== DECIMATION==============================
def decimation(a_List: list, b_List: list, d: int):
for i in range(0, len(a_List)):
pos = (d + d * i) % len(a_List)
b_List[i] = a_List[pos]
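# Illustration (assumed example): for a_List = [a0, a1, a2, a3] and d = 3 the
# positions visited are 3, 6 % 4 = 2, 9 % 4 = 1, 12 % 4 = 0, so b_List becomes
# [a3, a2, a1, a0] -- i.e. every d-th sample of the periodic signal.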
# Getting all signals created with decimation
# (number of signals = Euler totient function of signal length)
def getDecimation(source_signal):
    # get the source signal length
sig_len = len(source_signal)
    # copy the source signal (so the original is not modified)
sig1_ = copy.deepcopy(source_signal)
# Creating list for storing signals
decimation_list = list()
    # Get the coprimes of the signal length, e.g. len = 256 -> coprimes = [1, 3, 5, 7 ... 255]
    # index 1 because the coprimes method returns a tuple of 2 elements
    # (the number of total coprimes, and the list of coprimes)
coprime_list = coprimes(sig_len)[1]
# For each coprime of source signal length we will create a signal using decimation
for i in range(len(coprime_list)):
# tmp for signal we are going to get with decimation
sig2_ = [0 for i in range(sig_len)]
# creating rest signals with decimation
decimation(sig1_, sig2_, coprime_list[i])
        # append decimation_list with a list containing the decimated signal
        # and the decimation coefficient that was used to create it
decimation_list.append([sig2_, coprime_list[i]])
return decimation_list
# decimation_list consists of lists holding a signal and its decimation coefficient
def get_decimated_signals(decimation_list):
only_decimated_signals = list()
for i in range(0, len(decimation_list)):
only_decimated_signals.append(decimation_list[i][0])
return only_decimated_signals
#============================== DERIVATIVE SIGNALS FORMATION==============================
# ensembles must contain the same number of signals
# USED WITH HADAMARD DISCRETE SIGNALS in order to get derivative signals
def derivativeSig(ansamble_sig1: list, ansamble_sig2: list):
der_sig_list = []
for i in range(0, len(ansamble_sig1)):
tmp = np.array(ansamble_sig1[i]) * np.array(ansamble_sig2[i])
der_sig_list.append(tmp.tolist())
return der_sig_list
def derivativeSigALL(ansamble_sig1: list, ansamble_sig2: list):
der_sig_list = []
sig_comb_list = []
for i in range(0, len(ansamble_sig1)):
for j in range(0, len(ansamble_sig2)):
der_sig_list.append(np.multiply(ansamble_sig1[i], ansamble_sig2[j]).tolist())
sig_comb_list.append((i, j))
return der_sig_list, sig_comb_list
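# Illustration (assumed): for two ensembles of 2 signals each, der_sig_list
# holds the 4 element-wise products and sig_comb_list records which pair
# produced each one, in the order [(0, 0), (0, 1), (1, 0), (1, 1)].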
# Same as above but uses starting and ending points for the inner cycle (from, to).
# Of course you could use the first version by passing a slice of the array (list),
# but then the indices would start from 0 within the slice, while this
# from/to form lets us identify exactly which Hadamard signal was used to
# form the derivative.
def derivativeSigFromTo(ansamble_sig1: list, hadamar_sig: list,hadam_from,hadam_to):
der_sig_list = []
sig_comb_list = []
for i in range(0, len(ansamble_sig1)):
for j in range(hadam_from, hadam_to):
der_sig_list.append(np.multiply(ansamble_sig1[i], hadamar_sig[j]).tolist())
sig_comb_list.append((i, j))
return der_sig_list, sig_comb_list
# You should get combinations using function above (derivativeSigFromTo)
#
def print_derivative(dersig, combinations):
for i in range(0, len(dersig)):
print("CS#{0} and HADAMAR#{1}".format(combinations[i][0], combinations[i][1]))
print(dersig[i])
#============================== CORRELATION==============================
def calculate_correlation_coef(sig1_: list, sig2_: list):
R = 0
for i in range(0, len(sig1_)):
tmp = sig1_[i] * sig2_[i]
R += tmp
return R
# source_sig - the source signal, NOT shifted; shifted_sig - a copy of the signal that WILL BE SHIFTED
# (for periodic autocorrelation) / another signal (for periodic cross-correlation).
# flag = True for APERIODIC correlation, otherwise PERIODIC by default.
def getCorellation(source_sig: list, shifted_sig: list, flag: bool = False):
    # Create a copy of the shifted signal so we do not transform the caller's
    # array (it may be reused); in Python a list holds references to objects
tmp_shifted_sig = copy.deepcopy(shifted_sig)
correl_list = list()
r = calculate_correlation_coef(source_sig, tmp_shifted_sig)
correl_list.append(r)
for i in range(0, len(source_sig)):
if flag == False:
CyclicShiftRight(tmp_shifted_sig, 1)
else:
ShiftRight(tmp_shifted_sig, 1)
r = calculate_correlation_coef(source_sig, tmp_shifted_sig)
correl_list.append(r)
return correl_list
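# Assumed usage sketch:
#   pfak = getCorellation(sig, sig)          # periodic ACF of sig
#   afak = getCorellation(sig, sig, True)    # aperiodic ACF (zero-fill shift)
#   pfvk = getCorellation(sig_a, sig_b)      # periodic CCF of two signals
# Each call returns len(sig) + 1 coefficients (shifts 0 .. len(sig)).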
# E.g., you have an ensemble of signals and you want all cross-correlations between all possible pairs
# (for derivative signals built with HADAMARD)
# ONLY TO GET CROSS-CORRELATION
def cross_corel_btwn_pairs(list_with_signals: list, mode_name):
aList = list()
for pair in itertools.combinations(list_with_signals, 2):
a_sig, b_sig = pair
# print(pair)
if mode_name == "PFVK":
r = getCorellation(a_sig, b_sig)
if mode_name == "AFVK":
r = getCorellation(a_sig, b_sig, True)
aList.append(r)
return aList
# the same logic as above function
# but here you passing list pair
# this one without usin itertools
def cross_corel_btwn_pairs2(list_with_signals: list, pair_list: list, mode_name):
aList = list()
for x, y in pair_list:
# print(pair)
if mode_name == "PFVK":
r = getCorellation(list_with_signals[x], list_with_signals[y])
if mode_name == "AFVK":
r = getCorellation(list_with_signals[x], list_with_signals[y], True)
aList.append(r)
return aList
# Given a list of signals,
# get a list of lists with each signal's periodic/aperiodic autocorrelation function
def auto_corel_all(list_with_signals: list, mode_name):
aList = list()
sig_num_list = list()
for item in list_with_signals:
if mode_name == "PFAK":
r = getCorellation(item, item)
if mode_name == "AFAK":
r = getCorellation(item, item, True)
aList.append(r)
return aList
# the source signal against the other signals
# getting list of correlations of all signals
def corel_source_and_rest(source_sig, list_with_signals: list, mode_name):
aList = list()
for item in list_with_signals:
if mode_name == "PFVK":
r = getCorellation(source_sig, item)
if mode_name == "AFVK":
r = getCorellation(source_sig, item, True)
if mode_name == "PFAK":
r = getCorellation(item, item)
if mode_name == "AFAK":
r = getCorellation(item, item, True)
# print(r)
aList.append(r)
return aList
#============================== TESTING CORRELATION==============================
# Used for testing an ensemble of signals and printing their statistics
# using closure to keep DRY
def ansamble_correlation(mode):
def fak_stat(ansamble_of_sig):
print(mode)
sig_len = len(ansamble_of_sig[0])
if len(ansamble_of_sig) > 0:
asnsam_fak_list = auto_corel_all(ansamble_of_sig, mode)
printFullStat(asnsam_fak_list, 1, sig_len - 1, True)
printFullStat(asnsam_fak_list, 1, sig_len - 1)
def fvk_stat(ansamble_of_sig):
print(mode)
sig_len = len(ansamble_of_sig[0])
if len(ansamble_of_sig) > 0:
pair_list = getPair([i for i in range(0, len(ansamble_of_sig))])
# cross_correl_btwn_pairs you can use anothe method correl_source_and_rest
fvk_sig_list = cross_corel_btwn_pairs(ansamble_of_sig, mode)
printFullStat(fvk_sig_list, 0, sig_len, True, list_of_num=pair_list)
printFullStat(fvk_sig_list, 0, sig_len, list_of_num=pair_list)
if (mode == 'PFAK') or (mode =='AFAK'):
return fak_stat
elif (mode == 'PFVK') or (mode =='AFVK'):
return fvk_stat
    else:
        print("Unknown mode:", mode)
# FOR 1 SIGNAL
def test_auto_correl (source_sig):
print("Signal: ",source_sig)
start_point = 1
finish_point = len(source_sig)-1
sig1_ = copy.deepcopy(source_sig)
sig2_ = copy.deepcopy(source_sig)
print("PFAK")
pereodic_auto_corel_list = getCorellation(sig1_, sig2_)
print("R = ", pereodic_auto_corel_list)
print("Rmax = ", getMax(pereodic_auto_corel_list, start_point, finish_point , True))
print("AFAK")
apereodic_auto_corel_list = getCorellation(sig1_, sig2_, True)
print("R = ", apereodic_auto_corel_list)
print("Rmax = ", getMax(apereodic_auto_corel_list, start_point, finish_point))
return pereodic_auto_corel_list, apereodic_auto_corel_list
# 1 source signal against each signal from the ensemble
def test_cross_correl (source_sig, ansamble_of_sig):
sig1_ = copy.deepcopy(source_sig)
ansamble_of_pereodic_cross_corel_list = corel_source_and_rest(sig1_, ansamble_of_sig, "PFVK")
print("\nPFVK")
printFullStat(ansamble_of_pereodic_cross_corel_list, 0, len(source_sig), True)
printFullStat(ansamble_of_pereodic_cross_corel_list, 0, len(source_sig))
ansamble_of_apereodic_cross_corel_list = corel_source_and_rest(sig1_, ansamble_of_sig, "AFVK")
print("\nAFVK")
printFullStat(ansamble_of_apereodic_cross_corel_list, 0, len(source_sig), True)
printFullStat(ansamble_of_apereodic_cross_corel_list, 0, len(source_sig)) | [
"[email protected]"
] | |
8e1ea6a797d71d000461aeffa863481bf30c15f0 | f9364a60157f2d4eaaddb053980055d12ecf4c9e | /RL_routing/RL_ACRR.py | 316eccfb49f5728af60cea07e8f7cdc5a8f4ba95 | [
"MIT"
] | permissive | lorenzoviva/Tesi | de038a2d1b69076bc9e9f15b4c34a76d5e6e0c50 | a8a439e32f1d7484ff6b0bf96a0cfe877001640c | refs/heads/master | 2023-07-19T00:22:06.155791 | 2020-03-05T16:52:00 | 2020-03-05T16:52:00 | 245,185,242 | 0 | 0 | null | 2023-07-06T21:47:40 | 2020-03-05T14:31:28 | Python | UTF-8 | Python | false | false | 6,408 | py | import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import matplotlib.pyplot as plt
from models import *
# Cart Pole
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='interval between training status logs (default: 10)')
args = parser.parse_args()
plt.ion()
env = gym.make('CartPole-v0')
env.seed(args.seed)
torch.manual_seed(args.seed)
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
# actor's layer
self.action_head = nn.Linear(128, 128) # 1
# critic's layer
self.value_head = nn.Linear(128, 32) # 1
self.state_action_routing = nn.Sequential(
Routing(d_cov=1, d_inp=4, d_out=4, n_out=8), #8
Routing(d_cov=1, d_inp=4, d_out=2, n_inp=8, n_out=2),
)
# action & reward buffer
self.saved_actions = []
self.rewards = []
def forward(self, x):
"""
forward of both actor and critic
"""
x = F.relu(self.affine1(x))
# actor: choses action to take from state s_t
# by returning probability of each action
action_caps = self.action_head(x).view(x.shape[0],-1,1,4)
# critic: evaluates being in the state s_t
state_caps = self.value_head(x)
for routing in self.state_action_routing:
state_caps, action_caps, _ = routing(state_caps, action_caps)
routed_state = state_caps.max(1)
state_caps = routed_state[0]
action_caps = action_caps[:,routed_state[1],:,:].squeeze()
action_prob = F.softmax(action_caps, dim=-1)
# return values for both actor and critic as a tupel of 2 values:
# 1. a list with the probability of each action over the action space
# 2. the value from state s_t
return action_prob, state_caps
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=3e-2)
eps = np.finfo(np.float32).eps.item()
def select_action(state):
state = torch.from_numpy(state).float()
probs, state_value = model(state.unsqueeze(0))
# create a categorical distribution over the list of probabilities of actions
m = Categorical(probs)
# and sample an action using the distribution
action = m.sample()
# save to action buffer
model.saved_actions.append(SavedAction(m.log_prob(action), state_value))
# the action to take (left or right)
return action.item()
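# Assumed usage note: sampling from the categorical distribution (rather than
# taking the argmax) keeps exploration stochastic, and the saved log-prob is
# what finish_episode() uses for the policy-gradient term, e.g.
#   state = env.reset()
#   action = select_action(state)   # 0 or 1 for CartPole-v0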
def finish_episode():
"""
    Training code. Calculates actor and critic loss and performs backprop.
"""
R = 0
saved_actions = model.saved_actions
policy_losses = [] # list to save actor (policy) loss
value_losses = [] # list to save critic (value) loss
returns = [] # list to save the true values
# calculate the true value using rewards returned from the environment
for r in model.rewards[::-1]:
# calculate the discounted value
R = r + args.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
for (log_prob, value), R in zip(saved_actions, returns):
advantage = R - value.item()
# calculate actor (policy) loss
policy_losses.append(-log_prob * advantage)
# calculate critic (value) loss using L1 smooth loss
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
# reset gradients
optimizer.zero_grad()
# sum up all the values of policy_losses and value_losses
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
# perform backprop
loss.backward()
optimizer.step()
# reset rewards and action buffer
del model.rewards[:]
del model.saved_actions[:]
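# Worked example of the discounted-return recursion above (assumed numbers):
# with gamma = 0.99 and episode rewards [1, 1, 1],
#   R_2 = 1.0
#   R_1 = 1 + 0.99 * 1.0  = 1.99
#   R_0 = 1 + 0.99 * 1.99 = 2.9701
# so returns == [2.9701, 1.99, 1.0] before mean/std normalization.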
def plot_durations(episode_durations):
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
plt.show()
plt.pause(0.001)
def main():
running_reward = 10
# run inifinitely many episodes
episode_durations = []
for i_episode in count(1):
# reset environment and episode reward
state = env.reset()
ep_reward = 0
state_list = []
        # for each episode, only run 9999 steps so that we don't
        # loop forever while learning
for t in range(1, 10000):
state_list.append(state)
# select action from policy
action = select_action(state)
# take the action
state, reward, done, _ = env.step(action)
if args.render:
env.render()
model.rewards.append(reward)
ep_reward += reward
if done:
episode_durations.append(t)
break
# update cumulative reward
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
# perform backprop
finish_episode()
# log results
if i_episode % args.log_interval == 0:
print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, ep_reward, running_reward))
plot_durations(episode_durations)
# check if we have "solved" the cart pole problem
if running_reward > env.spec.reward_threshold:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
afec745bb4473e5a96a1af30aa862110d6309a68 | db0633d8ec23937f087bfdccf53afe8d584906c2 | /datasets/harem/harem.py | 68ca465314e3be3910a1c30b445f013349cacf59 | [
"Apache-2.0"
] | permissive | pranavnt/datasets | 8b7d671c9f9a9a28097e361d43494b779dd2117b | f2439a743012055b4ad8d5c1b758658afe246f3e | refs/heads/master | 2023-02-23T01:14:13.066679 | 2021-01-27T02:03:36 | 2021-01-27T02:03:36 | 318,725,185 | 1 | 0 | Apache-2.0 | 2021-01-27T02:03:38 | 2020-12-05T07:05:31 | null | UTF-8 | Python | false | false | 11,684 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HAREM dataset"""
from __future__ import absolute_import, division, print_function
import json
import logging
import unicodedata
from typing import List, Tuple
import datasets
_CITATION = """
@inproceedings{santos2006harem,
title={Harem: An advanced ner evaluation contest for portuguese},
author={Santos, Diana and Seco, Nuno and Cardoso, Nuno and Vilela, Rui},
booktitle={In Nicoletta Calzolari; Khalid Choukri; Aldo Gangemi; Bente Maegaard; Joseph Mariani; Jan Odjik; Daniel Tapias (ed) Proceedings of the 5th International Conference on Language Resources and Evaluation (LREC'2006)(Genoa Italy 22-28 May 2006)},
year={2006}
}
"""
_DESCRIPTION = """
The HAREM is a Portuguese language corpus commonly used for Named Entity Recognition tasks. It includes about 93k words, from 129 different texts,
from several genres, and language varieties. The split of this dataset version follows the division made by [1], where 7% of the HAREM
documents are the validation set and the miniHAREM corpus (with about 65k words) is the test set. There are two versions of the dataset,
a version that has a total of 10 different named entity classes (Person, Organization, Location, Value, Date, Title, Thing, Event,
Abstraction, and Other) and a "selective" version with only 5 classes (Person, Organization, Location, Value, and Date).
It's important to note that the original version of the HAREM dataset has 2 levels of NER details, namely "Category" and "Sub-type".
The dataset version processed here ONLY USE the "Category" level of the original dataset.
[1] Souza, Fábio, Rodrigo Nogueira, and Roberto Lotufo. "BERTimbau: Pretrained BERT Models for Brazilian Portuguese." Brazilian Conference on Intelligent Systems. Springer, Cham, 2020.
"""
_HOMEPAGE = "https://www.linguateca.pt/primeiroHAREM/harem_coleccaodourada_en.html"
_LICENSE = ""
_URLs = {
"default": {
"train": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-total-train.json",
"dev": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-total-dev.json",
"test": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/MiniHAREM-total.json",
},
"selective": {
"train": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-selective-train.json",
"dev": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-selective-dev.json",
"test": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/MiniHAREM-selective.json",
},
}
# method extracted from https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L77-L89
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
# method extracted from https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L53-L62
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
class Token:
"""Info about a single token."""
def __init__(self, text: str, tail: str = ""):
if not isinstance(text, str) or not text:
raise TypeError("text should be a non-empty string.")
self.text = text
self.tail = tail
def __len__(self):
return len(self.text) + len(self.tail)
def __add__(self, char):
self.text += char
return self
def reconstruct_text_from_tokens(tokens: List[Token], include_last_tail: bool = False) -> str:
"""Concatenates the text of a sequence of tokens."""
def text_generator(tokens):
for i, token in enumerate(tokens):
yield token.text
if i < len(tokens) - 1 or include_last_tail:
yield token.tail
return "".join(piece for piece in text_generator(tokens))
def tokenize(text: str) -> Tuple[List[Token], List[int]]:
""" Perform whitespace and punctuation tokenization keeping track of char alignment"""
doc_tokens = []
char_to_word_offset = []
new_word = True
curr_token = None
def begin_new_token(doc_tokens, text):
token = Token(text=text)
doc_tokens.append(token)
return token
for offset, c in enumerate(text):
if _is_whitespace(c):
new_word = True
if curr_token:
curr_token.tail += c
else:
if _is_punctuation(c):
curr_token = begin_new_token(doc_tokens, c)
new_word = True
else:
if new_word:
curr_token = begin_new_token(doc_tokens, c)
else:
curr_token += c
new_word = False
# OBS: Whitespaces that appear before any tokens will have offset -1
# char_to_word_offset.append(len(doc_tokens) - 1)
char_to_word_offset.append(max(0, len(doc_tokens) - 1))
return doc_tokens, char_to_word_offset
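# Illustrative behaviour (assumed example):
#   tokens, offsets = tokenize("Ola, mundo")
#   [t.text for t in tokens]  ->  ['Ola', ',', 'mundo']
#   offsets                   ->  [0, 0, 0, 1, 1, 2, 2, 2, 2, 2]
# i.e. every character position maps back to its token index, which is how
# entity character offsets get converted to token spans in
# _generate_examples below.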
class HAREM(datasets.GeneratorBasedBuilder):
"""HAREM dataset."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="default",
version=VERSION,
description="All the tags (PESSOA, ORGANIZACAO, LOCAL, TEMPO, VALOR, ABSTRACCAO, ACONTECIMENTO, COISA, OBRA, OUTRO) will be used",
),
datasets.BuilderConfig(
name="selective",
version=VERSION,
description="Only a subset of the tags (PESSOA, ORGANIZACAO, LOCAL, TEMPO, VALOR) will be used",
),
]
DEFAULT_CONFIG_NAME = "default"
def _info(self):
tags = [
"O",
"B-PESSOA",
"I-PESSOA",
"B-ORGANIZACAO",
"I-ORGANIZACAO",
"B-LOCAL",
"I-LOCAL",
"B-TEMPO",
"I-TEMPO",
"B-VALOR",
"I-VALOR",
]
if self.config.name == "default":
tags += [
"B-ABSTRACCAO",
"I-ABSTRACCAO",
"B-ACONTECIMENTO",
"I-ACONTECIMENTO",
"B-COISA",
"I-COISA",
"B-OBRA",
"I-OBRA",
"B-OUTRO",
"I-OUTRO",
]
features = datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=tags)),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": data_dir["train"], "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": data_dir["test"], "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": data_dir["dev"], "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
""" Yields examples. """
logging.info("⏳ Generating examples from = %s", filepath)
with open(filepath, "r", encoding="utf-8") as f:
input_data = json.load(f)
id_ = 0
for document in input_data:
doc_text = document["doc_text"]
doc_id = document["doc_id"]
doc_tokens, char_to_word_offset = tokenize(doc_text)
tags = ["O"] * len(doc_tokens)
def set_label(index, tag):
if tags[index] != "O":
logging.warning(
"Overwriting tag %s at position %s to %s",
tags[index],
index,
tag,
)
tags[index] = tag
for entity in document["entities"]:
entity_text = entity["text"]
entity_type = entity["label"]
start_token = None
end_token = None
entity_start_offset = entity["start_offset"]
entity_end_offset = entity["end_offset"]
start_token = char_to_word_offset[entity_start_offset]
# end_offset is NOT inclusive to the text, e.g.,
# entity_text == doc_text[start_offset:end_offset]
end_token = char_to_word_offset[entity_end_offset - 1]
assert start_token <= end_token, "End token cannot come before start token."
reconstructed_text = reconstruct_text_from_tokens(doc_tokens[start_token : (end_token + 1)])
assert (
entity_text.strip() == reconstructed_text
), "Entity text and reconstructed text are not equal: %s != %s" % (
entity_text,
reconstructed_text,
)
for token_index in range(start_token, end_token + 1):
if token_index == start_token:
tag = "B-" + entity_type
else:
tag = "I-" + entity_type
set_label(token_index, tag)
yield id_, {
"id": doc_id,
"tokens": [x.text for x in doc_tokens],
"ner_tags": tags,
}
id_ += 1
| [
"[email protected]"
] | |
b31c59a0010e457b6542772a973b3e6da56bfc29 | 9cfd73a998d842d767071b26cefe0eb8efe39e90 | /learning_rates.py | 3b8df2efc6fd82922d321b26d58e4bf9e17144c4 | [] | no_license | boyko11/LogReg-DLAI | 829e9b4e6b8dd23d6f3b5f0f68550d83c080104d | d222f6501ec4f0ea427f42706bb98c28c832fdb8 | refs/heads/master | 2022-11-23T19:26:10.052482 | 2020-08-02T17:57:24 | 2020-08-02T17:57:24 | 284,513,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from logistic_regression import model
import data_service
import matplotlib.pyplot as plt
import numpy as np
train_set_x, train_set_y, test_set_x, test_set_y, _ = data_service.load_and_preprocess_data()
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
| [
"[email protected]"
] |