blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11674a3f0d3e56f5156c92dbc2833e200511d2f2
|
a38180435ac5786185c0aa48891c0aed0ab9d72b
|
/S4/S4 Library/simulation/situations/complex/single_job_situation.py
|
567ee8821eebf3d57e66f878dfe22edeb8aac7d7
|
[
"CC-BY-4.0"
] |
permissive
|
NeonOcean/Environment
|
e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
refs/heads/master
| 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 |
CC-BY-4.0
| 2022-11-22T20:24:59 | 2019-03-28T00:38:17 |
Python
|
UTF-8
|
Python
| false | false | 1,366 |
py
|
from role.role_state import RoleState
from sims4.tuning.tunable import TunableTuple
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, SituationState, SituationStateData
from situations.situation_job import SituationJob
class SingleJobSituation(SituationComplexCommon):
    """A complex situation with exactly one tuned job/role-state pair.

    Sims placed into this situation all receive the tuned
    ``job.situation_job`` and perform ``job.role_state``.
    """

    # Designer-facing tunables: the single job offered by this situation and
    # the role state a Sim assigned to that job will perform.
    INSTANCE_TUNABLES = {'job': TunableTuple(description='\n The job and role which the career Sim is placed into.\n ', situation_job=SituationJob.TunableReference(description='\n A reference to a SituationJob that can be performed at this Situation.\n '), role_state=RoleState.TunableReference(description='\n A role state the Sim assigned to the job will perform.\n '))}
    # Hide tuning that has no effect on a non-user-facing situation.
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES

    @classmethod
    def _states(cls):
        # Single-state situation: state id 1 maps to the only state class.
        return (SituationStateData(1, SingleJobSituationState),)

    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        # Exactly one (job, default role state) pair, taken from tuning.
        return [(cls.job.situation_job, cls.job.role_state)]

    @classmethod
    def default_job(cls):
        return cls.job.situation_job

    def start_situation(self):
        super().start_situation()
        # Enter the one-and-only state immediately.
        self._change_state(SingleJobSituationState())
class SingleJobSituationState(SituationState):
    # Intentionally empty: all behavior comes from the role state assigned
    # by the owning SingleJobSituation.
    pass
|
[
"[email protected]"
] | |
c88bbd34f0f67cb174f84f0b4cff4aa4f6cd855c
|
3969f8402eaa015eb850e041e3dede4978ab9a4c
|
/pkg/eventlet-0.12.1/tests/patcher_psycopg_test.py
|
80988e51fdde3eb2a4aa7f68b40ee7bb7f24f738
|
[
"MIT"
] |
permissive
|
seewindcn/pycocos2d
|
e333bf8ae29d8244e6540ed3d39d76d4002e2908
|
b88c8c5df127f9bf82f62c8b4365f4babcdee105
|
refs/heads/master
| 2023-03-07T10:07:47.167364 | 2013-06-03T10:45:19 | 2013-06-03T10:45:19 | 9,958,133 | 18 | 7 | null | 2013-05-14T03:57:47 | 2013-05-09T11:43:46 |
C
|
UTF-8
|
Python
| false | false | 1,811 |
py
|
import os
from tests import patcher_test, skip_unless
from tests import get_database_auth
from tests.db_pool_test import postgres_requirement
psycopg_test_file = """
import os
import sys
import eventlet
eventlet.monkey_patch()
from eventlet import patcher
if not patcher.is_monkey_patched('psycopg'):
print "Psycopg not monkeypatched"
sys.exit(0)
count = [0]
def tick(totalseconds, persecond):
for i in xrange(totalseconds*persecond):
count[0] += 1
eventlet.sleep(1.0/persecond)
dsn = os.environ['PSYCOPG_TEST_DSN']
import psycopg2
def fetch(num, secs):
conn = psycopg2.connect(dsn)
cur = conn.cursor()
for i in range(num):
cur.execute("select pg_sleep(%s)", (secs,))
f = eventlet.spawn(fetch, 2, 1)
t = eventlet.spawn(tick, 2, 100)
f.wait()
assert count[0] > 100, count[0]
print "done"
"""
class PatchingPsycopg(patcher_test.ProcessBase):
    """Runs the psycopg2 monkey-patch check in a child process.

    NOTE: this is Python 2 code (print statements, dict.iteritems).
    """

    @skip_unless(postgres_requirement)
    def test_psycopg_patched(self):
        if 'PSYCOPG_TEST_DSN' not in os.environ:
            # construct a non-json dsn for the subprocess
            psycopg_auth = get_database_auth()['psycopg2']
            if isinstance(psycopg_auth,str):
                dsn = psycopg_auth
            else:
                # Render auth dict as a libpq-style "key=value ..." string.
                dsn = " ".join(["%s=%s" % (k,v) for k,v, in psycopg_auth.iteritems()])
            os.environ['PSYCOPG_TEST_DSN'] = dsn
        self.write_to_tempfile("psycopg_patcher", psycopg_test_file)
        output, lines = self.launch_subprocess('psycopg_patcher.py')
        if lines[0].startswith('Psycopg not monkeypatched'):
            # psycopg2 is not installed in this environment; skip quietly.
            print "Can't test psycopg2 patching; it's not installed."
            return
        # if there's anything wrong with the test program it'll have a stack trace
        self.assert_(lines[0].startswith('done'), output)
|
[
"none@none"
] |
none@none
|
d58ae3c7d5f559290e4ad6aba0e009878635ebe6
|
625daac7e73b98935f9fe93e647eb809b48b712e
|
/Arcade/Intro/adjacentElementsProduct.py
|
07c848e4a7c18445ca0d1d6cf05d6044c620be21
|
[] |
no_license
|
aleksaa01/codefights-codesignal
|
19b2d70779cc60f62511b6f88ae5d049451eac82
|
a57a5589ab2c9d9580ef44900ea986c826b23051
|
refs/heads/master
| 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
def adjacentElementsProduct(arr):
    """Return the largest product of two adjacent elements of arr.

    Assumes len(arr) >= 2, matching the original contract.
    """
    return max(a * b for a, b in zip(arr, arr[1:]))
"""
Given an array of integers, find the pair of adjacent elements that has the
largest product and return that product.
Example
For inputArray = [3, 6, -2, -5, 7, 3], the output should be
adjacentElementsProduct(inputArray) = 21.
7 and 3 produce the largest product.
"""
|
[
"[email protected]"
] | |
c6492508982755a4e1e8b20b63f7fa75931cdd05
|
fbd4ecf7046171c4e96267c5982c964db54578f5
|
/business/p201904/110111_2300/server.py
|
0872fb79fab8ceec11b2a306e6bc2a815aee5719
|
[] |
no_license
|
Alvin2580du/alvin_py
|
6dddcfbfae214694e9f3dafd976101e681f2a66d
|
82d3e9808073f2145b039ccf464c526cb85274e3
|
refs/heads/master
| 2021-05-05T16:01:43.544783 | 2019-10-29T02:23:59 | 2019-10-29T02:23:59 | 117,328,713 | 12 | 2 | null | 2021-03-20T00:06:37 | 2018-01-13T08:51:49 |
Python
|
UTF-8
|
Python
| false | false | 3,200 |
py
|
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
import sys
from gensim.models.word2vec import Word2Vec
import numpy as np
import jieba
from sklearn.externals import joblib
# Listening port comes from the first CLI argument, e.g. `python server.py 8888`.
ports = sys.argv[1]
define("port", default=ports, help="run on the given port", type=int)
# Load the pre-trained models: word2vec embeddings and the SVM classifier.
imdb_w2v = Word2Vec.load('w2v_model.pkl')
clf = joblib.load('svm_model.pkl')
# Average the word vectors of a sentence to produce one sentence vector.
def build_sentence_vector(text, size, imdb_w2v):
    """Return the (1, size) mean embedding of the in-vocabulary words of `text`.

    Words missing from `imdb_w2v` are skipped; when no word is known, the
    zero vector is returned unchanged.
    """
    sentence_vec = np.zeros(size).reshape((1, size))
    known_words = 0.
    for token in text:
        try:
            sentence_vec += imdb_w2v[token].reshape((1, size))
            known_words += 1.
        except KeyError:
            # Out-of-vocabulary token: contributes nothing.
            continue
    if known_words != 0:
        sentence_vec /= known_words
    return sentence_vec
# Build the feature vector for a sentence about to be classified.
def get_predict_vecs(words, n_dim=300):
    """Return the (1, n_dim) averaged word2vec vector for `words`."""
    return build_sentence_vector(words, n_dim, imdb_w2v)
# Classify the sentiment polarity of a single sentence.
def svm_predict(string):
    # Tokenize with jieba, embed with word2vec, classify with the SVM.
    words = jieba.lcut(string)
    words_vecs = get_predict_vecs(words)
    result = clf.predict(words_vecs)
    # Label 1 means positive sentiment; anything else is negative.
    if int(result[0]) == 1:
        return "positive"
    else:
        return "negative"
class IndexHandler(tornado.web.RequestHandler):
    """Serves the sentence-input form page."""

    def get(self):
        self.render("index.html")
class UserHandler(tornado.web.RequestHandler):
    """Receives a sentence via POST and renders its sentiment polarity."""

    def post(self):
        message = self.get_argument("message")
        print("输入的句子是:{}".format(message))
        res = svm_predict(message)
        self.render("message.html", message="{}的情感极性是:\n{}".format(message, res))
# URL routing table for the application.
handlers = [
    (r"/", IndexHandler),
    (r"/user", UserHandler)
]

if __name__ == "__main__":
    # The bare string below holds sample sentences for manual testing; it is
    # never executed, it merely documents inputs to try.
    """ 测试句子
坐牢,怎么可能轻易放过
把携程亲子园所有的老师全部全家处死一个不留
妈呀,光看视频就已经哭的不行,这些人还有没有人性啊,希望法律严惩,给家长们一个交代。
认错已经不是原谅的理由,必须严惩,孩子的伤害是无法弥补的
中国改改法律吧,就是因为他们以前这种幼师犯罪判个一两年就了事,才有这么多人更甚,最少十年以上,严重判死刑,看有几个还敢的
真应该给这些人判死刑啊
真的是心疼到无法呼吸!!!!!啊啊啊啊啊啊妈的比
没有职业道德就不用当幼师,承受不了孩子的吵闹各种调皮就不要当幼师,真的别当幼师,你都没爱心了,何必去当幼师,可怜的孩子遇见你真的是很可怜
打死都不可惜
我也是位母亲,看到这样的视频,真的是很揪心
简直不配做人!简直无法理解!谁招来的这畜生也得负责任吧!不,畜生都比她强!
这种人希望被国家拉黑
    """
    template_path = os.path.join(os.path.dirname(__file__), "template")
    tornado.options.parse_command_line()
    # NOTE(review): template_path is passed positionally here, which binds to
    # Application's `default_host` parameter, not the `template_path` setting —
    # verify whether `template_path=template_path` was intended.
    app = tornado.web.Application(handlers, template_path)
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
[
"[email protected]"
] | |
f43b801df2a2396b5627c17b19e71a5d8c8eeef8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_258/ch30_2019_03_10_21_05_56_563616.py
|
c59f95da9501a2311605f0176a60d8e35f2f4a9f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
import math

# Projectile range: d = v^2 * sin(2*theta) / g, with theta in RADIANS.
v = int(input('Qual a velocidade do lançamento? '))
a = int(input('Qual o ângulo do lançamento? '))
# Bug fix: the entered angle is in degrees and must be converted TO radians
# with math.radians; the original used math.degrees, which converts the
# other way and yields a wrong range.
d = (v**2)*math.sin(2*math.radians(a))/9.8
if d < 96:
    print('Muito perto')
elif d > 104:
    print('Muito longe')
else:
    print('Acertou!')
|
[
"[email protected]"
] | |
1f66c2f28360a924a6ad07d2b8c8af414203518d
|
50f8d8975b1f17c4c6bcb9be29d4f0ed49cb42a5
|
/Week_04/lemonade-change.py
|
5e567df5df1217ba3921dd4a56cf9268dd95ae3f
|
[] |
no_license
|
Jiangjao/-algorithm015
|
098491b7a9b80626c1d9e15a9125e4e460ee8668
|
a6969617f4cde1d948cb064c1078d4d510140758
|
refs/heads/master
| 2023-01-10T17:42:49.495871 | 2020-11-16T07:35:04 | 2020-11-16T07:35:04 | 289,441,446 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 608 |
py
|
class Solution(object):
def lemonadeChange(self, bills):
"""
:type bills: List[int]
:rtype: bool
"""
five = ten = 0
for bill in bills:
if bill == 5:
five += 1
elif bill == 10:
if not five: return False
five -= 1
ten += 1
else:
if ten and five:
ten -= 1
five -= 1
elif five >= 3:
five -= 3
else:
return False
return True
|
[
"[email protected]"
] | |
5f249cf5e48d2382470baa0a978bc3a0abafafc6
|
d2ca1ab6ed63983d1bd6497f26a63f0445451844
|
/2015/05/fc_2015_05_31.py
|
dc41dc9178467c3c8859ffc19ce4fdb301b45b7d
|
[
"MIT"
] |
permissive
|
mfwarren/FreeCoding
|
96636367f4f4a53351535372c5691d7805199f23
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
refs/heads/master
| 2021-01-19T14:30:09.057354 | 2015-07-05T05:59:53 | 2015-07-05T05:59:53 | 24,469,988 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
#!/usr/bin/env python3
# imports go here
#
# Free Coding session for 2015-05-31
# Written by Matt Warren
#
def factors(x):
    """Return the prime factorization of x as factors in non-decreasing order.

    factors(12) -> [2, 2, 3]; factors(1) -> [].

    Fixed to use exact integer arithmetic: the original tested divisibility
    with float division (`int(v) == v` on `cursor / i`), which can
    mis-classify divisors for large inputs due to floating-point rounding.
    """
    values = []
    cursor = x
    i = 2
    while i <= cursor:
        if cursor % i == 0:
            # i divides the remaining cofactor exactly: peel off one factor
            # of i and try the same i again (it may divide repeatedly).
            cursor //= i
            values.append(i)
        else:
            i += 1
    return values
if __name__ == '__main__':
    # Quick manual checks: 302 = 2*151, 304 = 2**4 * 19.
    print(factors(302))
    print(factors(304))
    print(factors(30473456))
|
[
"[email protected]"
] | |
5229abb6be00316ff90cd09e352230cb2bc258fe
|
a2d5681a37be0d3b0753a0e979cb4fa7b0398f32
|
/indexedcorpus.py
|
84aa2ea572d830a6ae74aed8e35b0c416de90ad2
|
[] |
no_license
|
stephenroller/class-nlp-project
|
f7c09281336985ac55d25e886e7aa180e2225580
|
0362ec1182dc6d3ab54990bbb097339e7bc386a0
|
refs/heads/master
| 2020-05-29T23:26:56.024802 | 2011-05-13T18:30:49 | 2011-05-13T18:30:49 | 1,606,152 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,429 |
py
|
#!/usr/bin/evn python
import sqlite3
import os
from itertools import groupby
from util import context_windows
class IndexedCorpus(object):
    """Read-only access to a corpus through a sqlite word index.

    The index database maps words to (file, position) appearances; the
    corpus text files themselves live under `corpus_directory`.
    """

    def __init__(self, indexfile, corpus_directory=''):
        self.indexfile = indexfile
        self.corpus_directory = corpus_directory
        self.conn = sqlite3.connect(indexfile)

    def get_unique_words(self):
        """Yield every indexed word once, in alphabetical order."""
        cur = self.conn.cursor()
        cur.execute('select word from words order by word')
        for (word,) in cur:
            yield word
        cur.close()

    def get_contexts(self, query):
        """Yield the stripped line of text at each appearance of `query`."""
        cur = self.conn.cursor()
        cur.execute('''
            SELECT F.filename, WA.pos
            FROM words AS W
            JOIN word_appearances as WA ON (W.id = WA.word)
            JOIN filenames AS F ON (WA.file = F.id)
            WHERE W.word = ?
            ORDER BY WA.file, WA.pos
            ''',
            [query]
        )
        # Group the hits by file so each corpus file is opened only once.
        for filename, hits in groupby(cur, lambda row: row[0]):
            handle = open(os.path.join(self.corpus_directory, filename))
            for _, position in hits:
                handle.seek(position)
                yield handle.readline().strip()
            handle.close()
        cur.close()

    def __len__(self):
        """Number of rows in the words table (distinct indexed words)."""
        cur = self.conn.cursor()
        cur.execute('select count(*) from words')
        (total,) = cur.fetchone()
        cur.close()
        return total
|
[
"[email protected]"
] | |
72de8eb136efd770ba9db06215d9ea846c6dd7c9
|
ceb4ac75c40cd53f24d8f7e0a2f763de309bcfdb
|
/main4.py
|
24ac83b28e32c4c98c305c1e1b012cf1ea9f8cf3
|
[] |
no_license
|
kevinelong/bo
|
c706d0771dbbf427a67d240f552eef4b7529b877
|
e08e2d0e07e240cab440733173578f627e0f25ec
|
refs/heads/master
| 2022-11-08T22:18:04.053714 | 2020-07-12T17:17:31 | 2020-07-12T17:17:31 | 279,112,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,184 |
py
|
class Coordinate:
    """A 2-D point with x and y components."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class Size:
    """A width/height pair describing a rectangle's extent."""

    def __init__(self, width, height):
        self.width, self.height = width, height
class Box:
    """An axis-aligned rectangle: an origin Coordinate plus a Size."""

    def __init__(self, origin: Coordinate, box_size: Size):
        self.origin = origin
        self.size = box_size
class Item:
    """A named object occupying a Box; defaults to a 3x3 box at the origin."""

    def __init__(self, name: str, location: Box):
        if location is None:
            # No placement given: use the default 3x3 box at (0, 0).
            location = Box(Coordinate(0, 0), Size(3, 3))
        self.name = name
        self.location = location

    def __str__(self):
        return f"{self.name} {self.location}"
class World:
    """A collection of Items inside rectangular bounds, with collision tests."""

    def __init__(self):
        self.item_list = []
        self.bounds = Box(Coordinate(-10, -10), Size(20, 20))

    def value_at(self, x, y):
        """Return the name of the first item covering cell (x, y), or '.'."""
        pixel = Item("", Box(Coordinate(x, y), Size(1, 1)))
        for item in self.item_list:
            if self.have_collided(pixel, item):
                return item.name
        return "."

    def __str__(self):
        """Render the bounded area as a grid of item names / '.' markers."""
        rows = []
        origin = self.bounds.origin
        for r in range(0, self.bounds.size.height):
            row = []
            for c in range(0, self.bounds.size.width):
                row.append(self.value_at(c + origin.x, r + origin.y))
            rows.append(" ".join(row))
        return "\n".join(rows)

    def add_item(self, item):
        self.item_list.append(item)

    def have_collided(self, item1, item2):
        """Axis-aligned overlap test between the two items' boxes.

        Bug fix: the vertical (y-axis) checks previously used size.width;
        they now use size.height, which matters for non-square boxes.
        (All boxes in this file happen to be square, so existing behavior
        is unchanged for current callers.)
        """
        if item1.location.origin.x + item1.location.size.width <= item2.location.origin.x:
            return False
        if item2.location.origin.x + item2.location.size.width <= item1.location.origin.x:
            return False
        if item1.location.origin.y + item1.location.size.height <= item2.location.origin.y:
            return False
        if item2.location.origin.y + item2.location.size.height <= item1.location.origin.y:
            return False
        return True

    def get_collisions(self):
        """Return the unique unordered pairs of colliding items."""
        collisions = []
        for item1 in self.item_list:
            for item2 in self.item_list:
                # Skip self-pairs and pairs already recorded in reverse order.
                if item1 != item2 and (item2, item1) not in collisions:
                    if self.have_collided(item1, item2):
                        collisions.append((item1, item2))
        return collisions
# Toggle for the debug log() helper below.
debugging = True

def log(text):
    # Print only when debug output is enabled.
    if debugging:
        print(text)

w = World()
# Thin module-level shortcuts onto the single World instance.
add_item = lambda item: w.add_item(item)
get_collisions = lambda : w.get_collisions()

# TESTS
# A covers x,y in [0,3); B covers [-3,1): they overlap near the origin.
add_item(Item("A",Box(Coordinate(0,0), Size(3,3))))
add_item(Item("B",Box(Coordinate(-3,-3), Size(4,4))))
print(w)
c = get_collisions()
log(c)
# Exactly one colliding pair (A, B) is expected.
assert( len(c) == 1 )
|
[
"[email protected]"
] | |
54b883b64ef60b20fe3d570fc00563c41892ba76
|
0bc8d6abec44e1187499f93803f82514f2b53fc6
|
/Base/BaseReq1.py
|
fa5421d9d89f69e236d3949b433faf8e14ac7258
|
[] |
no_license
|
IamBiJav/auto_http_api
|
932db2b4f2e1b67f2c0760806afd086494d92007
|
5a7ff01845e43d441fef8ae955b056085ab2dd10
|
refs/heads/master
| 2023-03-16T22:20:50.102610 | 2021-03-16T13:41:07 | 2021-03-16T13:41:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,054 |
py
|
import requests
import json
import ast
from Base.BaseElementEnmu import Element
from Base.BaseParams import BaseFuzzParams
from Base.BaseStatistics import writeInfo
class Config(object):
    """Sends HTTP requests described by keyword dicts and logs the results.

    Each item of `kw` is expected to provide: protocol, url, method
    ('get'/'post'), params (a Python dict literal as a string), mark
    (free-text remark) and optionally hope ('|'-separated expected
    substrings of the response body).
    """

    def __init__(self, sessions):
        # `sessions`: a requests.Session-like object shared by all calls.
        self.sessions = sessions

    def config_req(self, kw):
        # Forward (positive) scenario: send each request exactly as described
        # and record the request/response details via writeInfo.
        app = {}
        header = {"Accept": "*/*", "Content-Type": "application/json;charset=utf-8"}
        for item in kw:
            url = "%s://%s" % (item["protocol"], item["url"])
            print("==请求url:%s" % url)
            print("==请求参数:%s" % item["params"])
            params = "{}"
            if item.get("params"):
                params = item.get("params")
            # The params string is a dict literal: parse it safely, then
            # re-encode as JSON for the request body.
            if item["method"] == "get":
                res = self.sessions.get(url, data=json.dumps(ast.literal_eval(params)), headers=header, verify=False)
            elif item["method"] == "post":
                res = self.sessions.post(url, data=json.dumps(ast.literal_eval(params)), headers=header, verify=False)
            else:
                print("现在只针post和ge方法进行了测试,其他方法请自行扩展")
            app["url"] = item["url"]
            app["method"] = item["method"]
            app["params"] = item["params"]
            app["code"] = str(res.status_code)
            app["msg"] = item["mark"]
            app["hope"] = item.get("hope", "")
            app["res"] = str(res.text)
            app["ress"] = res  # handed to the check helper for parsing
            print("==响应结果:%s=" % app["res"])
            app["result"] = self.__check(app["hope"], app["ress"])
            print("==响应码:%s=" % app["code"])
            writeInfo(app, Element.INFO_FILE)

    def config_req_pict(self, kw, req=None):
        # Fuzzing (negative) scenario: mutate each request's params via
        # BaseFuzzParams and log every mutated call.
        app = {}
        header = {"Accept": "*/*", "Content-Type": "application/json;charset=utf-8"}
        for item in kw:
            url = "%s://%s" % (item["protocol"], item["url"])
            # Only fuzz when there are parameters; otherwise fall back to the
            # forward (positive) scenario.
            if item.get("params"):
                print("进行逆向场景测试")
                params = BaseFuzzParams().param_fi(ast.literal_eval(item["params"]))
                for i in params:
                    _info = ""
                    # `info` describes the mutation; remove it before sending.
                    if i.get("info", "null") != "null":
                        _info = i.get("info", "参数正确")
                        i.pop("info")
                    if item["method"] == "get":
                        res = self.sessions.get(url, data=json.dumps(i), headers=header)
                    else:
                        res = self.sessions.post(url, data=json.dumps(i), headers=header)
                    app["url"] = item["url"]
                    app["method"] = item["method"]
                    app["params"] = str(i)
                    app["code"] = str(res.status_code)
                    app["msg"] = item["mark"] + "_" + _info
                    # app["hope"] = item.get("hope", "")
                    app["hope"] = ""
                    app["res"] = str(res.text)
                    app["result"] = ""
                    print("请求url:%s" % url)
                    print("请求参数:%s" % app["params"])
                    print("响应码:%s" % app["code"])
                    print("响应结果:%s" % app["res"])
                    writeInfo(app, Element.INFO_FILE)
            else:
                self.config_req(kw)

    def __check(self, hope, res):
        # Compare the '|'-separated expected fragments against the
        # JSON-normalized response body.
        resp = json.dumps(json.loads(res.text), separators=(',', ':'))
        is_check = 0  # 0: no expectation given, not checked; 1: success; -1: failure
        hopes = hope.split("|")
        if len(hopes) and len(hope):
            is_check = 1
            # Every expected fragment must be found in the actual response.
            for j in hopes:
                if resp.find(j) == -1:
                    is_check = -1
                    break
        if is_check == 0:
            return "未检查"
        elif is_check == 1:
            return "成功"
        else:
            return "失败"
|
[
"[email protected]"
] | |
9db0cb4a0ab5668893f4ed5fcb8d6a4515118cab
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/list_topics_item.py
|
941aeb67db97af3db8459de64cdaa873c70458bf
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,993 |
py
|
# coding: utf-8
import re
import six
class ListTopicsItem:
    """A single topic entry returned by the SMN ListTopics API.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'topic_urn': 'str',
        'name': 'str',
        'display_name': 'str',
        'push_policy': 'int',
        'enterprise_project_id': 'str'
    }

    attribute_map = {
        'topic_urn': 'topic_urn',
        'name': 'name',
        'display_name': 'display_name',
        'push_policy': 'push_policy',
        'enterprise_project_id': 'enterprise_project_id'
    }

    def __init__(self, topic_urn=None, name=None, display_name=None, push_policy=None, enterprise_project_id=None):
        """ListTopicsItem - a model defined in huaweicloud sdk"""
        self._topic_urn = None
        self._name = None
        self._display_name = None
        self._push_policy = None
        self._enterprise_project_id = None
        self.discriminator = None
        self.topic_urn = topic_urn
        self.name = name
        self.display_name = display_name
        self.push_policy = push_policy
        self.enterprise_project_id = enterprise_project_id

    @property
    def topic_urn(self):
        """Gets the topic_urn of this ListTopicsItem.

        The unique resource identifier (URN) of the topic.

        :return: The topic_urn of this ListTopicsItem.
        :rtype: str
        """
        return self._topic_urn

    @topic_urn.setter
    def topic_urn(self, topic_urn):
        """Sets the topic_urn of this ListTopicsItem.

        The unique resource identifier (URN) of the topic.

        :param topic_urn: The topic_urn of this ListTopicsItem.
        :type: str
        """
        self._topic_urn = topic_urn

    @property
    def name(self):
        """Gets the name of this ListTopicsItem.

        The name given to the topic when it was created.

        :return: The name of this ListTopicsItem.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ListTopicsItem.

        The name given to the topic when it was created.

        :param name: The name of this ListTopicsItem.
        :type: str
        """
        self._name = name

    @property
    def display_name(self):
        """Gets the display_name of this ListTopicsItem.

        Display name of the topic; used as the email sender name when
        pushing email messages.

        :return: The display_name of this ListTopicsItem.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this ListTopicsItem.

        Display name of the topic; used as the email sender name when
        pushing email messages.

        :param display_name: The display_name of this ListTopicsItem.
        :type: str
        """
        self._display_name = display_name

    @property
    def push_policy(self):
        """Gets the push_policy of this ListTopicsItem.

        Message push policy (currently not modifiable): 0 means messages
        that fail to send are kept in a failure queue, 1 means they are
        discarded.

        :return: The push_policy of this ListTopicsItem.
        :rtype: int
        """
        return self._push_policy

    @push_policy.setter
    def push_policy(self, push_policy):
        """Sets the push_policy of this ListTopicsItem.

        Message push policy (currently not modifiable): 0 means messages
        that fail to send are kept in a failure queue, 1 means they are
        discarded.

        :param push_policy: The push_policy of this ListTopicsItem.
        :type: int
        """
        self._push_policy = push_policy

    @property
    def enterprise_project_id(self):
        """Gets the enterprise_project_id of this ListTopicsItem.

        Enterprise project ID.

        :return: The enterprise_project_id of this ListTopicsItem.
        :rtype: str
        """
        return self._enterprise_project_id

    @enterprise_project_id.setter
    def enterprise_project_id(self, enterprise_project_id):
        """Sets the enterprise_project_id of this ListTopicsItem.

        Enterprise project ID.

        :param enterprise_project_id: The enterprise_project_id of this ListTopicsItem.
        :type: str
        """
        self._enterprise_project_id = enterprise_project_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListTopicsItem):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
7f7621b29075cba866d4c2b7508de19821719201
|
2c6bc39f2adf3731109519bfaf8a3a24ae913834
|
/admin/admin/settings.py
|
60c38e44516af552aee83c9bf875de446377cff1
|
[] |
no_license
|
aliensmart/django-admin
|
a1289e1a01d64b416f64db1ed435ba23f4c2b8ca
|
0732358e4ace57abbf621df66c75b85219226d07
|
refs/heads/master
| 2022-09-01T15:28:54.664846 | 2020-05-20T20:34:54 | 2020-05-20T20:34:54 | 265,679,957 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,085 |
py
|
"""
Django settings for admin project.

Generated by 'django-admin startproject' using Django 3.0.6.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'y*wb35kj$9zpphxs5r)@*t)mer@+zc#6fol0ho29$#cis8r*ai'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'admin.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'admin.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
07cab7f377e53810bca7f3ea6fd25e8f93c45bf2
|
ae7884af1ec3965b7c0eec22edad6b74f78b7ba6
|
/server/src/uds/core/workers/stats_collector.py
|
23a2506832f5b1b824b8e41f3fa32e05c785c451
|
[] |
no_license
|
glyptodon/openuds
|
f4eefa319a3ead827dad999d24e5ee3854d1345d
|
3908c875d30ec332490fc8c049bb537e10f10d08
|
refs/heads/master
| 2021-07-12T20:58:49.281242 | 2021-03-05T22:42:55 | 2021-03-05T22:42:55 | 62,921,174 | 0 | 1 | null | 2016-07-08T22:33:44 | 2016-07-08T22:33:44 | null |
UTF-8
|
Python
| false | false | 4,456 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2020 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
import logging
import typing
from uds.models import ServicePool, Authenticator
from uds.core.util.state import State
from uds.core.util.stats import counters
from uds.core.managers import statsManager
from uds.core.jobs import Job
logger = logging.getLogger(__name__)
class DeployedServiceStatsCollector(Job):
    """
    This Job is responsible for collecting stats for every deployed service every ten minutes
    """

    frecuency = 599  # Once every ten minutes, 601 is prime, 599 also is prime
    friendly_name = 'Deployed Service Stats'

    def run(self):
        logger.debug('Starting Deployed service stats collector')
        # Per active service pool: record how many user services are
        # assigned and how many of those are currently in use, skipping
        # services in transient "info" states.
        servicePoolsToCheck: typing.Iterable[ServicePool] = ServicePool.objects.filter(
            state=State.ACTIVE
        ).iterator()
        for servicePool in servicePoolsToCheck:
            try:
                fltr = servicePool.assignedUserServices().exclude(
                    state__in=State.INFO_STATES
                )
                assigned = fltr.count()
                inUse = fltr.filter(in_use=True).count()
                counters.addCounter(servicePool, counters.CT_ASSIGNED, assigned)
                counters.addCounter(servicePool, counters.CT_INUSE, inUse)
            except Exception:
                # Log and keep going: one failing pool must not stop the rest.
                logger.exception(
                    'Getting counters for service pool %s', servicePool.name
                )
        # Per authenticator: total users, users holding at least one
        # service, and the total number of assigned services.
        for auth in Authenticator.objects.all():
            fltr = auth.users.filter(userServices__isnull=False).exclude(
                userServices__state__in=State.INFO_STATES
            )
            users = auth.users.all().count()
            users_with_service = fltr.distinct().count()
            number_assigned_services = fltr.count()
            counters.addCounter(auth, counters.CT_AUTH_USERS, users)
            counters.addCounter(
                auth, counters.CT_AUTH_SERVICES, number_assigned_services
            )
            counters.addCounter(
                auth, counters.CT_AUTH_USERS_WITH_SERVICES, users_with_service
            )
        logger.debug('Done Deployed service stats collector')
class StatsCleaner(Job):
    """
    This Job is responsible of housekeeping of stats tables.
    This is done by:
    * Deleting all records
    * Optimize table
    """

    frecuency = 3600 * 24 * 15  # Executed just once every 15 days
    friendly_name = 'Statistic housekeeping'

    def run(self):
        logger.debug('Starting statistics cleanup')
        # Counters and events are cleaned independently so a failure in one
        # does not prevent the other from running.
        try:
            statsManager().cleanupCounters()
        except Exception:
            logger.exception('Cleaning up counters')
        try:
            statsManager().cleanupEvents()
        except Exception:
            logger.exception('Cleaning up events')
        logger.debug('Done statistics cleanup')
|
[
"[email protected]"
] | |
115c7c7c5a07a0ed5e1214fc406d01cf55ee2eef
|
f1267f4a0fae414f16b2429a5c3b1cbd42df8794
|
/lib/Daemon.py
|
dded7072b2770aaa31cf9b096453386af2a21d63
|
[] |
no_license
|
oraant/learn_zabbix_odbm
|
3ff3b0318e802ebff9603c8daefdf67cda772b94
|
35a010b5dc0a8bc2989b4d3618f795b08a637063
|
refs/heads/master
| 2020-12-24T05:46:10.358982 | 2016-03-21T10:25:29 | 2016-03-21T10:25:29 | 73,452,172 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,800 |
py
|
# coding:utf-8
import sys,os
class Daemon:
def __init__(self,stdin='/dev/null',stdout='/dev/null', stderr='dev/null'):
'''初始化,指定标准输入输出文件'''
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def daemonize(self):
'''Fork当前进程为守护进程,重定向标准文件描述符'''
#Perform first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) #first parent out
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" %(e.errno, e.strerror))
sys.exit(1)
#从母体环境脱离,更改路径,更改默认权限,以及创建新的SESSION(为了摆脱控制终端,防止响应原SESSION的sighup,sigint等信号)
os.chdir("/")
os.umask(0)
os.setsid()
#执行第二次fork,防止建立了新SESSION的进程(已成为无终端的会话领导)打开新的终端。
try:
pid = os.fork()
if pid > 0:
sys.exit(0) #second parent out
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s]n" %(e.errno,e.strerror))
sys.exit(1)
#进程已经是守护进程了,重定向标准文件描述符
for f in sys.stdout, sys.stderr: f.flush()
si = file(self.stdin, 'r')
so = file(self.stdout,'a+')
se = file(self.stderr,'a+',0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
if __name__ == '__main__':
logfile = sys.argv[1]
d = Daemon('/dev/null',logfile,logfile)
d.daemonize()
while(True):
pass
|
[
"[email protected]"
] | |
f41facc51474c9c8b75bdf9eb8cbff2452c343ac
|
f409f0b5be2bccdc76041a308b28964b00565c2b
|
/untitled/urls.py
|
93f59be44255ae6fefe35db65a6c61417a4d3618
|
[] |
no_license
|
yingliufengpeng/demo_django_blog
|
b9df1e9176ffd66fe9cf6b8fcbad34092aaa8c53
|
27b3e88ebc7e84f8b4d2a8844abd35104bec2bdb
|
refs/heads/master
| 2021-01-17T07:50:52.081607 | 2017-06-26T18:48:56 | 2017-06-26T18:48:56 | 95,317,444 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,704 |
py
|
"""untitled URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.views import static
from demo import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r"^uploads/(?P<path>.*)$", static.serve, {"document_root": settings.MEDIA_ROOT}),
url(r"^login/", views.login, name='login'),
url(r"^logout/", views.logout, name='logout'),
url(r"^register/", views.register, name='register'),
url(r"^index/", views.index, name='index'),
url(r"^home/", views.home, name='home'),
url(r"^article/", views.article, name='article'),
url(r"^add_article/", views.add_article, name='add_article'),
url(r"^upload_img/", views.upload_img, name='upload_img'),
url(r"^article_ajax_add/", views.article_ajax_add, name='article_ajax_add'),
url(r"^modify_article/", views.modify_article, name='modify_article'),
url(r"^article_ajax_modify/", views.article_ajax_modify, name='article_ajax_modify'),
url(r"^article_ajax_delete/", views.article_ajax_delete, name='article_ajax_delete'),
]
|
[
"[email protected]"
] | |
c2113be94bd6ef86abbc7380563b0a18cabd088f
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/动态规划法(dp)/背包问题/474. 一和零.py
|
ee1171e6057672507a105886d84a225938f263c0
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,788 |
py
|
"""
474. 一和零
给你一个二进制字符串数组 strs 和两个整数 m 和 n 。
请你找出并返回 strs 的最大子集的大小,该子集中 最多 有 m 个 0 和 n 个 1 。
如果 x 的所有元素也是 y 的元素,集合 x 是集合 y 的 子集 。
示例 1:
输入:strs = ["10", "0001", "111001", "1", "0"], m = 5, n = 3
输出:4
解释:最多有 5 个 0 和 3 个 1 的最大子集是 {"10","0001","1","0"} ,因此答案是 4 。
其他满足题意但较小的子集包括 {"0001","1"} 和 {"10","1","0"} 。{"111001"} 不满足题意,因为它含 4 个 1 ,大于 n 的值 3 。
示例 2:
输入:strs = ["10", "0", "1"], m = 1, n = 1
输出:2
解释:最大的子集是 {"0", "1"} ,所以答案是 2 。
提示:
1 <= strs.length <= 600
1 <= strs[i].length <= 100
strs[i] 仅由 '0' 和 '1' 组成
1 <= m, n <= 100
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ones-and-zeroes
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
    def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:
        """Size of the largest subset of strs using at most m '0's and n '1's.

        Three-dimensional knapsack DP: dp[i][j][k] is the best subset size
        among the first i strings given a budget of j zeros and k ones.
        """
        count = len(strs)
        dp = [[[0] * (n + 1) for _ in range(m + 1)] for _ in range(count + 1)]
        for i, s in enumerate(strs, start=1):
            zeros = s.count('0')
            ones = s.count('1')
            for j in range(m + 1):
                for k in range(n + 1):
                    # Either skip string i, or take it if the budget allows.
                    best = dp[i - 1][j][k]
                    if j >= zeros and k >= ones:
                        cand = dp[i - 1][j - zeros][k - ones] + 1
                        if cand > best:
                            best = cand
                    dp[i][j][k] = best
        return dp[count][m][n]

    def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:
        """Same answer as findMaxForm1 with the DP rolled into a 2-D table."""
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeros = s.count('0')
            ones = s.count('1')
            # Iterate the budgets backwards so each string is used at most once.
            for j in range(m, zeros - 1, -1):
                for k in range(n, ones - 1, -1):
                    dp[j][k] = max(dp[j][k], dp[j - zeros][k - ones] + 1)
        return dp[m][n]
if __name__ == '__main__':
strs = ["10", "0001", "111001", "1", "0"]
m = 5
n = 3
print(Solution().findMaxForm1(strs, m, n))
print(Solution().findMaxForm2(strs, m, n))
|
[
"[email protected]"
] | |
56f16db5640a5744b67e7f88a950990ad72782a6
|
21b0b4c27193898207751c91b8b2ed168a1b1638
|
/py/py_0383_divisibility_comparison_between_factorials.py
|
6e66399ac758ee89f0245e09912ace51ce300130
|
[
"MIT"
] |
permissive
|
lcsm29/project-euler
|
67560a4e66968f1671a3d7ecf2dda6c956893dca
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
refs/heads/main
| 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
# Solution of;
# Project Euler Problem 383: Divisibility comparison between factorials
# https://projecteuler.net/problem=383
#
# Let f5(n) be the largest integer x for which 5x divides n. For example,
# f5(625000) = 7. Let T5(n) be the number of integers i which satisfy
# f5((2·i-1)!) < 2·f5(i!) and 1 ≤ i ≤ n. It can be verified that T5(103) = 68
# and T5(109) = 2408210. Find T5(1018).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder for the Problem 383 solver; accepts *n* and returns None."""
    return None
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 383
timed.caller(dummy, n, i, prob_id)
|
[
"[email protected]"
] | |
da31943f12cab72657cccbf301ca3e51137991fa
|
6b29d66ba7927129b68bc00db769f0edf1babaea
|
/SoftLayer/CLI/mq/endpoints_list.py
|
179663919c224900057d00eea255084ae140b781
|
[
"MIT"
] |
permissive
|
tdurden82/softlayer-python
|
65f42923c347a164995dfc267829721032de261d
|
0eed20fa4adedd3228d91d929bb8befb1e445e49
|
refs/heads/master
| 2021-01-17T10:01:48.087450 | 2015-10-19T18:38:53 | 2015-10-19T18:38:53 | 46,301,339 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
"""List SoftLayer Message Queue Endpoints."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
import click
@click.command()
@environment.pass_env
def cli(env):
"""List SoftLayer Message Queue Endpoints."""
manager = SoftLayer.MessagingManager(env.client)
regions = manager.get_endpoints()
table = formatting.Table(['name', 'public', 'private'])
for region, endpoints in regions.items():
table.add_row([
region,
endpoints.get('public') or formatting.blank(),
endpoints.get('private') or formatting.blank(),
])
env.fout(table)
|
[
"[email protected]"
] | |
92916397d8bf8d6741c6ac3a5ea1959e5458d171
|
4d87e41fa51a3f777512982553b9bf4f32325c2f
|
/Scripts/pip3-script.py
|
7e22278ba12539d9a302792add86e495297ccf05
|
[] |
no_license
|
Leno1993/RecommendSystem
|
75bc8a045abbd83a127133cac80feb3149ce2802
|
c97126126e86dd309804aa7b5da8df62b6491472
|
refs/heads/master
| 2020-05-09T12:59:28.410270 | 2019-03-24T13:53:48 | 2019-03-24T13:53:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
#!D:\PycharmWorkSpace\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | |
6cd99aa856870945383ad551d176b967727db0ba
|
4851d160a423b4a65e81a75d5b4de5218de958ee
|
/Pig Sursurunga.py
|
cd4d4594217fdde6c6d1df1dd137ceb730f8f010
|
[] |
no_license
|
LarisaOvchinnikova/python_codewars
|
519508e5626303dcead5ecb839c6d9b53cb3c764
|
5399f4be17e4972e61be74831703a82ce9badffd
|
refs/heads/master
| 2023-05-05T14:52:02.100435 | 2021-05-25T18:36:51 | 2021-05-25T18:36:51 | 319,399,343 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 655 |
py
|
https://www.codewars.com/kata/5536aba6e4609cc6a600003d
import re
def sursurungal(txt):
    """Rewrite every '<number> <noun>' pair in txt using Sursurunga plurals.

    0 or 1 keeps the noun as written; 2 uses the prefix 'bu'; 3-9 the
    suffix 'zo'; 10+ wraps the stem in 'ga...ga'. For counts >= 2 the
    noun's final character (its plural 's') is dropped. All other text,
    including separators, passes through unchanged.
    """
    # Split while keeping the non-word separators as their own tokens, so a
    # number token is always followed by a separator and then its noun.
    tokens = re.split(r'(\W+)', txt)
    out = []
    pos = 0
    while pos < len(tokens):
        tok = tokens[pos]
        if tok.isdigit():
            count = int(tok)
            noun = tokens[pos + 2]
            if count <= 1:
                out.append(f"{tok} {noun}")
            else:
                stem = noun[:-1]  # drop the trailing plural 's'
                if count == 2:
                    out.append(f"{count} bu{stem}")
                elif count <= 9:
                    out.append(f"{count} {stem}zo")
                else:
                    out.append(f"{count} ga{stem}ga")
            pos += 3  # consumed number, separator and noun
        else:
            out.append(tok)
            pos += 1
    return "".join(out)
|
[
"[email protected]"
] | |
8865db146159f578256de4ca7df771ec7049b312
|
d2f4eb41c95e35a21c257554efbaf18a557d4f4a
|
/KneiborsClassfier.py
|
9ebdbadd0a59fb28173de4d19d2b21347c5b7885
|
[
"Apache-2.0"
] |
permissive
|
madcow2021/Insect_Identification
|
1d7fbf5ce4a5d72d4994e5af2078701787eb08b4
|
ae9e30c09f47b343664b3cb18e893fedcd84b335
|
refs/heads/master
| 2022-02-03T22:31:17.108726 | 2019-06-05T01:34:46 | 2019-06-05T01:34:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,355 |
py
|
# coding=utf-8
import pandas as pd
# 创建特征列表
column_names = ['P_rect', 'P_extend', 'P_spherical', 'P_leaf', 'P_circle', 'Species']
# column_names = ['P_rect', 'P_extend', 'P_spherical', 'P_leaf', 'P_circle','P_complecate', 'Species']
data = pd.read_csv('data/data.csv', names=column_names)
# print data.shape
# 这个功能快要被抛弃了,分割训练和测试集
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(data[column_names[0:5]], data[column_names[5]], test_size=0.25,
random_state=33)
# print Y_train.value_counts()
# print Y_test.value_counts()
# 数据整理,但是整形的,需要注意
# from sklearn.preprocessing import StandardScaler
# ss = StandardScaler()
# X_train = ss.fit_transform(X_train)
# X_test = ss.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knc = KNeighborsClassifier()
knc.fit(X_train, Y_train)
knc_y_predict = knc.predict(X_test)
from sklearn.metrics import classification_report
print "LR 精确度:" + str(knc.score(X_test, Y_test))
print classification_report(Y_test, knc_y_predict, target_names=[ 'fly','wo','jingui','zhang','zhizhu'])
# 保存训练结果,供后面直接使用
from sklearn.externals import joblib
joblib.dump(knc,'model/knc.model')
|
[
"[email protected]"
] | |
cea43589a7bb31e1bf0c658d9ea1813573b2e2bc
|
ab67bf011764b6c0b6803cd44985a5a2ad3f2593
|
/udpsocket.py
|
2b222871dc47eb1b8e436bd7d76fd4d52cdb877e
|
[] |
no_license
|
pr0logas/streamSockets
|
cba0616ead596bf331eda4f54b6112a212e462fc
|
3f759509dfcb556d3b6a25f11c9f512fb7be430b
|
refs/heads/master
| 2022-11-25T06:09:17.503818 | 2020-07-27T13:53:15 | 2020-07-27T13:53:15 | 285,097,509 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,970 |
py
|
import socket
import os, sys, time
from time import sleep
MCAST_GRP = '10.10.10.10'
MCAST_PORT = 9004
MULTICAST_TTL = 10
bytes_size_to_process = 1024
time_between_data_seconds = 5
time_between_packets_float = 0.0055
def startSocket():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL)
def sendDataOverSocket(data, sleeptime):
if data:
bytes_size_to_process = sys.getsizeof(data)
#print("Serving UDP multicast data to: " + str(MCAST_GRP) + ":" + str(MCAST_PORT) + " " +
# str(bytes_size_to_process) + " bytes" +
# " (file size: " + str(os.stat('channels/currentFile.ts').st_size) + ")")
s.sendto(data, (MCAST_GRP, MCAST_PORT))
sleep(sleeptime)
def adjustTimeForNewData(start, end, sleeptime):
result = (time_between_data_seconds - (end-start))
if result < 0:
print("No sleep needed we are {} seconds late to stream the data!".format(result) + " Next sleep: " + str(sleeptime))
else:
print("Sleeping for {} Waiting for next data...".format(result) + " Next sleep: " + str(sleeptime))
while True:
starttime = time.time()
with open("channels/currentFile.ts", "rb", buffering=1) as f:
byte = f.read(bytes_size_to_process)
expectedPackets = os.stat('channels/currentFile.ts').st_size / bytes_size_to_process
print(expectedPackets)
sleepTime = (time_between_data_seconds / expectedPackets) - 0.000120256
sendDataOverSocket(byte, sleepTime)
while byte:
byte = f.read(bytes_size_to_process)
sendDataOverSocket(byte, sleepTime)
f.close()
endtime = time.time()
adjustTimeForNewData(starttime, endtime, sleepTime)
#sleep(time_between_packets_float)
|
[
"[email protected]"
] | |
4ca7dd8882f263f5749f1eecebddf59f13b12871
|
0969f7c85e5ae0a19982077d6bb702c41b2b1e1f
|
/nets/mobilenet/mobilenet_v2.py
|
02f5fa0510270fecc4ea3bd20c7f4da25bad20b1
|
[
"MIT"
] |
permissive
|
353622088/tianchi
|
544e49bb6720c4978188cdbddd88a0ebe9f5669c
|
e1f378e5fd783eb4cfbfaf8ecdd944b8fcfdd733
|
refs/heads/master
| 2020-04-19T09:06:35.946147 | 2019-01-30T09:30:05 | 2019-01-30T09:30:05 | 168,099,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,434 |
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
import functools
slim = tf.contrib.slim
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
V2_DEF = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(ops.expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(ops.expanded_conv, stride=2, num_outputs=24),
op(ops.expanded_conv, stride=1, num_outputs=24),
op(ops.expanded_conv, stride=2, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=2, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=2, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=320),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
# pyformat: enable
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
**kwargs):
"""Creates mobilenet V2 network.
Inference mode is created by default. To create training use training_scope
below.
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer. Note: this is called depth multiplier in the
paper but the name is kept for consistency with slim's model builder.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
finegrain_classification_mode: When set to True, the model
will keep the last layer large even for small multipliers. Following
https://arxiv.org/abs/1801.04381
suggests that it improves performance for ImageNet-type of problems.
*Note* ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
divisible_by: If provided will ensure that all layers # channels
will be divisible by this number.
**kwargs: passed directly to mobilenet.mobilenet:
prediction_fn- what prediction function to use.
reuse-: whether to reuse variables (if reuse set to true, scope
must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
raise ValueError('mobilenetv2 doesn\'t support generic '
'multiplier parameter use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
def wrapped_partial(func, *args, **kwargs):
    """Like functools.partial, but the result also carries func's metadata
    (__name__, __doc__, ...) so decorated/partial models keep useful names."""
    bound = functools.partial(func, *args, **kwargs)
    # update_wrapper returns the (mutated) wrapper itself.
    return functools.update_wrapper(bound, func)
mobilenet_v2_100 = wrapped_partial(mobilenet, depth_multiplier=1.00)
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.40)
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
with slim.
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
|
[
"[email protected]"
] | |
e74010a40ad06fe82916fea9a7e6c222b087a685
|
cb83b02ead1cb77c87e117118f7e5cd3ecf46ba1
|
/sistema_plantilla/settings/settings.py
|
6652e9955e4d55b35976610917e962c7d8b0c985
|
[] |
no_license
|
danielhuamani/sistema-plantilla-saas
|
f834d90157b3d0ab1724fe7d3be5e9224cf753ae
|
8802a4b429fdce9ce433539684b52e2177042c35
|
refs/heads/master
| 2020-12-11T01:48:45.313743 | 2016-01-18T16:10:24 | 2016-01-18T16:10:24 | 48,857,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,996 |
py
|
"""
Django settings for settings project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from os.path import dirname, join, realpath
BASE_DIR = dirname(dirname(realpath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vhgbr0j26ii9t4juw%_z)_^wm8st_#1$8zrj4yq7!5b)7-@554'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.productos',
'apps.clientes',
'apps.configuracion',
'apps.theme',
'apps.theme_admin',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'settings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (join(BASE_DIR, 'templates'),),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'settings.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
|
[
"[email protected]"
] | |
80793db4fcb6d003bcd7f9d86fe4acae5bc1a6c0
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/6ae12eacdae24553a91c0270cb101e66.py
|
5d6e3848a1fa367f700bac002a2b381e701f99cc
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 281 |
py
|
#
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Respond to a remark the way Bob would.

    Returns:
        "Fine. Be that way!"  for silence (None or whitespace-only input),
        "Whoa, chill out!"    for shouting (all upper-case),
        "Sure."               for a question (ends with '?'),
        "Whatever."           for anything else.
    Shouting is checked before questioning, so a shouted question gets
    "Whoa, chill out!".
    """
    if what is None or what.strip() == "":
        return "Fine. Be that way!"
    if what.isupper():
        return "Whoa, chill out!"
    # Bug fix: the old check `what.endswith(" ")` treated ANY remark with a
    # trailing space as a question. Trim trailing whitespace and test only
    # for a trailing '?' instead.
    if what.rstrip().endswith("?"):
        return "Sure."
    return "Whatever."
|
[
"[email protected]"
] | |
89e0353d4de23f2ac613d436afbbec0a40354e19
|
e8ef02248600525a114c9ed9a6098e95d8007063
|
/qtlab/scripts/sal/ff_powersweep.py
|
7966c043a04204757185031af05d8a6ff6e2df04
|
[] |
no_license
|
mgely/papyllon
|
ac264e202298728f6ca69d566c1fe45a9de0dc1c
|
490c756da8f08c971864dcd983ea82c944bc8c85
|
refs/heads/master
| 2021-01-10T06:28:17.250944 | 2016-02-26T13:49:21 | 2016-02-26T13:49:21 | 46,259,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,120 |
py
|
#prepare environment
import qt
import visa
import numpy as np
from numpy import pi, random, arange, size, array, sin, cos, diff, absolute,zeros, sign,ceil,sqrt,absolute
from time import time, sleep, localtime, strftime
execfile('metagen.py')
#Check and load instrument plugins
instlist = qt.instruments.get_instrument_names()
print "installed instruments: "+" ".join(instlist)
#install the drivers no check
if 'med' not in instlist:
med = qt.instruments.create('med','med')
#if 'adwin' not in instlist:
# adwin= qt.instruments.create('adwin', 'ADwin_DAC',address=0x255)
if 'ff' not in instlist:
ff=visa.instrument('TCPIP0::192.168.1.151::inst0::INSTR')
instlist = qt.instruments.get_instrument_names()
print "Available instruments: "+" ".join(instlist)
#measurement information stored in manual in MED instrument
#med.set_device('ShunDevice')
#med.set_setup('BF_4, conversion is 1uA/V')
#med.set_user('Shun')
qt.mstart()
spyview_process(reset=True) #clear old meta-settings
filename = 'EOS8_C_FF'
data = qt.Data(name=filename)
data.add_coordinate('Probe Frequency [MHz]')
data.add_coordinate('Voltage [uA]')
data.add_value('S21 [abs]')
data.add_value('S21 [rad]')
#data.create_file()
data.create_file(name=filename, datadirs='D:\\data\\Sal\\EOS8_C\\temp_powersweep')
data.copy_file('FF_powersweep.py')
kHz = 1e3
MHz = 1e6
GHz = 1e9
####Settings:
#Current temperature
# 18mK
## 10dB on VNA out
## miteq on the input port 2
## I to V conversion 100uA/1Volt
##
######### Variables for NA
pinit=-45
bw=30
f_start=5.272*GHz
f_stop=5.372*GHz
f_pts=401
##hanger_f0=5900.59*MHz
##hanger_span=1000*kHz
##f1_start=hanger_f0-hanger_span/2
##f1_stop=hanger_f0+hanger_span/2
### Variables for field
#v_start=0
#v_stop=1.5
#v_pts=1501
### Variables for power
p_start = -45
p_stop =0
p_pts =10
### Preparing NA
ff.write('INST:SEL "NA";*OPC?')
ff.write('FREQ:STOP '+str(f_stop)+'\n')
ff.write('FREQ:STAR '+str (f_start)+'\n')
ff.write('BWID '+str(bw)+'\n')
ff.write('SOUR:POW '+str(pinit)+'\n')
ff.write('SWE:POIN '+str(f_pts)+'\n')
ff.write('CALC:PAR:DEF S21 \r')
### Prepare ADWIN for current sweep
#adwin.start_process()
########### making lists of values to be measured ###########
f_list=np.linspace(f_start,f_stop,f_pts)
#v_list=np.linspace(v_start,v_stop,v_pts)
p_list = np.linspace(p_start,p_stop,p_pts)
##################################################
qt.msleep(0.1)
for p in p_list:
print 'current power '+str(p)+' power'
ff.write('SOUR:POW ' +str(p)+'\n')
print ff.ask('SOUR:POW?')
#adwin.set_DAC_2(v)
qt.msleep(2)
#setting tarce 1
ff.write('INIT \r')
qt.msleep(15)
ff.write('CALC:FORM MLOG \r')
qt.msleep(2)
trace_mlog = eval(ff.ask('CALC:DATA:FDAT? \r'))
qt.msleep(2)
ff.write('CALC:FORM PHAS \r')
qt.msleep(2)
trace_phase = eval(ff.ask('CALC:DATA:FDAT? \r'))
v_dummy=np.linspace(p,p,len(f_list))
data.add_data_point(v_dummy,f_list,trace_mlog, np.gradient(np.unwrap(np.deg2rad(trace_phase),np.pi)))
data.new_block()
spyview_process(data,f_start,f_stop,p)
qt.msleep(0.01)
data.close_file()
qt.mend()
|
[
"[email protected]"
] | |
eddb1083f72d566a9ba78588b02c0de1582230e7
|
8cb101991346bd6403cfaca88b0445f917e52254
|
/tuneuptechnology/tickets.py
|
d5ccf178b5ba56d3c933e59ce7abdad16b3a0163
|
[
"MIT",
"Python-2.0"
] |
permissive
|
TrendingTechnology/tuneuptechnology-python
|
a06742fbf404fb1afc525ccf1d432c4c374866f1
|
479bbece1722f7e233dbc0f7642205e1afa971c1
|
refs/heads/main
| 2023-06-08T17:26:41.108769 | 2021-06-22T02:15:45 | 2021-06-22T02:15:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,201 |
py
|
class Tickets():
    """Thin client for the /tickets endpoints.

    All HTTP traffic goes through the injected `make_http_request`
    callable, invoked as make_http_request(method, endpoint[, data]).
    """

    def __init__(self, base_url, make_http_request):
        self.base_url = base_url
        self.make_http_request = make_http_request

    def create(self, data):
        """Create a ticket from the given payload."""
        return self.make_http_request('post', f'{self.base_url}/tickets', data)

    def all(self):
        """Retrieve every ticket."""
        return self.make_http_request('get', f'{self.base_url}/tickets')

    def retrieve(self, id):
        """Retrieve the single ticket with the given ID."""
        return self.make_http_request('get', f'{self.base_url}/tickets/{id}')

    def update(self, id, data):
        """Apply the given fields to the ticket with the given ID."""
        return self.make_http_request('patch', f'{self.base_url}/tickets/{id}', data)

    def delete(self, id):
        """Delete the ticket with the given ID."""
        return self.make_http_request('delete', f'{self.base_url}/tickets/{id}')
|
[
"[email protected]"
] | |
3ba4d9a3323a8bb7a9dd944f28bff4943cd98968
|
266947fd84eed629ed0c21f6d91134239512afd9
|
/BeginnerContest_B/061.py
|
8605568528a2b38ba911c5cdf7aae2aba95aad32
|
[] |
no_license
|
SkiMsyk/AtCoder
|
c86adeec4fa470ec14c1be7400c9fc8b3fb301cd
|
8102b99cf0fb6d7fa304edb942d21cf7016cba7d
|
refs/heads/master
| 2022-09-03T01:23:10.748038 | 2022-08-15T01:19:55 | 2022-08-15T01:19:55 | 239,656,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
# AtCoder ABC061 B: given N vertices and M undirected edges on stdin,
# print the degree (number of incident edges) of each vertex, one per line.
N, M = map(int, input().split())
degree = [0] * N
for _ in range(M):
    a, b = map(int, input().split())
    # Each edge contributes to the degree of both endpoints (1-indexed input).
    degree[a - 1] += 1
    degree[b - 1] += 1
for d in degree:
    print(d)
|
[
"[email protected]"
] | |
3740b278f395768c4a255c2166677022992d93a9
|
85574bab97569bae7368dc4e2d2aa73c73743a9b
|
/DSPFromGroundUp/Python/016RunningSumV2/main.py
|
9bf1bb6d344340923a786a4e595a379f76fda9cf
|
[] |
no_license
|
saradhimpardha/UdemyDSPFromGroundUpOnARMProcessors
|
3c0fcd7272e892f222871dc412fc214851477aea
|
576d4a38992533ed0733278d6b4b6444db58706b
|
refs/heads/main
| 2023-05-04T15:45:30.184864 | 2021-05-28T14:40:46 | 2021-05-28T14:40:46 | 458,248,148 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 922 |
py
|
#
# Imports
#
from matplotlib import pyplot as plt
from matplotlib import style
import mysignals as sigs
#
# Global variables
#
#
# Private functions
#
#
def calc_running_sum(sig_src_arr, sig_dest_arr):
    """Write the running (cumulative) sum of sig_src_arr into sig_dest_arr.

    sig_dest_arr is first zeroed in place; entry i then receives the sum of
    sig_src_arr[0..i]. Entries of sig_dest_arr beyond len(sig_src_arr) stay 0.
    """
    for idx in range(len(sig_dest_arr)):
        sig_dest_arr[idx] = 0
    total = 0
    for idx, sample in enumerate(sig_src_arr):
        total += sample
        sig_dest_arr[idx] = total
#
# main
#
if __name__ == "__main__":
output_signal =[None]*320
calc_running_sum(sigs.InputSignal_1kHz_15kHz,output_signal)
#
style.use('ggplot')
#style.use('dark_background')
f,plt_arr = plt.subplots(2,sharex=True)
f.suptitle("Running Sum")
plt_arr[0].plot(sigs.InputSignal_1kHz_15kHz,color='red')
plt_arr[0].set_title("Input Signal")
plt_arr[1].plot(output_signal,color ='magenta')
plt_arr[1].set_title("Output Signal")
plt.show()
|
[
"[email protected]"
] | |
0751238896833b73c9818850c8150c8aff389c4b
|
f4b74154a7e50a9cfd325b45046b6c86c1682847
|
/src/settings.py
|
ccb98801ea3144dc52ce825ead5c542150f3330b
|
[] |
no_license
|
toxicOxygen/personal_website-
|
826225a979ef0e62aaddf9730d1fd5d533400310
|
1826ef3de43fc4d162a509f48a1f90392ac136e5
|
refs/heads/master
| 2021-09-23T09:28:51.103637 | 2020-03-30T02:12:58 | 2020-03-30T02:12:58 | 251,178,977 | 0 | 0 | null | 2021-09-22T18:54:41 | 2020-03-30T02:13:38 |
HTML
|
UTF-8
|
Python
| false | false | 3,619 |
py
|
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=$%2g7+8uw(qd3##ayde181009u=1$40xpz=aqg4#)5&80oji7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'js_projects',
'pages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR,'static'),]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'uksoiwcaeargewci'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# django_heroku.settings(locals())
|
[
"[email protected]"
] | |
27cb43ce03426ae33a2a613a5b551d5332371f3c
|
4a995ce459f42c372d548eb397e95a7793b8b965
|
/cursoshandler/models.py
|
fe22f8853dc98344edddb248959e587b5692a3c5
|
[] |
no_license
|
astandre/chatbot-system
|
edb1d1835fe61a2140bad53e7f68ce2bc724018a
|
99aab3e1e63a05bd475c5af8733b8c771d5e69f5
|
refs/heads/master
| 2022-12-12T01:37:13.498987 | 2018-10-13T23:03:03 | 2018-10-13T23:03:03 | 145,641,189 | 0 | 0 | null | 2022-12-08T02:47:10 | 2018-08-22T01:49:28 |
Python
|
UTF-8
|
Python
| false | false | 2,784 |
py
|
from neomodel import *
from neomodel import install_all_labels, remove_all_labels
# remove_all_labels()
# install_all_labels()
# clear_neo4j_database(db)
# TODO set label and help_text
class Curso(StructuredNode):
uid = UniqueIdProperty()
nombre = StringProperty(required=True, unique_index=True)
cod = StringProperty(unique=True, required=True)
descripcion = StringProperty(required=False)
pre_requisitos = StringProperty(required=False)
edicion = StringProperty(required=False)
oferta = StringProperty(required=False)
tematica = StringProperty(required=False)
fecha_inscripcion = DateProperty(default_now=True)
fecha_inicio = DateProperty(default_now=True)
esfuerzo_estimado = StringProperty(default=0)
duracion = StringProperty(required=False)
link = StringProperty(default="http://opencampus.utpl.edu.ec/")
INSTITUCIONES = {
"U": "UTPL",
"O": "Otro",
}
institucion = StringProperty(choices=INSTITUCIONES, default="U")
archivado = BooleanProperty(default=False)
docente = RelationshipTo('Docente', 'HAS_A_DOCENTE', cardinality=OneOrMore)
competencia = RelationshipTo('Competencia', 'HAS_A_COMPETENCIA', cardinality=OneOrMore)
reto = RelationshipTo('Reto', 'HAS_A_RETO', cardinality=OneOrMore)
contenido = RelationshipTo('Contenido', 'HAS_A_CONTENIDO', cardinality=OneOrMore)
sinonimo = RelationshipTo('Sinonimo', 'HAS_A_SINONIMO', cardinality=OneOrMore)
class Docente(StructuredNode):
uid = UniqueIdProperty()
nombre = StringProperty(unique_index=True, required=True)
N_ACADEMICO = {
"TN": "Nivel Técnico",
"CN": "Tercer Nivel",
"T": "Cuarto Nivel",
}
nivel_academico = StringProperty(default="T", choices=N_ACADEMICO)
email = EmailProperty(required=False)
resumen = StringProperty(required=False)
curso = RelationshipTo('Curso', 'TEACHES', cardinality=OneOrMore)
class Competencia(StructuredNode):
competencia = StringProperty(unique=True, required=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Reto(StructuredNode):
titulo_reto = StringProperty(unique=True, required=True)
fecha_inicio = DateTimeProperty(default_now=True)
fecha_fin = DateTimeProperty(default_now=True)
descripcion = StringProperty(required=False)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Contenido(StructuredNode):
orden = StringProperty(required=True)
contenido = StringProperty(unique=True, required=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Sinonimo(StructuredNode):
sinonimo = StringProperty(required=True, unique_index=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
|
[
"[email protected]"
] | |
45da61cb3415eb8e07c8366c7b8f0ed58e3c101e
|
982539edb302b6bee5dd9285e9de00ad866b4cfd
|
/Tongji/Mode/PlatUserConf.py
|
0128446c9127bba03b5fb549b02ac0e89e624e1b
|
[] |
no_license
|
chennqqi/OpenSaaSProj
|
2149a2066c607636ce2106801be2cb722cc0934d
|
0f861a61d1bd1499599207a70a8e180930d96573
|
refs/heads/master
| 2020-04-04T16:14:08.943396 | 2017-06-01T06:50:32 | 2017-06-01T06:50:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,183 |
py
|
# -*- coding: utf-8 -*-
from datetime import date
from pony.orm import *
def define_user_conf(db):
class Plat_user_conf(db.Entity):
id = PrimaryKey(int, sql_type="int(20)", auto=True)
tm = Optional(date)
ver = Optional(str)
pub = Optional(str)
nameid = Optional(str)
vshow = Optional(str)
vtype = Optional(str)
return Plat_user_conf
if __name__ == "__main__":
a = Database()
define_user_conf(a)
a.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="saas_meta")
a.generate_mapping(create_tables=True)
b = Database()
define_user_conf(b)
b.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="guaengdemo")
a.disconnect()
b.disconnect()
b.generate_mapping(create_tables=True)
# db.drop_table("plat_event")
# tester = Plat_event()
# b = Database()
# setDB(b)
# db.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="guaengdemo")
# db.generate_mapping(create_tables=True)
# tester = Plat_event()
|
[
"[email protected]"
] | |
94b48fd60ae2a1848557d45847013a281ca0bb72
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/VoucherAvailableOutItemInfo.py
|
d0133f8f8df9195637a3cad5457d1610e907a92c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 1,440 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class VoucherAvailableOutItemInfo(object):
def __init__(self):
self._item_app_id = None
self._out_item_id = None
@property
def item_app_id(self):
return self._item_app_id
@item_app_id.setter
def item_app_id(self, value):
self._item_app_id = value
@property
def out_item_id(self):
return self._out_item_id
@out_item_id.setter
def out_item_id(self, value):
self._out_item_id = value
def to_alipay_dict(self):
params = dict()
if self.item_app_id:
if hasattr(self.item_app_id, 'to_alipay_dict'):
params['item_app_id'] = self.item_app_id.to_alipay_dict()
else:
params['item_app_id'] = self.item_app_id
if self.out_item_id:
if hasattr(self.out_item_id, 'to_alipay_dict'):
params['out_item_id'] = self.out_item_id.to_alipay_dict()
else:
params['out_item_id'] = self.out_item_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = VoucherAvailableOutItemInfo()
if 'item_app_id' in d:
o.item_app_id = d['item_app_id']
if 'out_item_id' in d:
o.out_item_id = d['out_item_id']
return o
|
[
"[email protected]"
] | |
bcf1581afef31e7569bc8ef68a094cb8fad143ea
|
70f5f279e051360310f95be895320d8fa6cd8d93
|
/extraPackages/matplotlib-3.0.2/examples/userdemo/connectionstyle_demo.py
|
1ea2bf5fe8fd2ff9ac9da4674adfb762654d93bd
|
[
"BSD-3-Clause"
] |
permissive
|
spacetime314/python3_ios
|
4b16ab3e81c31213b3db1e1eb00230621b0a7dc8
|
e149f1bc2e50046c8810f83dae7739a8dea939ee
|
refs/heads/master
| 2020-05-09T20:39:14.980041 | 2019-04-08T15:07:53 | 2019-04-08T15:07:53 | 181,415,024 | 2 | 0 |
BSD-3-Clause
| 2019-04-15T05:00:14 | 2019-04-15T05:00:12 | null |
UTF-8
|
Python
| false | false | 1,845 |
py
|
"""
====================
Connectionstyle Demo
====================
"""
import matplotlib.pyplot as plt
fig, axs = plt.subplots(3, 5, figsize=(8, 4.8))
x1, y1 = 0.3, 0.3
x2, y2 = 0.7, 0.7
def demo_con_style(ax, connectionstyle, label=None):
x1, y1 = 0.3, 0.2
x2, y2 = 0.8, 0.6
ax.plot([x1, x2], [y1, y2], ".")
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->",
color="0.5",
shrinkA=5, shrinkB=5,
patchA=None,
patchB=None,
connectionstyle=connectionstyle,
),
)
ax.text(.05, .95, connectionstyle.replace(",", ",\n"),
transform=ax.transAxes, ha="left", va="top")
demo_con_style(axs[0, 0], "angle3,angleA=90,angleB=0")
demo_con_style(axs[1, 0], "angle3,angleA=0,angleB=90")
demo_con_style(axs[0, 1], "arc3,rad=0.")
demo_con_style(axs[1, 1], "arc3,rad=0.3")
demo_con_style(axs[2, 1], "arc3,rad=-0.3")
demo_con_style(axs[0, 2], "angle,angleA=-90,angleB=180,rad=0")
demo_con_style(axs[1, 2], "angle,angleA=-90,angleB=180,rad=5")
demo_con_style(axs[2, 2], "angle,angleA=-90,angleB=10,rad=5")
demo_con_style(axs[0, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=0")
demo_con_style(axs[1, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=5")
demo_con_style(axs[2, 3], "arc,angleA=-90,angleB=0,armA=0,armB=40,rad=0")
demo_con_style(axs[0, 4], "bar,fraction=0.3")
demo_con_style(axs[1, 4], "bar,fraction=-0.3")
demo_con_style(axs[2, 4], "bar,angle=180,fraction=-0.2")
for ax in axs.flat:
ax.set(xlim=(0, 1), ylim=(0, 1), xticks=[], yticks=[], aspect=1)
fig.tight_layout(pad=0)
plt.show()
|
[
"[email protected]"
] | |
7d2794d66c8af7463d80b6feb07f0a139da4daf6
|
6f54ce52f08806075f0445e7dd206baae96ebdca
|
/IssueTracker/controllers/default.py
|
f6f0ad38bfb5a2d5fa0f37f28e66d7e27f9f3aff
|
[
"BSD-3-Clause"
] |
permissive
|
ykanggit/web2py-appliances
|
a93d318a214aa5b3e5cd6b47b642f2c12addba46
|
5ca7a04d5403f04aad9e90e99e10dbc05a08a50a
|
refs/heads/master
| 2022-05-06T08:55:11.089350 | 2022-04-14T19:25:02 | 2022-04-14T19:25:02 | 49,680,074 | 0 | 0 | null | 2016-01-14T22:41:45 | 2016-01-14T22:41:45 | null |
UTF-8
|
Python
| false | false | 7,707 |
py
|
# -*- coding: utf-8 -*-
def index():
return dict(message=T('Hello World'))
def projects():
#COLUMNS=('project.name','project.author','project.repo','project.license')
FIELDS=(db.project.id,db.project.name,db.project.created_by,db.project.manager,db.project.phase,db.project.repo)
LINKS=[lambda row: A('Subprojects',_href=URL('projects',args=row.id)),
lambda row: A('Issues',_href=URL('issues',args=row.id)),
lambda row: A('Team',_href=URL('teams',args=row.id)) ]
def check(row): return ((row.created_by == auth.user_id)|(row.manager == auth.user_id))
if (request.args(0)):
query = (db.project.super_project==request.args(0))
#name = 'The subprojects of: '+ str(db(db.project.id==request.args(0)).select(db.project.name)).lstrip('project.name ')
else:
query = db.project
#name = 'Project directory'
grid = SQLFORM.grid(query,editable=check,deletable=check,
fields = FIELDS,links=LINKS)
return dict(grid=grid)#name=name)
def teams():
def check(row):
return (row.team_lead == auth.user_id)
if (request.args(0)):
query = (db.team.assigned_projects==request.args(0))
else:
query = db.team
grid=SQLFORM.grid(query,editable=check,deletable=check)
return dict(grid=grid)
@auth.requires_membership('manager')
def roles():
manager_id = db(db.auth_group.role == 'manager').select().first().id
query = (db.auth_membership.group_id == manager_id)
grid = SQLFORM.grid(query,editable=False)
return dict(grid=grid)
def issues():
project = db.project(request.args(0)) or redirect(URL('projects'))
status = request.args(2)
#TODO- show issues of the subprojects
query = (db.issue.project == project.id)&(db.issue.is_last==True)
if (request.args(1)):
query = query&(db.issue.super_issue==request.args(1))
if not status or status=='Open':
query = query&(db.issue.status.belongs(['New','Assigned','Accepted','Started']))
elif status=='Closed':
query = query&(db.issue.status.belongs(
['Fixed','Verified','Invalid','Duplicate','WontFix','Done']))
elif status!='All':
query = query&(db.issue.status==status)
"""comment"""
from gluon.utils import web2py_uuid
db.issue.project.default = project.id
db.issue.uuid.default = web2py_uuid()
db.issue.is_last.default = True
db.issue.owner.default = project.created_by.email
db.issue.description.default = DESCRIPTION
db.issue.labels.represent = lambda v,r: ', '.join(v or [])
if not auth.user or not (
auth.user.id == project.created_by or \
auth.user.email in (project.members_email or [])):
db.issue.owner.writable = False
db.issue.status.writable = False
FIELDS=(db.issue.id,db.issue.uuid,db.issue.status,db.issue.summary,db.issue.created_on,db.issue.author,db.issue.labels,)
LINKS=[lambda row: A('Details',_href=URL('issue',args=row.uuid)),
lambda row: A('Sub-issues',_href=URL('issues',args=[project.id,row.id])),
lambda row2:A('Assignment',_href=URL('assign',args=row2.id)),
lambda row3: A('Escalate', _href=URL('escalate',args=row3.id))]
grid = SQLFORM.grid(query, fields = FIELDS,links=LINKS,
details=False,editable=False,
deletable=project.created_on==auth.user_id,
create=auth.user_id,args=[project.id],
oncreate=lambda form:do_mail([db.issue(form.vars.id)]))
return dict(grid=grid, project=project)
def issue():
last = db(db.issue.uuid==request.args(0))\
(db.issue.is_last==True).select().first()
project = db.project(last.project) or redirect(URL('projects'))
if auth.user:
db.issue.status.default = last.status
db.issue.summary.default = last.summary
db.issue.project.default = last.project
db.issue.uuid.default = last.uuid
db.issue.is_last.default = True
db.issue.owner.default = last.owner
db.issue.labels.default = last.labels
if not (auth.user.id == project.created_by or \
auth.user.email == last.owner or \
auth.user.email in (project.members_email or [])):
db.issue.owner.default = project.created_by
db.issue.owner.writable = False
db.issue.status.writable = False
form = SQLFORM(db.issue)
if form.process().accepted:
last.update_record(is_last=False)
else:
form = DIV('login to comment')
items = db(db.issue.uuid==request.args(0)).select(
orderby=db.issue.created_on)
if isinstance(form,FORM) and form.accepted: do_mail(items)
return dict(project=project,form=form,items=items,last=last)
@auth.requires_membership('manager')
def assign():
from datetime import datetime
if (request.args(0)):
query= (db.issue_assignment.issue==request.args(0))
else:
query=(db.issue_assignment)
FIELDS=(db.issue_assignment.issue,db.issue_assignment.assigned_by,\
db.issue_assignment.assigned_to,db.issue_assignment.assigned_date)
db.issue_assignment.assigned_by.default='%(first_name)s %(last_name)s' % auth.user
db.issue_assignment.assigned_by.writable=False
db.issue_assignment.assigned_date.default=datetime.now()
db.issue_assignment.assigned_date.writable=False
grid=SQLFORM.grid(query)
return dict(grid=grid)
@auth.requires_membership('manager')
def escalate():
issueID=request.args(0)
reference_project= db(db.issue.id==issueID).select().first()
super_proj = db(db.project.id==reference_project.project).select(db.project.super_project).first()
query = (db.issue.id==issueID)
if super_proj.super_project == None:
message = "Already a top level project"
else:
db(query).update(project=super_proj.super_project)
message= "The issue has been escalated"
session.flash = message
redirect(URL('projects'))
return dict()
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request,db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
@auth.requires_signature()
def data():
"""
http://..../[app]/default/data/tables
http://..../[app]/default/data/create/[table]
http://..../[app]/default/data/read/[table]/[id]
http://..../[app]/default/data/update/[table]/[id]
http://..../[app]/default/data/delete/[table]/[id]
http://..../[app]/default/data/select/[table]
http://..../[app]/default/data/search/[table]
but URLs bust be signed, i.e. linked with
A('table',_href=URL('data/tables',user_signature=True))
or with the signed load operator
LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
"""
return dict(form=crud())
|
[
"[email protected]"
] | |
52327f791bad53af1e5f123f7f1b3f296bffe0bb
|
dc940e2aa628eff693af36584cfad935990ebe7d
|
/v3.1.0/tool/SaveBookInfoToMySqlTool.py
|
c32721874dd569c804662a6f57f96fbcb50f3b77
|
[] |
no_license
|
520wsl/getXs8Novels
|
865572ea488e0bf3d4e21664eb576237b6dd18be
|
ecf6d0bc5dfdbe4b5c3e8a9aac313bf7abce614b
|
refs/heads/master
| 2020-04-18T00:59:56.777416 | 2019-02-15T08:52:11 | 2019-02-15T08:52:11 | 167,101,111 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,620 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '书籍数据存储工具类'
__author__ = 'Mad Dragon'
__mtime__ = '2019/1/24'
# 我不懂什么叫年少轻狂,只知道胜者为王
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import time
import moment
from tool.GetBookInfoTool import GetBookInfoTool
from public.DataTool import DataTool
from public.Logger import Logger
from public.MySqlTool import MySqlTool
class SaveBookInfoToMySqlToo():
def __init__(self, second, logger, getBookInfoToo, mySql, dataToo):
self.b_second = second
self.m_saveText = "INSERT INTO `links` (`url`,article) VALUES (%s, %s) ON DUPLICATE KEY UPDATE article = VALUES (article), nex = nex+1"
self.getBookInfoToo = getBookInfoToo
self.dataToo = dataToo
self.mySql = mySql
self.logger = logger
def saveText(self, link):
time.sleep(self.b_second)
content = self.getBookInfoToo.getTxtInfo(link)
if len(content) <= 0: return False
self.logger.debug('书籍 [ %s ] 文章存储' % (link))
return self.mySql.batchAdd(sql=self.m_saveText, data_info=[(link, content)])
def saveCatalog(self,bookId):
jsonData = self.getBookInfoToo.getCatalogInfo(bookId=bookId)
self.logger.debug(jsonData)
if __name__ == '__main__':
b_title = 'GetBookInfoToo'
b_second = 1
b_timeStr = moment.now().format('YYYY-MM-DD-HH-mm-ss')
dataToo = DataTool(logName=b_title, second=b_second, timeStr=b_timeStr)
logger = Logger(logname=dataToo.initLogName(), loglevel=1, logger=b_title).getlog()
mySql = MySqlTool(logName=dataToo.initLogName())
getBookInfoToo = GetBookInfoTool(second=b_second, dataToo=dataToo, logger=logger)
saveBookInfoToMySqlToo = SaveBookInfoToMySqlToo(second=b_second, logger=logger,
getBookInfoToo=getBookInfoToo,
mySql=mySql, dataToo=dataToo)
|
[
"[email protected]"
] | |
2e4d4ad192fac1e61c9f8874b8b0b4a41791f5d5
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_4/Areseye/D_a.py
|
e133bb845631ab5660737f81985ed9f3e3c0f065
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 585 |
py
|
#encoding:utf8
import os
import pdb
def solve(K,C,S):
ret = [1,]
gap = K**(C-1)
cur = 1
for i in range(0,K-1):
cur += gap
ret.append(cur)
return ret;
if __name__ == '__main__':
with open('d.in','r') as fin:
for ind,line in enumerate(fin):
if ind is 0:
T = int(line)
else:
strnum = line.split(' ')
param = map(int,strnum)
res = solve(*param)
resstr = map(str,res)
print 'Case #{}: {}'.format(ind,' '.join(resstr))
|
[
"[[email protected]]"
] | |
6b688c2274d062b107eef215f2f6857853970569
|
1333357d463006bb6540fb6f68f140c383d4e676
|
/data/data_clean.py
|
3bcfe42405065c6c3c2b46f2e42f29c6764ba91d
|
[] |
no_license
|
markWJJ/classifynetwork
|
ced1ff5eaa9e1c7e9e6440e08e6744070689a305
|
d65f22486434fdfbdce38d063e176eb31c5d7354
|
refs/heads/master
| 2023-01-09T03:12:01.540254 | 2018-09-17T07:50:02 | 2018-09-17T07:50:02 | 149,088,397 | 0 | 1 | null | 2022-12-21T03:34:27 | 2018-09-17T07:48:54 |
Python
|
UTF-8
|
Python
| false | false | 8,090 |
py
|
# -*- coding: UTF-8 -*-
import re
from collections import OrderedDict
import jieba
import codecs
from hanziconv import HanziConv
import os
import string
import json
import jieba.posseg as pseg
import numpy as np
FH_NUM = (
(u"0", u"0"), (u"1", u"1"), (u"2", u"2"), (u"3", u"3"), (u"4", u"4"),
(u"5", u"5"), (u"6", u"6"), (u"7", u"7"), (u"8", u"8"), (u"9", u"9"),
)
FH_NUM = dict(FH_NUM)
FH_ALPHA = (
(u"a", u"a"), (u"b", u"b"), (u"c", u"c"), (u"d", u"d"), (u"e", u"e"),
(u"f", u"f"), (u"g", u"g"), (u"h", u"h"), (u"i", u"i"), (u"j", u"j"),
(u"k", u"k"), (u"l", u"l"), (u"m", u"m"), (u"n", u"n"), (u"o", u"o"),
(u"p", u"p"), (u"q", u"q"), (u"r", u"r"), (u"s", u"s"), (u"t", u"t"),
(u"u", u"u"), (u"v", u"v"), (u"w", u"w"), (u"x", u"x"), (u"y", u"y"), (u"z", u"z"),
(u"A", u"A"), (u"B", u"B"), (u"C", u"C"), (u"D", u"D"), (u"E", u"E"),
(u"F", u"F"), (u"G", u"G"), (u"H", u"H"), (u"I", u"I"), (u"J", u"J"),
(u"K", u"K"), (u"L", u"L"), (u"M", u"M"), (u"N", u"N"), (u"O", u"O"),
(u"P", u"P"), (u"Q", u"Q"), (u"R", u"R"), (u"S", u"S"), (u"T", u"T"),
(u"U", u"U"), (u"V", u"V"), (u"W", u"W"), (u"X", u"X"), (u"Y", u"Y"), (u"Z", u"Z"),
)
FH_ALPHA = dict(FH_ALPHA)
NUM = (
(u"一", "1"), (u"二" ,"2"), (u"三", "3"), (u"四", "4"), (u"五", "5"), (u"六", "6"), (u"七", "7"),
(u"八", "8"), (u"九", "9"), (u"零", "0"), (u"十", "10")
)
NUM = dict(NUM)
CH_PUNCTUATION = u"["#$%&',:;@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·!?。。]"
EN_PUNCTUATION = u"['!#$%&\'()*+,-/:;<=>?@[\\]^_`{|}~']"
sub_dicit = {u"老师好":"",
u"老师":u"", u"你好":u"", u"您好":u"",
u"请问":u"", u"请":u"", u"谢谢":u"",
u""":u""}
class DataCleaner(object):
def __init__(self, params_path):
self.params_path = params_path
self.read_word()
self.read_synonym_word()
self.read_non_words()
def read_non_words(self):
word_path = self.params_path.get("non_words", "")
print("----non word path----", word_path)
if os.path.exists(word_path):
with codecs.open(word_path, "r", "utf-8") as f:
self.non_word = f.read().splitlines()
else:
self.non_word = None
print(self.non_word,"----non word----")
def calculate_non_word(self, input_string):
non_cnt = 0
if self.non_word:
word_cut = list(jieba.cut(input_string))
for word in self.non_word:
if word in word_cut:
non_cnt += 1
if np.mod(non_cnt, 2) == 0:
return 0
else:
return 1
def synthom_replacement(self, input_string):
cut_word_list = list(jieba.cut(input_string))
normalized_word_list = cut_word_list
for index, word in enumerate(cut_word_list):
if word in self.synonym_dict:
normalized_word_list[index] = self.synonym_dict[word]
return "".join(normalized_word_list)
def remove_stop_word(self, input_string):
cut_word_list = list(jieba.cut(input_string))
normalized_word_list = []
for word in cut_word_list:
if word in self.stop_word:
continue
else:
normalized_word_list.append(word)
return "".join(normalized_word_list)
def remove_symbol(self, input_string):
cn_text = re.sub(CH_PUNCTUATION, "", input_string)
en_text = re.sub(EN_PUNCTUATION, "", cn_text)
return en_text
def poc_clean(self, input_string):
tmp = self.upper2lower(input_string)
tmp = self.tra2sim(tmp)
tmp = self.full2half(tmp)
if self.synonym_dict:
tmp = self.synthom_replacement(tmp)
if self.stop_word:
nonstop_text = self.remove_stop_word(tmp)
if len(nonstop_text) >= 1:
tmp = nonstop_text
non_symbol_text = self.remove_symbol(tmp)
if len(non_symbol_text) >= 1:
tmp = non_symbol_text
char_pattern = re.compile(u"[\u4e00-\u9fa5,0-9,a-z,A-Z]+")
tmp = "".join(char_pattern.findall(tmp))
output = ""
for token in tmp:
if len(token) >= 1:
output += token
return output
def clean(self, input_string):
tmp = self.upper2lower(input_string)
tmp = self.tra2sim(tmp)
tmp = self.full2half(tmp)
return tmp
def read_word(self):
word_path = self.params_path.get("stop_word", "")
if os.path.exists(word_path):
with codecs.open(word_path, "r", "utf-8") as f:
self.stop_word = f.read().splitlines()
else:
print("not exiting params_path".format(word_path))
self.stop_word = None
def read_synonym_word(self):
self.synonym_dict = {}
synonym_path = self.params_path.get("synthom_path", "")
if os.path.exists(synonym_path):
with codecs.open(synonym_path, "r", "utf-8") as f:
data = f.read().splitlines()
for item in data:
content = item.split()
self.synonym_dict[content[0]] = content[1]
print(content[0], content[1])
else:
self.synonym_dict = None
def synonym_word_mapping(self):
self.synonym2standard = OrderedDict()
for key in self.synonym_dict:
for item in self.synonym_dict[key]:
self.synonym2standard[item] = key
def upper2lower(self, input_string):
return input_string.lower()
def subtoken(self, input_string):
tmp_string = input_string
for key in sub_dicit:
tmp_string = re.sub(key, sub_dicit[key], tmp_string)
return tmp_string
def lower2upper(self, input_string):
return input_string.upper()
def replace_phrase(input_string, phrase_dict):
s = input_string
for key in phrase_dict.keys():
s = re.sub(key, phrase_dict[key], s)
return s
def tra2sim(self, input_string):
s = HanziConv.toSimplified(input_string)
return s
def full2half(self, input_string):
s = ""
for uchar in input_string:
if uchar in FH_NUM:
half_char = FH_NUM[uchar]
if uchar in FH_ALPHA:
half_char = FH_ALPHA[uchar]
if uchar in NUM:
half_char = NUM[uchar]
else:
half_char = uchar
s += half_char
return s
def detect_en(self, input_string,
en_pattern=re.compile(u'[\u4e00-\u9fa5]'),
alphabet_pattern=re.compile(u"[a-cA-C]")):
s = []
for var in en_pattern.split(input_string.decode("utf-8")):
if len(var) > 1:
"""
if len(var) >= 1 it is a word or sentence
"""
s.append(var)
elif len(var) == 1:
"""
if len(var) == 1 it may be a alphabet and usually it is a choice for a given question
"""
tmp_var = alphabet_pattern.findall(var)
if len(tmp_var) == 1:
s.append(self.upper2lower(var))
return s
def detect_ch(self, input_string, ch_pattern = re.compile(u"[\u4e00-\u9fa5]+")):
s = ch_pattern.findall(input_string.decode("utf-8"))
s = " ".join(s)
return s
def sentence_segmentation(self, input_string, symbol_pattern=re.compile(CH_PUNCTUATION)):
"""
based on CH_PUNCTUATION to segment sentence
"""
return symbol_pattern.split(input_string.decode("utf-8"))
|
[
"[email protected]"
] | |
ac9bc264069f3b02a22624cafb6308e8ec8ae4bf
|
79e19819aec49b500825f82a7de149eb6a0ba81d
|
/leetcode/303.py
|
f778311ff580a2a44b295e3a1440ef7bab29626f
|
[] |
no_license
|
seoyeonhwng/algorithm
|
635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26
|
90406ee75de69996e666ea505ff5d9045c2ad941
|
refs/heads/master
| 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 558 |
py
|
class NumArray:
def __init__(self, nums: List[int]):
self.nums = nums
if nums:
self.memo()
def memo(self):
self.dp = [0] * len(self.nums)
self.dp[0] = self.nums[0]
for i in range(1, len(self.nums)):
self.dp[i] = self.dp[i-1] + self.nums[i]
def sumRange(self, i: int, j: int) -> int:
return self.dp[j] - self.dp[i-1] if i > 0 else self.dp[j]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
|
[
"[email protected]"
] | |
723c8b2001a43c9aa112cd5eba3a02f98544b6f5
|
58ade65dffc7cbe103d93d7c769096a20d9f9815
|
/src/smach_based_introspection_framework/online_part/data_collection/StoreVectorToRedisProc.py
|
d94e5e8102adc6af5a29654d7877be2d0b34a276
|
[
"BSD-3-Clause"
] |
permissive
|
birlrobotics/smach_based_introspection_framework
|
2cff69ecec030a5b5046dea99f9e15105f52361b
|
f16742339cddfc86effba4dbf6e5062304704b89
|
refs/heads/master
| 2021-05-09T12:02:26.946473 | 2019-05-29T02:46:47 | 2019-05-29T02:46:47 | 119,001,821 | 7 | 1 | null | 2018-07-05T04:58:40 | 2018-01-26T03:37:58 |
Python
|
UTF-8
|
Python
| false | false | 1,512 |
py
|
import multiprocessing
from ConvertTagTopicToInterestedVectorProc import (
data_frame_idx,
smach_state_idx,
data_header_idx,
)
class StoreVectorToRedisProc(multiprocessing.Process):
def __init__(
self,
com_queue,
node_name="StoreVectorToRedisProc_node",
):
multiprocessing.Process.__init__(self)
self.com_queue = com_queue
self.node_name = node_name
def run(self):
import rospy
rospy.init_node(self.node_name, anonymous=True)
try:
import redis
import Queue
r = redis.Redis(host='localhost', port=6379, db=0)
rospy.loginfo('delete key \"tag_multimodal_msgs\": %s'%r.delete("tag_multimodal_msgs"))
while not rospy.is_shutdown():
try:
latest_data_tuple = self.com_queue.get(timeout=1)
except Queue.Empty:
continue
except KeyboardInterrupt:
break
data_frame = latest_data_tuple[data_frame_idx]
smach_state = latest_data_tuple[smach_state_idx]
data_header = latest_data_tuple[data_header_idx]
score = data_header.stamp.to_sec()
value = data_frame
r.zadd("tag_multimodal_msgs", value, score)
except Exception as e:
rospy.logerr("StoreVectorToRedisProc error: %s"%e)
rospy.loginfo("StoreVectorToRedisProc exits")
|
[
"[email protected]"
] | |
7b5ebbb6b02299b7f47b6077cba156000ceeb9c3
|
8efe9a6c9489d798b5f5b610eb531d86924a1548
|
/src/wix/urls.py
|
c74a0f134076e607c3999dbed8538b6643de2a2f
|
[] |
no_license
|
MarekBiczysko/naklisze_public
|
e8e6f7e61cdb83b74ea68862b40c061c0253767b
|
e53c0e8fefffbcfc3a8859976eb7b81cf6270847
|
refs/heads/master
| 2022-12-12T02:27:09.824803 | 2019-07-23T10:54:47 | 2019-07-23T10:54:47 | 198,410,666 | 0 | 0 | null | 2022-12-08T01:03:08 | 2019-07-23T10:46:57 |
Python
|
UTF-8
|
Python
| false | false | 288 |
py
|
from django.views.generic import RedirectView
from django.conf.urls import url
from .views import wix_page
urlpatterns = [
# url(r'^$', RedirectView.as_view(url='https://biczysko.wix.com/foto')),
url(r'^$', wix_page, name='wix'),
url(r'^', RedirectView.as_view(url='/')),
]
|
[
"[email protected]"
] | |
61abe84b1c8861332157ee57244832fe731b1498
|
f9bcdd8fe51e94b884752574229bc592a84be6bd
|
/python/315_Count_of_Smaller_Numbers_After_Self.py
|
33e899cb414d1e0faa68834085f58c7d725813e5
|
[] |
no_license
|
HankerZheng/LeetCode-Problems
|
cf46a24444cfc3e6bcff38c10a5bb5945e410b5b
|
d308e0e41c288f23a846b8505e572943d30b1392
|
refs/heads/master
| 2021-01-12T17:49:40.072069 | 2017-08-17T04:37:20 | 2017-08-17T04:37:20 | 69,397,987 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,656 |
py
|
# You are given an integer array nums and you have to return a new counts array. The counts array has the property where counts[i] is the number of smaller elements to the right of nums[i].
# Example:
# Given nums = [5, 2, 6, 1]
# To the right of 5 there are 2 smaller elements (2 and 1).
# To the right of 2 there is only 1 smaller element (1).
# To the right of 6 there is 1 smaller element (1).
# To the right of 1 there is 0 smaller element.
# Return the array [2, 1, 1, 0].
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.smallerCnt = 0
self.selfCnt = 1
def insert(self, val):
cnt = 0
tmp = self
while tmp:
if val < tmp.val:
tmp.smallerCnt += 1
if not tmp.left:
tmp.left = TreeNode(val)
break
tmp = tmp.left
elif val > tmp.val:
cnt += tmp.smallerCnt + tmp.selfCnt
if not tmp.right:
tmp.right = TreeNode(val)
break
tmp = tmp.right
else:
tmp.selfCnt += 1
cnt += tmp.smallerCnt
break
return cnt
class Solution(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if len(nums) <= 1: return [0] * len(nums)
ans = [0]
dataTree = TreeNode(nums[-1])
for num in nums[-2::-1]:
ans.insert(0,dataTree.insert(num))
return ans
|
[
"[email protected]"
] | |
688bac0891c7135030e8bf35d07f7a9518baae31
|
c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb
|
/Socket/GPS_Server_Test/GPS_Server_testData.py
|
2d6e0b37aa0f879b89e87aa831bf512762c6fe1c
|
[] |
no_license
|
GIS90/python_base_use
|
e55d55f9df505dac45ddd332fb65dcd08e8e531f
|
7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1
|
refs/heads/master
| 2020-04-02T08:33:49.461307 | 2018-10-23T03:33:41 | 2018-10-23T03:33:41 | 154,249,857 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,941 |
py
|
# -*- coding: utf-8 -*-
"""
------------------------------------------------
describe:
this tool be used to
------------------------------------------------
"""
import SocketServer
import codecs
import datetime
import os
import threading
from SocketServer import BaseRequestHandler
SOCKET_DATA_MAX = 16 * 1024 * 1024
FORMMAT = "%Y%m%d%H%M%S"
def __get_cur_folder():
# if getattr(sys, "frozen", False):
return os.path.dirname(os.path.abspath(__file__))
# else:
# cur_folder = os.path.dirname(inspect.getfile(inspect.currentframe()))
# return os.path.abspath(cur_folder)
_cur_folder = __get_cur_folder()
_gps_file_folder = os.path.abspath(os.path.join(_cur_folder, "liveGPS"))
if not os.path.exists(_gps_file_folder):
os.makedirs(_gps_file_folder)
class TCPRequestHandler(BaseRequestHandler):
"""
The RequestHandler class for my server.
It is instantiated once per connection to the server, and must
override the handle method to implement communication to the
client.
"""
def setup(self):
BaseRequestHandler.setup(self)
def handle(self):
while True:
try:
data = self.request.recv(SOCKET_DATA_MAX).strip()
if data:
print data
gps_file_name = "gps.dat"
gps_file = os.path.join(_gps_file_folder, gps_file_name)
gps = codecs.open(gps_file, 'wb', 'utf-8')
gps.write(data)
gps.close()
except Exception as e:
print e.message
if __name__ == "__main__":
host = ""
port = 1991
addr = (host, port)
print "Server start ......"
# It use to
server = SocketServer.ThreadingTCPServer(addr, TCPRequestHandler)
server.allow_reuse_address = True
server.serve_forever()
|
[
"[email protected]"
] | |
7e45a200414423d396becba56436abd46f1d731e
|
66862c422fda8b0de8c4a6f9d24eced028805283
|
/slambook2/3rdparty/opencv-3.3.0/samples/python/floodfill.py
|
1b988d3763ef61c3f84e1e5039da4e6540f9914f
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
zhh2005757/slambook2_in_Docker
|
57ed4af958b730e6f767cd202717e28144107cdb
|
f0e71327d196cdad3b3c10d96eacdf95240d528b
|
refs/heads/main
| 2023-09-01T03:26:37.542232 | 2021-10-27T11:45:47 | 2021-10-27T11:45:47 | 416,666,234 | 17 | 6 |
MIT
| 2021-10-13T09:51:00 | 2021-10-13T09:12:15 | null |
UTF-8
|
Python
| false | false | 2,007 |
py
|
#!/usr/bin/env python
'''
Floodfill sample.
Usage:
floodfill.py [<image>]
Click on the image to set seed point
Keys:
f - toggle floating range
c - toggle 4/8 connectivity
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
print(__doc__)
img = cv2.imread(fn, True)
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
h, w = img.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
seed_pt = None
fixed_range = True
connectivity = 4
def update(dummy=None):
if seed_pt is None:
cv2.imshow('floodfill', img)
return
flooded = img.copy()
mask[:] = 0
lo = cv2.getTrackbarPos('lo', 'floodfill')
hi = cv2.getTrackbarPos('hi', 'floodfill')
flags = connectivity
if fixed_range:
flags |= cv2.FLOODFILL_FIXED_RANGE
cv2.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
cv2.circle(flooded, seed_pt, 2, (0, 0, 255), -1)
cv2.imshow('floodfill', flooded)
def onmouse(event, x, y, flags, param):
global seed_pt
if flags & cv2.EVENT_FLAG_LBUTTON:
seed_pt = x, y
update()
update()
cv2.setMouseCallback('floodfill', onmouse)
cv2.createTrackbar('lo', 'floodfill', 20, 255, update)
cv2.createTrackbar('hi', 'floodfill', 20, 255, update)
while True:
ch = cv2.waitKey()
if ch == 27:
break
if ch == ord('f'):
fixed_range = not fixed_range
print('using %s range' % ('floating', 'fixed')[fixed_range])
update()
if ch == ord('c'):
connectivity = 12-connectivity
print('connectivity =', connectivity)
update()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
fe547b0d6f92c919781366e3a1059ab975ea9b14
|
725abfa74e3800622837e60615dc15c6e91442c0
|
/venv/Lib/site-packages/django/contrib/messages/storage/session.py
|
7dbd24a8da5c105a8955f5695fe53d22b05df70b
|
[] |
no_license
|
Malak-Abdallah/TODOlist
|
4840e2e0a27e6499ae6b37524bb3e58455d08bfb
|
fd35754e8eac9b262fae17ec16ad9fb510a12f5d
|
refs/heads/master
| 2023-07-16T11:38:48.759232 | 2021-08-31T09:43:11 | 2021-08-31T09:43:11 | 401,600,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,669 |
py
|
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import MessageDecoder, MessageEncoder
class SessionStorage(BaseStorage):
"""
Store messages in the session (that is, django.contrib.sessions).
"""
session_key = "_messages"
def __init__(self, request, *args, **kwargs):
assert hasattr(request, "session"), (
"The session-based temporary "
"message storage requires session middleware to be installed, "
"and come before the message middleware in the "
"MIDDLEWARE list."
)
super().__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return (
self.deserialize_messages(self.request.session.get(self.session_key)),
True,
)
def _store(self, messages, response, *args, **kwargs):
"""
Store a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder()
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, str):
return json.loads(data, cls=MessageDecoder)
return data
|
[
"[email protected]"
] | |
0c49b984bf9f2ac8bae5046c1f435df4c90cd46f
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/SSD/mmdet/models/builder.py
|
05efb838ed26ce7d0c12f1cdf8a678b15d583bdd
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 2,225 |
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mmcv.utils import Registry, build_from_cfg
from torch import nn
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, is is either a dict
or a list of configs.
registry (:obj:`Registry`): A registry the module belongs to.
default_args (dict, optional): Default arguments to build the module.
Defaults to None.
Returns:
nn.Module: A built nn module.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
else:
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
"""Build backbone."""
return build(cfg, BACKBONES)
def build_neck(cfg):
"""Build neck."""
return build(cfg, NECKS)
def build_roi_extractor(cfg):
"""Build roi extractor."""
return build(cfg, ROI_EXTRACTORS)
def build_shared_head(cfg):
"""Build shared head."""
return build(cfg, SHARED_HEADS)
def build_head(cfg):
"""Build head."""
return build(cfg, HEADS)
def build_loss(cfg):
"""Build loss."""
return build(cfg, LOSSES)
def build_detector(cfg, train_cfg=None, test_cfg=None):
"""Build detector."""
return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
[
"[email protected]"
] | |
d06ab34fea0bac11e8aa864a35184490730e2a5a
|
02b495111594a367405b2bfbf220e38da3a5f7b0
|
/devel/lib/python2.7/dist-packages/brics_actuator/msg/_JointValue.py
|
0723b3357a381bbe1f9fbd1dbb79f58932d32bef
|
[
"BSD-2-Clause"
] |
permissive
|
Ashuditya/Rebellious-Cowards
|
3f7c6afd314e4bf2ffb72b99ecf58be23f309e97
|
56ec395147f2fc59a26669a74a04fe02227bc7b7
|
refs/heads/master
| 2023-01-24T10:57:47.533839 | 2020-10-01T15:58:07 | 2020-10-01T15:58:07 | 218,202,193 | 0 | 3 |
BSD-2-Clause
| 2020-10-01T17:07:44 | 2019-10-29T04:09:46 |
Makefile
|
UTF-8
|
Python
| false | false | 6,583 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from brics_actuator/JointValue.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class JointValue(genpy.Message):
_md5sum = "c8dad5a006889ad7de711a684999f0c6"
_type = "brics_actuator/JointValue"
_has_header = False #flag to mark the presence of a Header object
_full_text = """time timeStamp #time of the data
string joint_uri
string unit #if empy expects si units, you can use boost::unit
float64 value
"""
__slots__ = ['timeStamp','joint_uri','unit','value']
_slot_types = ['time','string','string','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
timeStamp,joint_uri,unit,value
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(JointValue, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.timeStamp is None:
self.timeStamp = genpy.Time()
if self.joint_uri is None:
self.joint_uri = ''
if self.unit is None:
self.unit = ''
if self.value is None:
self.value = 0.
else:
self.timeStamp = genpy.Time()
self.joint_uri = ''
self.unit = ''
self.value = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2I().pack(_x.timeStamp.secs, _x.timeStamp.nsecs))
_x = self.joint_uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.unit
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_d().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.timeStamp is None:
self.timeStamp = genpy.Time()
end = 0
_x = self
start = end
end += 8
(_x.timeStamp.secs, _x.timeStamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_uri = str[start:end].decode('utf-8')
else:
self.joint_uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.unit = str[start:end].decode('utf-8')
else:
self.unit = str[start:end]
start = end
end += 8
(self.value,) = _get_struct_d().unpack(str[start:end])
self.timeStamp.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2I().pack(_x.timeStamp.secs, _x.timeStamp.nsecs))
_x = self.joint_uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.unit
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_d().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.timeStamp is None:
self.timeStamp = genpy.Time()
end = 0
_x = self
start = end
end += 8
(_x.timeStamp.secs, _x.timeStamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_uri = str[start:end].decode('utf-8')
else:
self.joint_uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.unit = str[start:end].decode('utf-8')
else:
self.unit = str[start:end]
start = end
end += 8
(self.value,) = _get_struct_d().unpack(str[start:end])
self.timeStamp.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
|
[
"[email protected]"
] | |
f32e61acab543b074d8350bb2c926e937628cbb7
|
97f285b6f8016a8d1d2d675fffb771df3c9e37b9
|
/study/algorithms/sorting/selection_sort.py
|
b1177b6f5b9e1b1dd7feb0d3974b2999b7447124
|
[] |
no_license
|
oskomorokhov/python
|
ef5408499840465d18852954aee9de460d0e7250
|
8909396c4200bd2fca19d3f216ed5f484fb2192a
|
refs/heads/master
| 2021-05-14T09:27:25.413163 | 2019-12-12T21:00:05 | 2019-12-12T21:00:05 | 116,327,306 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,256 |
py
|
# selection sort
def ssort(lst):
""" The algorithm divides the input list into two parts: the sublist of items already sorted, which is built up from left to right at the front (left) of the list,
and the sublist of items remaining to be sorted that occupy the rest of the list. Initially, the sorted sublist is empty and the unsorted sublist is the entire input list.
The algorithm proceeds by finding the smallest (or largest, depending on sorting order) element in the unsorted sublist, exchanging (swapping) it with the leftmost unsorted element (putting it in sorted order),
and moving the sublist boundaries one element to the right.
"""
pivot = 0
while pivot < len(lst):
current_min = lst[pivot]
new_min = None
for num in lst[pivot+1:]:
if num < current_min:
current_min = new_min = num
if new_min:
lst[lst.index(new_min)
], lst[pivot] = lst[pivot], lst[lst.index(new_min)]
pivot += 1
return lst
if __name__ == '__main__':
print("original list", [3, 44, 38, 5, 47,
15, 36, 26, 27, 2, 46, 4, 19, 50, 48])
print(ssort([3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]))
|
[
"[email protected]"
] | |
8ca1e09fb7ee173a14faeb5049dd0aa0737a9ba0
|
eff2fc11905f6118dcd70050392f168cd7aea086
|
/leetcode/40_combination_sum_ii/solution1.py
|
df0fa9abba6a73cfa6548fd39c14982c906e75fb
|
[] |
no_license
|
algobot76/leetcode-python
|
28f1e1107fa941a3b40006f074eec6231e674ac1
|
ec8bff8978d6915bfdf187c760b97ee70f7515af
|
refs/heads/master
| 2021-07-05T17:06:40.581977 | 2020-09-19T22:02:38 | 2020-09-19T22:02:38 | 199,255,699 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
class Solution:
def combinationSum2(self, candidates, target):
candidates.sort()
combs = []
self.dfs(candidates, target, 0, [], combs)
return combs
def dfs(self, candidates, target, start, comb, combs):
if target < 0:
return
if target == 0:
return combs.append(list(comb))
prev = 0
while start < len(candidates) and candidates[start] <= target:
if prev != candidates[start]:
comb.append(candidates[start])
self.dfs(candidates, target - candidates[start], start + 1,
comb, combs)
comb.pop()
prev = candidates[start]
start += 1
|
[
"[email protected]"
] | |
0703e5f22212b00ffaf7e02dd00eeaa7b1966ce3
|
cc578cec7c485e2c1060fd075ccc08eb18124345
|
/cs15211/TopKFrequentWords.py
|
7733837228f8d83367a4b89021aa264f1154d5e3
|
[
"Apache-2.0"
] |
permissive
|
JulyKikuAkita/PythonPrac
|
18e36bfad934a6112f727b4906a5e4b784182354
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
refs/heads/master
| 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 |
Apache-2.0
| 2019-02-07T06:15:30 | 2017-05-20T18:12:53 |
Python
|
UTF-8
|
Python
| false | false | 5,923 |
py
|
__source__ = 'https://leetcode.com/problems/top-k-frequent-words/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 692. Top K Frequent Words
#
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest.
# If two words have the same frequency, then the word with the lower alphabetical order comes first.
#
# Example 1:
# Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
# Output: ["i", "love"]
# Explanation: "i" and "love" are the two most frequent words.
# Note that "i" comes before "love" due to a lower alphabetical order.
# Example 2:
# Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
# Output: ["the", "is", "sunny", "day"]
# Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
# with the number of occurrence being 4, 3, 2 and 1 respectively.
# Note:
# You may assume k is always valid, 1 <= k <= number of unique elements.
# Input words contain only lowercase letters.
# Follow up:
# Try to solve it in O(n log k) time and O(n) extra space.
#
import heapq
import unittest
import collections
#
# Approach #1: Sorting [Accepted]
# Time Complexity: O(NlogN), where N is the length of words.
# We count the frequency of each word in O(N) time, then we sort the given words in O(NlogN) time.
#
# Space Complexity: O(N), the space used to store our candidates.
class Solution(object):
def topKFrequent(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[str]
"""
count = collections.Counter(words)
candidates = count.keys()
candidates.sort(key = lambda w: (-count[w], w))
return candidates[:k]
# In Python, we improve this to O(N+klogN): our heapq.heapify operation and counting operations are O(N),
# and each of kk heapq.heappop operations are O(logN).
# Space Complexity: O(N)O(N), the space used to store our count.
class Solution2(object):
def topKFrequent(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[str]
"""
count = collections.Counter(words)
heap = [(-freq, word) for word, freq in count.items()]
heapq.heapify(heap)
return [heapq.heappop(heap)[1] for _ in xrange(k)]
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/top-k-frequent-words/solution/
# Approach #1: Sorting [Accepted]
# 68ms 11.37%
class Solution {
public List<String> topKFrequent(String[] words, int k) {
Map<String, Integer> count = new HashMap<>();
for (String word: words) {
count.put(word, count.getOrDefault(word, 0) + 1);
}
List<String> candidates = new ArrayList(count.keySet());
Collections.sort(candidates, (w1, w2) -> count.get(w1).equals(count.get(w2))?
w1.compareTo(w2) : count.get(w2) - count.get(w1)); //if w1 - w2,
// sorting in increasing order, thus return least frequent words
return candidates.subList(0, k);
}
}
# Approach #2: Heap [Accepted] PQ
# 11ms 99.80%
# Time Complexity: O(Nlogk), where N is the length of words.
# We count the frequency of each word in O(N) time, then we add N words to the heap,
# each in O(logk) time. Finally, we pop from the heap up to k times. As k <= N, this is O(Nlogk) in total.
/*
Lambda expression
https://www.mkyong.com/java8/java-8-lambda-comparator-example/
*/
# 13ms 81.92%
class Solution {
public List<String> topKFrequent(String[] words, int k) {
List<String> res = new ArrayList<>();
Map<String, Integer> map = new HashMap<>();
for (String word: words) {
map.put(word, map.getOrDefault(word, 0) + 1);
}
PriorityQueue<Map.Entry<String, Integer>> pq = new PriorityQueue<>(new Checker());
for (Map.Entry<String, Integer> entry : map.entrySet()) {
pq.offer(entry);
if (pq.size() > k) pq.poll();
}
while (pq.size() != 0) {
res.add(0, pq.poll().getKey());
}
return res;
}
}
class Checker implements Comparator<Map.Entry<String, Integer>> {
public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
if (o1.getValue() == o2.getValue()) {
return o2.getKey().compareTo(o1.getKey());
} else {
return o1.getValue() - o2.getValue();
}
}
}
# 10ms 99.34%
class Solution {
private class Point implements Comparable<Point> {
private String str;
private int count;
public Point(String str) {
this.str = str;
this.count = 1;
}
@Override
public int hashCode() {
return str.hashCode();
}
@Override
public int compareTo(Point b) {
if(count != b.count) {
return b.count - count;
}
else {
return str.compareTo(b.str);
}
}
public void addCount() {
count++;
}
public String getStr() {
return str;
}
}
public List<String> topKFrequent(String[] words, int k) {
Map<String, Point> map = new HashMap<>();
for(String word: words) {
if(map.containsKey(word)) {
map.get(word).addCount();
}
else map.put(word, new Point(word));
}
PriorityQueue<Point> pq = new PriorityQueue<>(map.values());
int count = 0;
List<String> res = new ArrayList<>();
while(!pq.isEmpty() && count < k) {
res.add(pq.poll().getStr());
count++;
}
return res;
}
}
'''
|
[
"[email protected]"
] | |
01651216a026d86c1a68fac21316efefe8e285b4
|
6b05bddf2e294c8e1b39846aecadfa06b4ff805d
|
/kubevirt/models/v1_secret_volume_source.py
|
a4149f175fdbc18ed8d07833b30451edf27ea370
|
[
"Apache-2.0"
] |
permissive
|
kubevirt/client-python
|
5ca82fe55d48c07f62796d2bed3605a7c189922c
|
235fe17f58d41165010be7e4122cb67bdc866fe7
|
refs/heads/master
| 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 |
Apache-2.0
| 2022-10-20T13:52:10 | 2017-09-27T12:51:32 |
Python
|
UTF-8
|
Python
| false | false | 5,318 |
py
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SecretVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'optional': 'bool',
'secret_name': 'str',
'volume_label': 'str'
}
attribute_map = {
'optional': 'optional',
'secret_name': 'secretName',
'volume_label': 'volumeLabel'
}
def __init__(self, optional=None, secret_name=None, volume_label=None):
"""
V1SecretVolumeSource - a model defined in Swagger
"""
self._optional = None
self._secret_name = None
self._volume_label = None
if optional is not None:
self.optional = optional
if secret_name is not None:
self.secret_name = secret_name
if volume_label is not None:
self.volume_label = volume_label
@property
def optional(self):
"""
Gets the optional of this V1SecretVolumeSource.
Specify whether the Secret or it's keys must be defined
:return: The optional of this V1SecretVolumeSource.
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""
Sets the optional of this V1SecretVolumeSource.
Specify whether the Secret or it's keys must be defined
:param optional: The optional of this V1SecretVolumeSource.
:type: bool
"""
self._optional = optional
@property
def secret_name(self):
"""
Gets the secret_name of this V1SecretVolumeSource.
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:return: The secret_name of this V1SecretVolumeSource.
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""
Sets the secret_name of this V1SecretVolumeSource.
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param secret_name: The secret_name of this V1SecretVolumeSource.
:type: str
"""
self._secret_name = secret_name
@property
def volume_label(self):
"""
Gets the volume_label of this V1SecretVolumeSource.
The volume label of the resulting disk inside the VMI. Different bootstrapping mechanisms require different values. Typical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).
:return: The volume_label of this V1SecretVolumeSource.
:rtype: str
"""
return self._volume_label
@volume_label.setter
def volume_label(self, volume_label):
"""
Sets the volume_label of this V1SecretVolumeSource.
The volume label of the resulting disk inside the VMI. Different bootstrapping mechanisms require different values. Typical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).
:param volume_label: The volume_label of this V1SecretVolumeSource.
:type: str
"""
self._volume_label = volume_label
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1SecretVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
08777ef56a0df912e73d6c15c9f138bd8b2e87c3
|
f4434c85e3814b6347f8f8099c081ed4af5678a5
|
/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_recognize_pii_entities_async.py
|
7c580718d21294e4c46f62a5a71fbf2a0867ba92
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
yunhaoling/azure-sdk-for-python
|
5da12a174a37672ac6ed8e3c1f863cb77010a506
|
c4eb0ca1aadb76ad892114230473034830116362
|
refs/heads/master
| 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 |
MIT
| 2020-03-31T20:35:17 | 2019-03-25T22:43:40 |
Python
|
UTF-8
|
Python
| false | false | 4,031 |
py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_pii_entities_async.py
DESCRIPTION:
This sample demonstrates how to recognize personally identifiable information in a batch of documents.
The endpoint recognize_pii_entities is only available for API version v3.1-preview and up.
In this sample, we will be working for a company that handles loan payments. To follow privacy guidelines,
we need to redact all of our information before we make it public.
USAGE:
python sample_recognize_pii_entities_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
import asyncio
class RecognizePiiEntitiesSampleAsync(object):
async def recognize_pii_entities_async(self):
print(
"In this sample we will be going through our customer's loan payment information and redacting "
"all PII (personally identifable information) before storing this information on our public website. "
"I'm also looking to explicitly extract the SSN information, so I can update my database with SSNs for "
"our customers"
)
# [START recognize_pii_entities_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
documents = [
"""Parker Doe has repaid all of their loans as of 2020-04-25.
Their SSN is 859-98-0987. To contact them, use their phone number
555-555-5555. They are originally from Brazil and have Brazilian CPF number 998.214.865-68"""
]
async with text_analytics_client:
result = await text_analytics_client.recognize_pii_entities(documents)
docs = [doc for doc in result if not doc.is_error]
print(
"Let's compare the original document with the documents after redaction. "
"I also want to comb through all of the entities that got redacted"
)
for idx, doc in enumerate(docs):
print("Document text: {}".format(documents[idx]))
print("Redacted document text: {}".format(doc.redacted_text))
for entity in doc.entities:
print("...Entity '{}' with category '{}' got redacted".format(
entity.text, entity.category
))
# [END recognize_pii_entities_async]
print("All of the information that I expect to be redacted is!")
print(
"Now I want to explicitly extract SSN information to add to my user SSN database. "
"I also want to be fairly confident that what I'm storing is an SSN, so let's also "
"ensure that we're > 60% positive the entity is a SSN"
)
ssns = []
for doc in docs:
for entity in doc.entities:
if entity.category == 'U.S. Social Security Number (SSN)' and entity.confidence_score >= 0.6:
ssns.append(entity.text)
print("We have extracted the following SSNs as well: '{}'".format(
"', '".join(ssns)
))
async def main():
sample = RecognizePiiEntitiesSampleAsync()
await sample.recognize_pii_entities_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
[
"[email protected]"
] | |
414558f8f2f2f959546e50c46144100f193f178d
|
6d429c1bc185fc8180fc69f1d49fd781e9a90748
|
/appuser/codemanager.py
|
98381f12400d2cfb23c1cb65a163547d03f84290
|
[] |
no_license
|
FirayMa/store
|
6bc5d350da4170d0ef87d25748635cd1a32aa717
|
542a955451f78f9f904010383b1c661e2fbef471
|
refs/heads/master
| 2023-05-28T05:33:13.867339 | 2017-09-07T01:00:30 | 2017-09-07T01:00:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,766 |
py
|
from django.db import models
import pdb
import random
import string
from django.conf import settings
from common.e_mail import EmailEx
class CodeManager(models.Manager):
"""
验证码的manager
"""
email = EmailEx()
def send_code(self, email):
result={}
if not self.email.EMAIL_REGEX.match(email):
result['status'] = 1
result['msg'] = '电子邮件格式不正确'
else:
code = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(4))
Subject = settings.PROJECTNAME+'注册邮箱验证'
content = '您好, 欢迎您注册, 欢迎加入我们, 您的邮箱验证码是: ' + code
try:
self.email.send_text_email(Subject, content, email)
try:
verify_code = self.model.objects.get(email__exact = email, type ='0')
verify_code.code = code
verify_code.save()
except self.model.DoesNotExist:
verify_code = self.model(email=email, code=code, type ='0')
verify_code.save()
result['status'] = 2
result['msg'] = '验证码已发至您的邮箱中, 请到邮箱中查看您的验证码!'
except Exception as e:
result['status'] = 3
result['msg'] = '发送邮件的过程中发生错误: '+ str(e)
return result
def veirfy_code(self, code, email):
try:
verify_code = self.model.objects.get(email__exact = email, code =code)
return True
except self.model.DoesNotExist:
return False
|
[
"[email protected]"
] | |
cbfdc2132564815458464e1f64c86110d7b3e056
|
db3d4aa39bc6b3f521ba21afbfedd8164a68e4d5
|
/asgiref/conformance_async.py
|
5aeeeeacffef2d0bc4747777b7306d1a0c04b24e
|
[
"BSD-3-Clause"
] |
permissive
|
EdwardBetts/asgiref
|
808c55b5089d9c9d8ae33583b0a1728a6313f930
|
0ad52409735109a85238b5b068c77c0f4e60e59e
|
refs/heads/master
| 2021-01-21T22:19:00.404420 | 2017-08-23T03:33:56 | 2017-08-23T03:33:56 | 102,147,619 | 0 | 0 | null | 2017-09-01T19:45:30 | 2017-09-01T19:45:30 | null |
UTF-8
|
Python
| false | false | 743 |
py
|
import asyncio
def test_receive_async(self):
"""
Tests that the asynchronous receive() method works.
"""
# Make sure we can run asyncio code
self.skip_if_no_extension("async")
try:
import asyncio
except ImportError:
raise unittest.SkipTest("No asyncio")
# Test that receive works
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
@asyncio.coroutine
def test():
self.channel_layer.send("test_async", {"is it": "working"})
channel, message = yield from self.channel_layer.receive_async(["test_async"])
self.assertEqual(channel, "test_async")
self.assertEqual(message, {"is it": "working"})
self.loop.run_until_complete(test())
|
[
"[email protected]"
] | |
de6ff1b606ca0939e9cc25ea37d7b88e7f76c315
|
b9b19792e1890b56679dc167fb99f9612af477f7
|
/deeppy/graph/nodes.py
|
17893ad9ede4ed472d8bf3fcd5e5d7a6a94a5bf0
|
[
"MIT"
] |
permissive
|
fullstackenviormentss/deeppy_experimental
|
7990674a8eda0655671940d3baf25256af8a384b
|
dc06e294e37a30340c7d02ac12c4d00653baf96c
|
refs/heads/master
| 2020-03-18T22:01:01.964338 | 2015-08-25T18:15:28 | 2015-08-25T18:15:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 667 |
py
|
from ..base import Model, ParamMixin, PickleMixin, PhaseMixin
class Node(PhaseMixin, PickleMixin):
def _setup(self, **shapes):
pass
def fprop(self, **arrays):
pass
def bprop(self, **arrays):
pass
def out_shapes(self, **shapes):
pass
class SupervisedBatch(Node):
def __init__(self):
self.name = 'input'
pass
def _setup(self, x_shape, y_shape):
pass
def fprop(self, x, y):
return {'samples': x, 'labels': y}
def bprop(self, samples_grad, labels_grad):
pass
def out_shapes(self, x_shape, y_shape):
return {'samples': x_shape, 'labels': y_shape}
|
[
"[email protected]"
] | |
98809dfea4ff4dba9a3ba0d6f49603d5b7cd8938
|
f1d67722dcd4c2209eedc0a61e5ea0ee27c95470
|
/examples/farmer/farmer_ama.py
|
00a79662b473eef48f1d277a7ec361a36bbfb408
|
[] |
no_license
|
wangcj05/mpi-sppy
|
08204019b466da5e0812b16dd5cb53da1bdbd793
|
42aff4c11dc42fcba8a9520da00e48c6e9ab7d85
|
refs/heads/main
| 2023-08-25T04:36:58.606490 | 2021-11-01T21:40:14 | 2021-11-01T21:40:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 951 |
py
|
# Copyright 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
"""
An example of using amalgomator and solving directly the EF
To execute this:
python farmer_ama.py --num-scens=10 --crops-multiplier=3 --farmer-with-integer
WARNING:
num-scens must be specified !
"""
import mpisppy.utils.amalgomator as amalgomator
def main():
solution_files = {"first_stage_solution":"farmer_first_stage.csv",
}
ama_options = {"EF-2stage": True, # We are solving directly the EF
"write_solution":solution_files}
#The module can be a local file
ama = amalgomator.from_module("afarmer", ama_options)
ama.run()
print("first_stage_solution=", ama.first_stage_solution)
print("inner bound=", ama.best_inner_bound)
print("outer bound=", ama.best_outer_bound)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
4ede039a5f8e824cee79fba2efaf8cbcedf0a1bc
|
11195ea809c363f834f3fb31eb7de26437e2eb53
|
/course3/reachability.py
|
a1a09b13ad880b57067f789a2d3918fe4ab64d7b
|
[
"MIT"
] |
permissive
|
ropable/algorithmic_toolbox
|
e8d517dbc00541ef10fdc8c3e586194ebbd1b30b
|
b4dcf4fda19c394da2baa6eced0732bf50585237
|
refs/heads/master
| 2021-09-09T12:15:37.378207 | 2018-03-16T01:58:41 | 2018-03-16T01:58:41 | 110,786,531 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,117 |
py
|
# python3
import sys
def reach(adj, x, y):
# Determine if x can reach y by exploring all of the nodes that x can reach.
visited = [False] * len(adj) # List of all the edges, and whether they have been visited.
return explore(adj, x, y, visited)
def explore(adj, x, y, visited):
# Explore each edge pair.
if x == y: # Nodes are the same: we've reached y.
return 1
visited[x] = True
for i in range(len(adj[x])):
if not visited[adj[x][i]]: # Recurse into each node of the pair.
if explore(adj, adj[x][i], y, visited):
return 1
return 0
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n, m = data[0:2] # No. of vertices and edges.
data = data[2:]
edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
x, y = data[2 * m:] # u and v - is there a path between these?
x, y = x - 1, y - 1 # They are zero-indexed.
adj = [[] for _ in range(n)]
for (a, b) in edges:
adj[a - 1].append(b - 1)
adj[b - 1].append(a - 1)
print(reach(adj, x, y))
|
[
"[email protected]"
] | |
f4a6ff61bd09f097e3f78db368e0296793dad68d
|
f1e9f557c5d724dcabbfa17903de93bb82767e35
|
/py_opencv_playrtsp.py
|
48961e3539f940982eb4128f70fc2a9f5ce1a858
|
[] |
no_license
|
gregsheu/python
|
e5e9ff83dc0ce90541591e726c940e8a1f71a3d4
|
4a77295d58a522974ee85b201ab99cdbe410fd08
|
refs/heads/master
| 2023-08-18T08:30:15.611727 | 2023-08-08T06:55:44 | 2023-08-08T06:55:44 | 181,270,205 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
import cv2
import ffmpeg
import time
vcap = cv2.VideoCapture("rtsp://admin:[email protected]:554/cam/realmonitor?channel=1&subtype=0")
while(1):
ret, frame = vcap.read()
print(frame.tobytes())
cv2.imshow('channel2', frame)
cv2.waitKey(1)
|
[
"[email protected]"
] | |
c78e0f7af5816b19efcea2334f9803e925c03c0c
|
d25eebb25595c25b73fdc64447f7cf5998204b0d
|
/gtkApi/ReportEditor.py
|
ca6d3ae8746a0c2d9fb7a526f2f18423739f3bc5
|
[] |
no_license
|
BackupTheBerlios/baseui
|
a3867c0cc4aa30cf2a7b0dcaf9dbeec68dc5ef0b
|
a8296aa42f0de42c18f7dfb5d20966bad695709b
|
refs/heads/master
| 2021-01-15T22:28:52.114731 | 2012-12-05T16:31:03 | 2012-12-05T16:31:03 | 39,894,612 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,685 |
py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#===============================================================================
# ReportEditor module.
# by Mark Muzenhardt, published under LGPL-License.
#===============================================================================
import pygtk
pygtk.require('2.0')
import gtk
class ReportEditor:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("Translation Editor")
window.connect("destroy", lambda w: gtk.main_quit())
vbox = gtk.VBox()
window.add(vbox)
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, expand=False, fill=True)
button_print = gtk.Button('Druck')
button_print.connect("clicked", self.on_button_print_clicked)
toolbar.add(button_print)
button_backward = gtk.Button('<-')
toolbar.add(button_backward)
button_forward = gtk.Button('->')
toolbar.add(button_forward)
button_cancel = gtk.Button('Abbruch')
button_cancel.connect("clicked", lambda w: gtk.main_quit())
toolbar.add(button_cancel)
label = gtk.Label('NIIX')
vbox.add(label)
window.show_all()
# This methods are doing the initial --------------------------------------
def on_button_print_clicked(self, widget=None, data=None):
pass
# Start the GTK mainloop ------------------------------------------------------
def main():
gtk.main()
return 0
if __name__ == "__main__":
ReportEditor()
main()
|
[
"devnull@localhost"
] |
devnull@localhost
|
97dc0dee0ef8ce0ada8c9102b035a98d5717adee
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/account_voucher/__manifest__.py
|
34480401b13ad5043af7067acd03109289d910d1
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,459 |
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name' : 'Sale & Purchase Vouchers',
'version' : '1.0',
'summary': 'Manage your debts and credits thanks to simple sale/purchase receipts',
'description': """
TODO
old description:
Invoicing & Payments by Accounting Voucher & Receipts
=====================================================
The specific and easy-to-use Invoicing system in ALWAFI allows you to keep track of your accounting, even when you are not an accountant. It provides an easy way to follow up on your vendors and customers.
You could use this simplified accounting in case you work with an (external) account to keep your books, and you still want to keep track of payments.
The Invoicing system includes receipts and vouchers (an easy way to keep track of sales and purchases). It also offers you an easy method of registering payments, without having to encode complete abstracts of account.
This module manages:
* Voucher Entry
* Voucher Receipt [Sales & Purchase]
* Voucher Payment [Customer & Vendors]
""",
'category': 'Accounting',
'sequence': 20,
'depends' : ['account'],
'demo' : [],
'data' : [
'security/ir.model.access.csv',
'views/account_voucher_views.xml',
'security/account_voucher_security.xml',
'data/account_voucher_data.xml',
],
'auto_install': False,
'installable': True,
}
|
[
"[email protected]"
] | |
b56d4fe821cd8462bbda70acd89752b0fbce8a74
|
7c91f92d2d82e0d9fd85af09f9d18226c747f7fa
|
/rhoci/forms/test.py
|
bb9d6fe3cf671e23e1b037366251aa9886986d9a
|
[
"Apache-2.0"
] |
permissive
|
bregman-arie/rhoci
|
5488afe8d884cb72a3475eef68ebc54944b45453
|
bae1f1d737a12ede50d263a6496faf2b698515b5
|
refs/heads/master
| 2023-02-25T10:53:01.642377 | 2022-12-10T14:37:40 | 2022-12-10T14:37:40 | 90,493,854 | 12 | 8 |
Apache-2.0
| 2023-02-16T07:11:11 | 2017-05-06T22:06:20 |
CSS
|
UTF-8
|
Python
| false | false | 1,117 |
py
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms import StringField
from wtforms import SubmitField
class TestSearch(FlaskForm):
class_name = StringField('class name')
test_name = StringField('test name')
status = StringField('status')
failed_since = StringField('failed since')
skipped_message = StringField('skipped message')
stdout = StringField('stdout')
stderr = StringField('stderr')
skipped = BooleanField()
submit = SubmitField('Search')
|
[
"[email protected]"
] | |
0fa2b8c8ec819233bc34543f46cd4fd13fe8509b
|
7d75018c3d8e2ac85ea0f5bbaf52ce5eae9761ca
|
/project/gohelp/settings.py
|
3bfa30ab59e9abf68240589b9a17501126484713
|
[] |
no_license
|
AVS18/sdp-sem5
|
fff484331d9b588558b928e557a974f05652adcb
|
238dcc7dfe50dda9678383590a43b23bbcd99553
|
refs/heads/main
| 2023-01-14T01:01:18.297711 | 2020-11-14T13:43:55 | 2020-11-14T13:43:55 | 288,098,284 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,850 |
py
|
"""
Django settings for gohelp project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6-rp4=_omlx$ya3@dms@a8jnpamp#$dl^y(bx!0ptji47ag!qk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
'worker',
'customer',
'storages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gohelp.urls'
AUTH_USER_MODEL = 'base.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gohelp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'gohelp',
'USER': 'postgres',
'PASSWORD': 'kamakshi@1234',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
import os
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
AWS_ACCESS_KEY_ID = 'replace the credentials'
AWS_SECRET_ACCESS_KEY = "replace the credentials"
AWS_STORAGE_BUCKET_NAME = "gohelp"
AWS_S3_REGION_NAME = 'ap-south-1'
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER='[email protected]'
EMAIL_HOST_PASSWORD='aditya12345'
EMAIL_USE_TLS = True
|
[
"[email protected]"
] | |
4443aa6863038875ca5ad3372f122475c4993118
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_04_01/aio/_monitor_management_client.py
|
5640ee566505865cb91ec42008e9408f5e7a74d8
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 |
MIT
| 2023-09-14T21:48:49 | 2012-04-24T16:46:12 |
Python
|
UTF-8
|
Python
| false | false | 5,526 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import MonitorManagementClientConfiguration
from .operations import (
ActivityLogsOperations,
AlertRulesOperations,
AutoscaleSettingsOperations,
EventCategoriesOperations,
Operations,
TenantActivityLogsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MonitorManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Monitor Management Client.
:ivar activity_logs: ActivityLogsOperations operations
:vartype activity_logs: azure.mgmt.monitor.v2015_04_01.aio.operations.ActivityLogsOperations
:ivar autoscale_settings: AutoscaleSettingsOperations operations
:vartype autoscale_settings:
azure.mgmt.monitor.v2015_04_01.aio.operations.AutoscaleSettingsOperations
:ivar event_categories: EventCategoriesOperations operations
:vartype event_categories:
azure.mgmt.monitor.v2015_04_01.aio.operations.EventCategoriesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.monitor.v2015_04_01.aio.operations.Operations
:ivar tenant_activity_logs: TenantActivityLogsOperations operations
:vartype tenant_activity_logs:
azure.mgmt.monitor.v2015_04_01.aio.operations.TenantActivityLogsOperations
:ivar alert_rules: AlertRulesOperations operations
:vartype alert_rules: azure.mgmt.monitor.v2015_04_01.aio.operations.AlertRulesOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = MonitorManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.activity_logs = ActivityLogsOperations(self._client, self._config, self._serialize, self._deserialize)
self.autoscale_settings = AutoscaleSettingsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.event_categories = EventCategoriesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.tenant_activity_logs = TenantActivityLogsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.alert_rules = AlertRulesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MonitorManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
[
"[email protected]"
] | |
712dba93a2621c8f100b375020d6fe1a26b33587
|
155cfef4bb35d20bc574f63f3443039bfcc1ab7e
|
/srcs/mahjong/admin/admin/admin.py
|
dae8ebe48a3a7b3d835c15ac939a653d4622e62b
|
[] |
no_license
|
rolllyroman/fish_web
|
3116481a6a16484283f428eb7c98ecea7cee99d4
|
eb5a05ea3d56c7d9f599009e8ab6f4cb322e9023
|
refs/heads/master
| 2020-03-28T01:08:57.346228 | 2018-09-06T03:34:05 | 2018-09-06T03:34:05 | 147,480,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
#-*- coding:utf-8 -*-
#!/usr/bin/python
"""
Author:$Author$
Date:$Date$
Revision:$Revision$
Description:
后台APP应用入口
"""
from bottle import Bottle
from common.install_plugin import install_redis_plugin,install_session_plugin
admin_app = Bottle()
install_redis_plugin(admin_app)
install_session_plugin(admin_app)
import admin_index
import admin_auth
#会员模块
import admin_member
# 数据统计模块
import admin_statistics
# 个人信息模块
import admin_self
# 代理模块
import admin_agent
# 用户权限模块
import admin_power
#游戏模块
import admin_game
#订单模块
import admin_order
#商品模块
import admin_goods
#系统设置
import admin_setting
#消息设置
import admin_notic
#捕鱼模块
import admin_fish
#福利模块
import admin_benefit
'''
金币场模块
'''
import admin_gold
|
[
"[email protected]"
] | |
911744a0becf71a9d8142dc9e796c3949f6243a8
|
26c0f80688f75a188097a232c229a73c8e7cc6ed
|
/user/migrations/0016_auto_20210511_1700.py
|
c17235302b993169c5ae1b568f59d2271a6b2144
|
[] |
no_license
|
creep1g/DjangoWebstore
|
8207d7ea53c478fb7e5745e1c6ae6699102b5df5
|
bd27340b86bf2289b8c14216462d932ccdf4986d
|
refs/heads/main
| 2023-05-06T09:50:04.846489 | 2021-05-28T14:40:40 | 2021-05-28T14:40:40 | 371,730,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
# Generated by Django 3.2 on 2021-05-11 17:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0015_auto_20210511_1655'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='searches',
field=models.ManyToManyField(null=True, to='user.SearchHistory'),
),
]
|
[
"[email protected]"
] | |
31c63484ece90ef1a58d4d8a1c917875e71e42ba
|
0729bc2e2236fadb8fb2eac8b30534d939a45b2e
|
/DistAnnot/Annot/tests.py
|
e0c741e72f672231d4fd71b9ee91a723a70a444e
|
[] |
no_license
|
JudoWill/pyMutF
|
8ecdc24fbb2efe2a0a721aab164a2b060de11832
|
aaf41ab41eb897c10a721c62913bb49c79f2cefc
|
refs/heads/master
| 2021-01-16T20:34:06.705933 | 2010-10-11T16:55:08 | 2010-10-11T16:55:08 | 710,208 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from DistAnnot.Interaction.models import *
from forms import AnnotForm, InteractionEffectForm
from django.forms.formsets import formset_factory
class SimpleTest(TestCase):
fixtures = ['Interaction.simple_data.yaml']
|
[
"[email protected]"
] | |
37321511f55b483428e71701554e9e17bf1df771
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_7/hrnali002/question1.py
|
bddd72a0b19d90ef62d339aa08a5e015b73c2dc2
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 593 |
py
|
"""A program to print a list with not duplicated words
Alison Hoernle
HRNALI002
27 April 2014"""
# get input and convert to a list
list = []
strings = input("Enter strings (end with DONE):\n")
while strings != "DONE":
list.append(strings)
strings = input()
print()
print("Unique list:")
# create an empty string and then go through list. Add each word to empty string and if in string already then don't print that word again
counted_words = ''
for string in list:
if string in counted_words:
continue
else:
print(string)
counted_words += string
|
[
"[email protected]"
] | |
f3a2ad5c32de8876caeae5f5f9095fdd0ef824c5
|
400c569b19d003d0b9d1b31bc1b698ae510cbc46
|
/Celestial classification/models.py
|
d4b60dffc8e997aebb887787f6bf21975ed96fb3
|
[] |
no_license
|
as950118/dacon
|
05a203ab36375a69549ac39ba3b02a90431c860a
|
a1489a55a7a53a755d6cf50081522bd7c1c48b4f
|
refs/heads/master
| 2021-02-13T20:06:38.169482 | 2020-03-03T19:51:51 | 2020-03-03T19:51:51 | 244,727,899 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,054 |
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from data_processing import DataProcessing
random_seed = 0
train_data_path = "./data/train.csv"
test_data_path = "./data/test.csv"
sample_submission_data_path = "./data/sample_submission.csv"
data_processing = DataProcessing(train_data_path, test_data_path, sample_submission_data_path)
train_data, test_data, sample_submission_data = data_processing.load_file()
x_train, x_valid, y_train, y_valid = data_processing.set_data(train_data, test_data)
'''
# catboost
cat_clf = CatBoostClassifier(iterations = 20000, random_state = random_seed, task_type="GPU")
cat_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
cat_pred = cat_clf.predict_proba(test_data)
submission = pd.DataFrame(data=cat_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/cat_boost2.csv', index=True)
'''
# lgbm
#lgbm_clf = LGBMClassifier(n_estimators = 1000, n_jobs=-1, random_state = random_seed, device = 'gpu')
lgbm_clf = LGBMClassifier(n_estimators = 1000, n_jobs=-1, random_state = random_seed)
lgbm_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
lgbm_pred = lgbm_clf.predict_proba(test_data)
submission = pd.DataFrame(data=lgbm_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/light_gbm2.csv', index=True)
# xgboost
#xgb_clf = XGBClassifier(n_estimators = 1000, n_jobs=-1, random_state=random_seed, tree_method='gpu_exact')
xgb_clf = XGBClassifier(n_estimators = 1000, n_jobs=-1, random_state=random_seed)
xgb_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
xgb_pred = xgb_clf.predict_proba(test_data)
submission = pd.DataFrame(data=xgb_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/xg_boost2.csv', index=True)
|
[
"[email protected]"
] | |
3994ec01676f94e3b0ed9d34c4e51522f1548082
|
6b3ec47ee410a7d2ed2102cc5bcfa13c7a6342e2
|
/bin/easy_install-3.6
|
5d6f8c4e10d68c760d508456eeaaa31b7e59754b
|
[] |
no_license
|
makkar-nishant123/Refermeframework
|
fddb912304bdb4ffe3e169fda2d60b4171d8b6c1
|
a152f42f6ab63c037bf3f117aa5be1ceb3a1d178
|
refs/heads/master
| 2020-05-15T23:29:18.684101 | 2019-04-28T17:31:22 | 2019-04-28T17:31:22 | 182,555,118 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 460 |
6
|
#!/Users/nishantmakkar/PycharmProjects/RefermeFramework/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated console-script shim: resolves the 'easy_install-3.6' entry
# point pinned to setuptools 40.8.0 and exits with its return code.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' suffix so argv[0] shows the
    # plain command name in usage/error messages.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
|
[
"[email protected]"
] | |
f529c2813ffd27be60a2c246cf2853fcf650896f
|
78912badbaa634d84a93ac03872f18b3f14092a0
|
/photosorter-readbuckets.py
|
21e4410b93a348af18e57021e9ae46609456fa81
|
[] |
no_license
|
mperry8889/photosorter
|
fc556054ce2af1a50c91c585c80eb6d65ff23f4f
|
d20c7a51a6e0e7aef4e4eb9260a344d54c52e539
|
refs/heads/master
| 2021-05-29T06:55:32.482767 | 2011-05-08T17:04:59 | 2011-05-08T17:04:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
#!/usr/bin/env python
# Dump every photo known to the sorter, one line per photo:
# sort state, bucket year, filename, and the two orientation flags.
from photosorter import PhotoSorter
from photosorter import Bucket
from photosorter import Photo

if __name__ == "__main__":
    p = PhotoSorter()
    for bucket in p.buckets:
        # Each state name is an attribute of the bucket holding a photo list.
        for state in ["during", "after", "before", "unknown", "unsorted"]:
            for photo in getattr(bucket, state):
                # Python 2 print statement (file predates Python 3).
                print "%s %s %s %s %s" % (state, bucket.year, photo.filename, photo.rotation, photo.flip_horizontal)
|
[
"none@none"
] |
none@none
|
da8775e18d3b0e6f3cfa5b7ce00126f7f11d9688
|
b819632a899cc4919c4efb097b87009a9d07d209
|
/testbed_nodel11_vm_container.py
|
a54514a0093d7fb87304a63cdeb2ee24793ed008
|
[] |
no_license
|
NuthanChandra/ctools
|
bb2570786d9b1a584c5b08800f48b02ed8664480
|
bcb967c53375104e32b32c8f0d2b3ca25ed69e49
|
refs/heads/master
| 2022-11-28T04:25:30.092129 | 2020-04-14T12:38:27 | 2020-04-14T12:38:27 | 255,604,269 | 1 | 1 | null | 2020-07-23T16:29:45 | 2020-04-14T12:34:11 |
Python
|
UTF-8
|
Python
| false | false | 4,050 |
py
|
# Contrail test-bed definition consumed by fabric: six KVM guest VMs on one
# hypervisor plus one physical MX router, with role/password/provisioning data.
from fabric.api import env
import os

# Managed guest VMs (root login over SSH).
host1 = '[email protected]'
host2 = '[email protected]'
host3 = '[email protected]'
host4 = '[email protected]'
host5 = '[email protected]'
host6 = '[email protected]'

# KVM hypervisor that hosts the six VMs described in vm_node_details below.
kvm_nodel12 = '10.204.216.114'

# External physical router peers as (name, management ip).
ext_routers = [('hooper','10.204.217.240')]
router_asn = 64512
public_vn_rtgt = 2225
public_vn_subnet = '10.204.221.160/28'

# Build server used to push images/packages.
host_build = '[email protected]'

# NOTE(review): '{env_roledefs}' looks like an unrendered template placeholder;
# as written it is a set display over an undefined name and would raise
# NameError at import time — confirm the generator substitutes it.
{env_roledefs}
#env.roledefs = {
#    'all': [host1,host2,host3,host4,host5,host6],
#    'cfgm': [host1, host2],
#    'webui': [host1],
#    'openstack': [host1],
#    'control': [host2, host3],
#    'collector': [host1],
#    'database': [host1, host2, host3],
#    'compute': [host4, host5, host6],
#    'build': [host_build]
#}

# Physical router inventory keyed by name.
env.physical_routers={
'hooper'     : { 'vendor': 'juniper',
                 'model' : 'mx',
                 'asn'   : '64512',
                 'name'  : 'hooper',
                 'ssh_username' : 'root',
                 'ssh_password' : 'c0ntrail123',
                 'mgmt_ip'  : '10.204.217.240',
               }
}

# Hostnames assigned to the six VMs, in host1..host6 order.
env.hostnames = {
    'all': ['nodel12-vm1', 'nodel12-vm2', 'nodel12-vm3', 'nodel12-vm4', 'nodel12-vm5', 'nodel12-vm6']
}

env.openstack_admin_password = 'contrail123'
env.password = 'c0ntrail123'
# Per-host SSH passwords used by fabric.
env.passwords = {
    host1: 'c0ntrail123',
    host2: 'c0ntrail123',
    host3: 'c0ntrail123',
    host4: 'c0ntrail123',
    host5: 'c0ntrail123',
    host6: 'c0ntrail123',
    host_build: 'stack@123',
}

# Base OS image used when (re)imaging the VMs.
reimage_param = 'ubuntu-14.04.5'

# VM provisioning spec: 'default' holds values shared by every VM; per-host
# entries add the VM name and its bridged NIC with a fixed MAC.
vm_node_details = {
    'default': {
        'image_dest' : '/mnt/disk1/images/',
        'ram' : '32768',
        'server': kvm_nodel12,
        'vcpus' : '4',
        'disk_format' : 'qcow2',
        'image_source' : 'http://10.204.217.158/images/node_vm_images/%s-256G.img.gz' % (reimage_param),
    },
    host1 : {
            'name' : 'nodel12-vm1',
            'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:01'}
            ],
    },
    host2 : { 'name' : 'nodel12-vm2',
              'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:02'}
              ]
    },
    host3 : { 'name' : 'nodel12-vm3',
              'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:03'}
              ]
    },
    host4 : { 'name' : 'nodel12-vm4',
              'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:04'}
              ]
    },
    host5 : { 'name' : 'nodel12-vm5',
              'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:05'}
              ]
    },
    host6 : { 'name' : 'nodel12-vm6',
              'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:06'}
              ]
    }
}

env.keystone = {'admin_password': 'c0ntrail123'}
env.openstack = {'manage_amqp': "true"}

# Provisioning knobs.
minimum_diskGB=32
env.kernel_upgrade=False
env.rsyslog_params = {'port':19876, 'proto':'tcp', 'collector':'dynamic', 'status':'enable'}
env.test_repo_dir='/home/stack/multi_interface_parallel/centos65/icehouse/contrail-test'
env.mail_from='[email protected]'
env.mail_to='[email protected]'
multi_tenancy=True
env.interface_rename = True
env.enable_lbaas = True
enable_ceilometer = True
ceilometer_polling_interval = 60
env.encap_priority = "'VXLAN','MPLSoUDP','MPLSoGRE'"
env.log_scenario='Multi-Node Nodel12 Contrainer Sanity[mgmt, ctrl=data]'
env.ntp_server = '10.204.217.158'
|
[
"[email protected]"
] | |
8f377dbae4bdfac6f266dec47f88176f4f0e1eca
|
b50f07920a48df36c5303e6bbd35ff1eafbece16
|
/jms/expression.py
|
0668bf0f294aef112c6ee929ab72cafc5af0faa2
|
[] |
no_license
|
principia12/project_re
|
ed21cd369412d440ae794fd7ff422400988be5e3
|
d165026e08cd1efd27ed9a0147aaf790f9374916
|
refs/heads/master
| 2020-08-27T19:39:08.872522 | 2019-11-07T09:31:04 | 2019-11-07T09:31:04 | 217,472,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,657 |
py
|
from abc import ABC, abstractmethod
from .common import ConsumeFailException, is_valid_char, is_whitespace, is_word_char, is_numeric
from .tokenizer import TokenType
class Expr(ABC):
    """Abstract node of the regex AST.

    Each node matches ("consumes") part of ``text`` starting at an index and
    either returns the index just past the match or raises
    ``ConsumeFailException``.
    """

    @abstractmethod
    def consume(self, text, idx):
        """Match against ``text`` at ``idx``; return the new index or raise."""

    @classmethod
    def from_token(cls, token):
        """Build the leaf expression corresponding to a single tokenizer token."""
        kind = token.token_type
        if kind == TokenType.CHAR:
            return Term(token.value)
        if kind == TokenType.ANCHOR_CHAR:
            return AnchorTerm(token.value)
        if kind in (TokenType.CLASS_CHAR, TokenType.WILDCARD_CHAR):
            return ClassTerm(token.value)
        raise ValueError()

    @classmethod
    def with_and(cls, exprs):
        """Concatenation: every sub-expression must match, in order."""
        return AndExpr(exprs)

    @classmethod
    def with_or(cls, exprs):
        """Alternation: the first sub-expression that matches wins."""
        return OrExpr(exprs)

    @staticmethod
    def get_char(text, idx):
        """Return the character at ``idx`` or raise when past the end."""
        if idx < len(text):
            return text[idx]
        raise ConsumeFailException()
class EmptyTerm(Expr):
    """Epsilon node: matches the empty string, consuming no characters."""

    def consume(self, text, idx):
        # Nothing to match — the position is returned unchanged.
        return idx
class Term(Expr):
    """Literal node: matches exactly one specific character."""

    def __init__(self, c):
        self.c = c

    def consume(self, text, idx):
        if self.get_char(text, idx) != self.c:
            raise ConsumeFailException()
        return idx + 1
class AnchorTerm(Expr):
    """Zero-width anchor: ``^`` (start of text) or ``$`` (end of text)."""

    check_funcs = {
        '^': lambda text, idx: idx == 0,
        '$': lambda text, idx: idx == len(text)
    }

    def __init__(self, c):
        # Resolve the anchor character to its position predicate once.
        self.check_func = self.check_funcs[c]

    def consume(self, text, idx):
        if not self.check_func(text, idx):
            raise ConsumeFailException()
        # Anchors consume nothing; only the position is validated.
        return idx
class ClassTerm(Expr):
    """Character-class node: ``.`` wildcard or a d/w/s class.

    A lower-case class character (or ``.``) matches positively; the
    upper-case variant negates the class.
    """

    check_funcs = {
        '.': is_valid_char,
        'd': is_numeric,
        'w': is_word_char,
        's': is_whitespace,
    }

    def __init__(self, c: str):
        # '.' and lower-case letters assert membership; upper-case negates.
        self.positive = c == '.' or c.islower()
        self.check_func = self.check_funcs[c.lower()]

    def consume(self, text, idx):
        ch = self.get_char(text, idx)
        if self.check_func(ch) != self.positive:
            raise ConsumeFailException()
        return idx + 1
class AndExpr(Expr):
    """Concatenation node: all sub-expressions must match back to back."""

    def __init__(self, exprs):
        self.exprs = exprs

    def consume(self, text, idx):
        pos = idx
        # Thread the position through every sub-expression; any failure
        # propagates as ConsumeFailException.
        for sub in self.exprs:
            pos = sub.consume(text, pos)
        return pos
class OrExpr(Expr):
    """Alternation node: the first sub-expression that matches wins."""

    def __init__(self, exprs):
        self.exprs = exprs

    def consume(self, text, idx):
        for sub in self.exprs:
            try:
                return sub.consume(text, idx)
            except ConsumeFailException:
                continue
        # No alternative matched at this position.
        raise ConsumeFailException()
|
[
"[email protected]"
] | |
75d3392dc40e06676c640968578a29a6e4230e6b
|
1e139784a36ce2a26dafaac0bb795b168ca91776
|
/electron_project/abstract/migrations/0003_delete_workeraccount.py
|
bda3728b90ddb267ad2ad6addfa863d7ca628b2e
|
[] |
no_license
|
TestAccount2077/Mas-Electronics
|
a9f4431be7ea740b99616cb4ce4acf9bba46096f
|
6bb887805900affdcd905deb33b341892bebd41f
|
refs/heads/master
| 2020-03-28T15:11:57.044686 | 2019-01-26T16:01:55 | 2019-01-26T16:01:55 | 148,566,318 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 363 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-10-20 04:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the WorkerAccount model introduced in 0002."""

    dependencies = [
        ('abstract', '0002_workeraccount'),
    ]

    operations = [
        migrations.DeleteModel(
            name='WorkerAccount',
        ),
    ]
|
[
"[email protected]"
] | |
c82c8b8c6b31aa7a2fbea0a59b5f32fd34dcd6e1
|
d9f4400d47e0ce914be636698365e26f836b766c
|
/apps/screen/urls.py
|
71d2c6af754bfffc99bcf77e2a85e73445a03918
|
[] |
no_license
|
otonelunico/prevex
|
deffc3cfd82354b20e61ac636b2b7fb4dd48d360
|
e32efb317a05031a5e0c454d3343748ea7ff534e
|
refs/heads/master
| 2021-01-22T08:39:26.389747 | 2017-06-24T20:52:27 | 2017-06-24T20:52:27 | 92,628,288 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 481 |
py
|
from django.conf.urls import url, include
from apps.screen.views import index, Settings, Prevent_, Video_
from django.contrib.auth.decorators import login_required
# Routes for the screen app; every view except the index requires login.
urlpatterns = [
    url(r'^$', index, name='index'),
    url(r'^settings/$', login_required(Settings), name="settings"),
    # funct: action name (word); type/id: numeric identifiers passed to the view.
    url(r'^prevent/(?P<funct>\w+)/(?P<type>\d+)/(?P<id>\d+)$', login_required(Prevent_), name="prevent"),
    url(r'^video/(?P<funct>\w+)/(?P<type>\d+)$', login_required(Video_), name="video"),
]
|
[
"[email protected]"
] | |
734a353a9b4a5f50f3a72adeae60c79376b0e30d
|
e82245a9e623ef3e2b4b9c02f0fd932c608c4484
|
/firecode.io/08-find_the_transpose_of_a_square_matrix.py
|
3d2ba5c22dcb19e7aba1339638498c7d1921455a
|
[] |
no_license
|
Zylophone/Programming-for-Sport
|
33e8161028cfddce3b7a1243eb092070107342e3
|
193d6184f939303d8661f68d6fd06bdec95df351
|
refs/heads/master
| 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
'''
Args:
- matrix (list of lists)
- a square matrix
Modifies:
- arg into its transpose in-place
Returns:
- nothing (None)
Complexity:
- O(n^2) time
- O(1) extra space, in-place
'''
def transpose_matrix(matrix):
    """Transpose a square matrix in place.

    Swaps each element pair across the main diagonal, so no extra storage
    is used (O(1) space, O(n^2) time). Returns None; a None argument is a
    no-op that also returns None.
    """
    if matrix is None:
        return None
    size = len(matrix)
    # Walk the strict upper triangle; each (row, col)/(col, row) pair is
    # exchanged exactly once.
    for row in range(size):
        for col in range(row + 1, size):
            matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
|
[
"[email protected]"
] | |
5db1fbf5131e9fcb3b1160d38c497df02b701c2d
|
12a5b72982291ac7c074210afc2c9dfe2c389709
|
/online_judges/Codeforces/113/A/code.py
|
6a269bbf442e5a4f164b88db14eb1cdb942cc845
|
[] |
no_license
|
krantirk/Algorithms-and-code-for-competitive-programming.
|
9b8c214758024daa246a1203e8f863fc76cfe847
|
dcf29bf976024a9d1873eadc192ed59d25db968d
|
refs/heads/master
| 2020-09-22T08:35:19.352751 | 2019-05-21T11:56:39 | 2019-05-21T11:56:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
py
|
# Codeforces 113A helper: the phrase is accepted only if every word ends
# with one of the six permitted Petya-language suffixes.
s = ["lios","liala","etr","etra","initis","inites"]
input_string = raw_input().split()  # Python 2 input; words separated by spaces
answer = True
for e in input_string:
    # flag: does this word carry any valid suffix?
    flag = False
    for k in s:
        if e.endswith(k):
            flag = True
    if not flag:
        # One invalid word rejects the whole phrase; no need to scan further.
        answer = False
        break
if (answer): print "YES"
else: print "NO"
|
[
"[email protected]"
] | |
653c6ee77e55fe39bf26522d6e3e04161daa0ce3
|
022104aa2456429356bdd26c701a2949381a83cf
|
/install/lib/python2.7/dist-packages/robotnik_msgs/msg/_SetElevatorFeedback.py
|
fe9731c70b42e53a2afd11197435c3aea3f8e08d
|
[] |
no_license
|
nachocz/campero_ws
|
204f313d5fbdb81d1f7cc568341a1170ddd2b4cf
|
f2b09f96165166c0e867e3f5f3dcd092dbac1c1b
|
refs/heads/master
| 2023-02-02T03:25:56.603172 | 2020-12-11T11:28:42 | 2020-12-11T11:28:42 | 320,539,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,506 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from robotnik_msgs/SetElevatorFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import robotnik_msgs.msg
class SetElevatorFeedback(genpy.Message):
    """Feedback part of the robotnik_msgs/SetElevator action (generated by genpy)."""
    # Wire-format identity: peers compare md5sum/type to agree on the layout.
    _md5sum = "47e3f709643220443260a9d8c1f901ea"
    _type = "robotnik_msgs/SetElevatorFeedback"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
robotnik_msgs/ElevatorStatus status
================================================================================
MSG: robotnik_msgs/ElevatorStatus
# state
string RAISING=raising
string LOWERING=lowering
string IDLE=idle
string ERROR_G_IO=error_getting_io
string ERROR_S_IO=error_setting_io
string ERROR_TIMEOUT=error_timeout_in_action
# position
string UP=up
string DOWN=down
string UNKNOWN=unknown
# IDLE, RAISING, LOWERING
string state
# UP, DOWN, UNKNOWN
string position
float32 height
"""
    # Single field: a nested ElevatorStatus message.
    __slots__ = ['status']
    _slot_types = ['robotnik_msgs/ElevatorStatus']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes.  You cannot mix in-order arguments and keyword arguments.
        The available fields are:
           status
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(SetElevatorFeedback, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.status is None:
                self.status = robotnik_msgs.msg.ElevatorStatus()
        else:
            self.status = robotnik_msgs.msg.ElevatorStatus()

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # Strings are encoded as a uint32 length prefix followed by UTF-8 bytes.
            _x = self.status.state
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.status.position
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            # height is a single little-endian float32.
            _x = self.status.height
            buff.write(_get_struct_f().pack(_x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        # NOTE: the parameter name 'str' shadows the builtin (generated code, left as-is).
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.status is None:
                self.status = robotnik_msgs.msg.ElevatorStatus()
            end = 0
            # state: uint32 length prefix, then the bytes themselves.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status.state = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.status.state = str[start:end]
            # position: same length-prefixed layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status.position = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.status.position = str[start:end]
            # height: 4-byte little-endian float.
            start = end
            end += 4
            (self.status.height,) = _get_struct_f().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        # Identical to serialize(): this message contains no array fields.
        try:
            _x = self.status.state
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.status.position
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.status.height
            buff.write(_get_struct_f().pack(_x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        # Identical to deserialize(): this message contains no array fields.
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.status is None:
                self.status = robotnik_msgs.msg.ElevatorStatus()
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status.state = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.status.state = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status.position = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.status.position = str[start:end]
            start = end
            end += 4
            (self.status.height,) = _get_struct_f().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill
# Cached Struct for the little-endian uint32 length prefix shared by all strings.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily-built Struct for a single little-endian float32 (the height field).
_struct_f = None
def _get_struct_f():
    global _struct_f
    if _struct_f is None:
        _struct_f = struct.Struct("<f")
    return _struct_f
|
[
"[email protected]"
] | |
a67d6079a5eec64bc07497534737ee8ef949dd51
|
3ab1f37b4372d0796c85ef24343dd8c03accb6ef
|
/CoinBase/ConnectFour.py
|
aa65bc2e3204e93c5a8b26877efbaf25d28eb2c3
|
[] |
no_license
|
Blossomyyh/leetcode
|
2be6a99534801fc59fe9551317ca49c3704b1c3d
|
38615779eb43d147587467e11dc22761ac0726cb
|
refs/heads/master
| 2023-01-22T16:56:26.624677 | 2020-11-20T13:47:43 | 2020-11-20T13:47:43 | 266,845,278 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,135 |
py
|
"""
Connect 4
use get column and line and diagonals to find wins
4 ->wins
https://codereview.stackexchange.com/questions/225840/a-simple-connect-4-game-in-python
https://github.com/KeithGalli/Connect4-Python/blob/master/connect4.py
Better solution:
focus on the current move's row and col! to check wins
"""
# Player markers stored in board cells (0 marks an empty cell).
TEAM1 = 1
TEAM2 = 2
class connect4:
    """A Connect Four board: pieces drop to the lowest free cell of a column.

    Cells hold 0 (empty) or a team marker (1/2). Rows are indexed from the
    top, so row ``self.row - 1`` is the bottom of the board.
    """

    def __init__(self, row=6, col=7):
        self.row = row
        self.col = col
        # Fresh board: every cell starts empty (0).
        self.board = [[0] * col for _ in range(row)]
        self.rows = []
        self.count = row * col

    def returnboard(self):
        """Print the board, one row-list per line."""
        for line in self.board:
            print(line)
        return

    def checkwins(self, team):
        """True if ``team`` has four in a row in any direction. O(4*N*M)."""
        # Direction vectors: right, down, down-right, up-right — together they
        # cover horizontal, vertical and both diagonals without double work.
        for dr, dc in ((0, 1), (1, 0), (1, 1), (-1, 1)):
            for r in range(self.row):
                for c in range(self.col):
                    end_r, end_c = r + 3 * dr, c + 3 * dc
                    if not (0 <= end_r < self.row and 0 <= end_c < self.col):
                        continue
                    if all(self.board[r + k * dr][c + k * dc] == team for k in range(4)):
                        return True
        return False

    def checkcolumn(self, col):
        """True while column ``col`` still has at least one empty cell."""
        return 0 in [line[col] for line in self.board]

    def checkend(self, rounds):
        """True once every cell has been filled (rounds exceeds cell count)."""
        # NOTE(review): this message prints on every call, not only at the end.
        print("The end of the game! ")
        return rounds > self.count

    def makemove(self, team, col):
        """Drop a piece for ``team`` into ``col`` (assumed valid).

        Prints the move and the board, and returns True exactly when the
        move wins the game for ``team``.
        """
        level = self.row - 1
        # Scan upward from the bottom until an empty cell is found.
        while self.board[level][col] != 0:
            level -= 1
        self.board[level][col] = team
        print(f"{team} move at col: {col}")
        self.returnboard()
        if not self.checkwins(team):
            return False
        print(f"Team {team} WIN !")
        return True
import random

if __name__ == "__main__":
    game = connect4()
    game.returnboard()
    rounds = 1
    win = False
    # Random self-play: alternate teams until a win or a full board.
    while not win and not game.checkend(rounds):
        team = rounds % 2 + 1  # team 2 on odd rounds, team 1 on even rounds
        # generate a random number 0-6
        colidx = random.randrange(7)
        # Re-roll until a column with free space is found.
        while not game.checkcolumn(colidx):
            colidx = random.randrange(7)
        win = game.makemove(team, colidx)
        rounds += 1
    game.returnboard()
|
[
"[email protected]"
] | |
339700e4ddf899c0336dd7012c4c6385c8eb3cbb
|
9716a77ef1d0ba5ef9a61be04f6229494744d5d5
|
/chapter06 정렬/위에서 아래로.py
|
158d19ce05bdd45aaedf7a1c03c28402fb6a8ac5
|
[] |
no_license
|
korea-space-codingmonster/Algorithm_Study
|
98b00c81839cf8ac8365d3982c25650a21226ce9
|
8c92857e458994a2d1d77dc3ea0d4b645b8b6a4b
|
refs/heads/main
| 2023-06-03T20:00:52.915447 | 2021-06-20T05:51:47 | 2021-06-20T05:51:47 | 329,354,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 712 |
py
|
# Problem ("Top to Bottom"): a sequence holds numbers in arbitrary order;
# sort them into descending order.
# Input: first line N, the count of numbers (1 < N <= 500); the next N lines
#        each hold one natural number between 1 and 100000.
# Example input:  3 / 15 / 27 / 12
# Example output: 27 15 12
n = int(input())
array = []
for i in range(n):
    array.append(int(input()))

# sorted() with reverse=True gives descending order.
array = sorted(array, reverse = True)
for i in array:
    print(i, end = ' ')
|
[
"[email protected]"
] | |
0eb8c05f44ce6192a839496e20dd39bbaf464182
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_5/ar_/test_artificial_1024_RelativeDifference_Lag1Trend_5__100.py
|
f07285216a3cc7ff2114694287b967c65119eace
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 275 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Benchmark case: 1024-point daily artificial series with a Lag1Trend trend,
# cycle length 5, RelativeDifference transform, zero noise, 100 exogenous
# variables and no autoregressive component.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 0);
|
[
"[email protected]"
] | |
24b6342cfd9d6f470842e7b811d8251cdbf6b932
|
c85a6d674679780ee510b5c8c3dbcbdecc859f64
|
/test/test_alert_config.py
|
b935b925e2b7e72d3f4f6959ec9d5a61a7aa6c8d
|
[] |
no_license
|
cbrowet-axway/APIM_sdk
|
d4f4a124e86a7b2e65d0ef07b54c68e95de68337
|
4f82df67ebe3dd6eae645bab8f86e72c0347ee24
|
refs/heads/master
| 2020-05-25T13:22:35.802350 | 2020-04-16T09:25:21 | 2020-04-16T09:25:21 | 187,820,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 944 |
py
|
# coding: utf-8
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.alert_config import AlertConfig # noqa: E501
from swagger_client.rest import ApiException
class TestAlertConfig(unittest.TestCase):
    """AlertConfig unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures required for the generated stub.
        pass

    def tearDown(self):
        pass

    def testAlertConfig(self):
        """Test AlertConfig"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.alert_config.AlertConfig()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
f12c7e78f6cc76322a20f97f04b8731c60d73ac0
|
5474905a26e356fe2742e62567718173b81b616d
|
/templates/python.flask/{{cookiecutter.project_safe_name}}/test/test_demo.py
|
52e109dcd4a6b06c2716df9480549aeac5797cf5
|
[
"MIT"
] |
permissive
|
by46/recipe
|
16dd24a8a83f2a00beab84c5b6522c0bff073233
|
203abd2141a536b66b4e57d073169a49395be1f0
|
refs/heads/master
| 2020-04-13T22:41:27.865516 | 2016-09-09T10:09:20 | 2016-09-09T10:09:20 | 65,368,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 746 |
py
|
import unittest
import app
from app import create_app
class HelloWorldTestCase(unittest.TestCase):
    """Smoke tests for the {{cookiecutter.project_slug}} Flask app in 'test' config."""

    def setUp(self):
        # Build a fresh test client per test from the app factory.
        self.client = create_app('test').test_client()

    def test_hello_world(self):
        # Index page should contain the expected book title.
        response = self.client.get('/{{cookiecutter.project_slug}}', follow_redirects=True)
        self.assertTrue('The Art of Computer Programming' in response.data)

    def test_version(self):
        # /version must echo the package's __version__ string.
        response = self.client.get('/{{cookiecutter.project_slug}}/version', follow_redirects=True)
        self.assertTrue(app.__version__ in response.data)

    def test_faq(self):
        response = self.client.get('/{{cookiecutter.project_slug}}/faq.htm')
        self.assertEqual('<!--Newegg-->', response.data)
|
[
"[email protected]"
] | |
9106c5d1a7b95165084dd263e4a7421c8030d12e
|
e95eeb123f3772da8d2dc7677e7afdc1287f1276
|
/bot.py
|
a1f32743e20491dec9bd1bd4b5ce00cf5bfb409e
|
[
"MIT"
] |
permissive
|
jayrav13/presidency
|
3cf880bf51d211f8fb21d5c4bc564f22c2a8ae4f
|
f18721d5df9af161cc01f503b6657d9b06fea0e9
|
refs/heads/master
| 2022-09-29T22:53:40.867506 | 2017-03-03T18:57:04 | 2017-03-03T18:57:04 | 72,818,604 | 15 | 2 |
MIT
| 2022-09-16T17:46:42 | 2016-11-04T05:56:54 |
Python
|
UTF-8
|
Python
| false | false | 5,501 |
py
|
"""
	Imports
"""
from presidency.models import *
from lxml import html
import requests
import json
import datetime
from twython import Twython
import os
import time
import sys

"""
	Set UTF-8 for everything.
"""
# Python 2 idiom to force a default encoding for mixed str/unicode handling.
reload(sys)
sys.setdefaultencoding("utf-8")

# Establish Base URL.
base_url = os.environ.get('WHITE_HOUSE_URL') + ""

# Establish all pages to scrape: path -> human-readable category name.
pages = {
	"/briefing-room/speeches-and-remarks": "Speeches and Remarks",
	"/briefing-room/press-briefings": "Press Briefings",
	"/briefing-room/statements-and-releases": "Statements and Releases",
	"/briefing-room/presidential-actions/executive-orders": "Executive Orders",
	"/briefing-room/presidential-actions/presidential-memoranda": "Presidential Memoranda",
	"/briefing-room/presidential-actions/proclamations": "Proclamations",
	"/briefing-room/presidential-actions/related-omb-material": "Related OMB Material",
	# "/briefing-room/pending-legislation": "Pending Legislation",
	# "/briefing-room/signed-legislation": "Signed Legislation",
	# "/briefing-room/vetoed-legislation": "Vetoed Legislation",
	"/briefing-room/statements-administration-policy": "Statements of Administration Policy"
}

# Scrape each page.
for key, value in pages.iteritems():
	print("Scanning " + value)

	# Make request and transform into tree.
	page_url = base_url + key
	response = requests.get(page_url)
	tree = html.document_fromstring(response.text)

	# Determine number of total pages (fall back to 1 when no pager exists).
	pagecount = int(tree.xpath('//li[@class="pager-current"]')[0].text_content().split(' of ')[1]) if len(tree.xpath('//li[@class="pager-current"]')) > 0 else 1

	# Keep iterating through pages until you reach a page that has been fully scraped. Then stop.
	for i in range(0, pagecount):
		# Re-fetch the current page_url (advanced via the pager link below).
		response = requests.get(page_url)
		print("PAGE URL: " + page_url)
		tree = html.document_fromstring(response.text)

		# Build the resulting dictionary objects for each document on that page.
		objects = [{
			"document_date": x.xpath('div[contains(@class, "views-field-created")]')[0].text_content().strip() if len(x.xpath('div[contains(@class, "views-field-created")]')) > 0 else x.xpath('div')[0].text_content().split(' on ')[1],
			"title": x.xpath('div[contains(@class, "views-field-title")]')[0].text_content().strip(),
			"uri": x.xpath('div[contains(@class, "views-field-title")]')[0].xpath('h3')[0].xpath('a')[0].attrib['href'].strip(),
			"category_slug": key,
			"category_name": value,
			"full_url": os.environ.get('WHITE_HOUSE_URL') + x.xpath('div[contains(@class, "views-field-title")]')[0].xpath('h3')[0].xpath('a')[0].attrib['href'].strip()
		} for x in tree.xpath('//div[contains(@class, "views-row")]')]

		# Add url's to object: shorten each full URL via the Google URL shortener.
		for i in range(0, len(objects)):
			url = requests.post('https://www.googleapis.com/urlshortener/v1/url?key=' + os.environ.get('GOOGLE_URL_SHORTENER_API_KEY'), json={"longUrl": os.environ.get('WHITE_HOUSE_URL') + objects[i]['uri']})
			if url.status_code == 200:
				objects[i]['short_url'] = url.json()['id']
			else:
				# NOTE(review): on failure this reads the not-yet-set 'short_url'
				# key back into itself — would raise KeyError; confirm intent.
				objects[i]['short_url'] = objects[i]['short_url']

		# Create database objects for all of these.
		records = [WhiteHouse(x['title'], x['uri'], x['category_slug'], x['category_name'], x['document_date'], x['full_url'], x['short_url']) for x in objects]

		# Track number of records successfully added. Those not added will be duplicates.
		record_counter = 0

		# Iterate through records.
		for x in records:
			# Attempt to persist.
			try:
				db.session.add(x)
				db.session.commit()
				record_counter = record_counter + 1
				print("Added " + x.title + " successfully.")
			# Fallback,
			except Exception as e:
				# Flush old commit that did not persist.
				db.session.rollback()
				# Try to save an error message.
				"""
				try:
					db.session.add(Error(str(e)))
					db.session.commit()
				except:
					db.session.rollback()
				"""
				print("Failed to add " + x.title + " successfully: " + str(e))

		# If 0 records were added to the database, everything henceforth is old in this topic.
		# Break, go to next slug.
		pager = tree.xpath('//li[contains(@class, "pager-next")]')
		try:
			print(pager[0].xpath('a')[0].attrib['href'])
			page_url = base_url + pager[0].xpath('a')[0].attrib['href']
		except:
			pass

# Retrieve all documents in descending order.
documents = WhiteHouse.query.filter_by(is_tweeted=False).order_by(WhiteHouse.document_date.asc())
print("New documents detected: %d" % (documents.count()))

# Set up Twitter bot.
twitter = Twython(
	os.environ.get('TWITTER_CONSUMER_KEY'),
	os.environ.get('TWITTER_CONSUMER_SECRET'),
	os.environ.get('TWITTER_ACCESS_TOKEN'),
	os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')
)

# Go through all relevant documents and tweet them out.
for document in documents:
	try:
		# Truncate the title to 113 chars so title + ellipsis + short URL fits a tweet.
		tweet = document.title[0 : 113] + ("..." if len(document.title) > 113 else "") + " " + document.short_url
		if os.environ.get('TWEET_ENV') == "TRUE":
			try:
				twitter.update_status( status=(tweet) )
				document.is_tweeted = True
			except Exception as e:
				"""
				db.session.add(Error(str(e)))
				db.session.commit()
				"""
				continue
		document.tweet = tweet
		print("Tweeted: " + document.tweet)
		db.session.add(document)
		db.session.commit()
	except Exception as e:
		"""
		try:
			db.session.add(Error(str(e)))
			db.session.commit()
		except:
			db.session.rollback()
		"""
		pass
	# Time Delay
	if os.environ.get('TWEET_ENV') == "TRUE":
		time.sleep(10)
|
[
"[email protected]"
] | |
a2300d6a94ca2cefd91d8d13d10b57d752bcefa4
|
1ade02a8e0c6d7e442c9d9041f15518d22da3923
|
/w2/d5/sqlite_db/schema.py
|
99c0917574b62199a3263ba8d784e3cfc122ffc9
|
[] |
no_license
|
fodisi/ByteAcademy-Bootcamp
|
7980b80636a36db6da3e0fc0e529fbc6b8e097e0
|
d53e3f4864f6cba1b85e806c29b01c48e3c2e81d
|
refs/heads/master
| 2020-03-19T12:55:31.489638 | 2018-07-25T16:19:19 | 2018-07-25T16:19:19 | 136,550,128 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 458 |
py
|
#!/usr/bin/env python3
import sqlite3
# create a connection to the database
# Create a connection to the securities-master database (file is created on
# first use). check_same_thread=False lets other threads reuse the connection.
connection = sqlite3.connect("securities_master.db", check_same_thread=False)

# Create a cursor object to represent the "gaze" of the database management system.
cursor = connection.cursor()

try:
    # IF NOT EXISTS makes this schema script idempotent: re-running it no
    # longer aborts with "table rippleUSD already exists".
    cursor.execute(
        """CREATE TABLE IF NOT EXISTS rippleUSD(
        pk INTEGER PRIMARY KEY AUTOINCREMENT,
        unix_time FLOAT,
        last_price FLOAT,
        trade_volume FLOAT
        );"""
    )
    # Explicit commit so the DDL is durable regardless of the sqlite3
    # module's autocommit behavior across Python versions.
    connection.commit()
finally:
    # Release the handles even if the DDL fails (original leaked them on error).
    cursor.close()
    connection.close()
|
[
"[email protected]"
] | |
606c6daa39403e1f7813670974620cd5c5c62c6f
|
9c8b45b2b2be2e4c7063675965fa25538114e660
|
/namseoul/urls.py
|
85b13d914746388b20e042876187df50d8b64b07
|
[] |
no_license
|
gloweean/namseoul
|
1a8f8b85b7ff4213c078b8e3cca409dfadfac5f4
|
9acc0c3c0e12f61d5ad399c32364bff2d11cbcfb
|
refs/heads/master
| 2020-04-07T01:49:23.669077 | 2018-12-22T05:44:37 | 2018-12-22T05:44:37 | 157,953,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,509 |
py
|
"""namseoul URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from message import views
from rest_framework import routers
from rest_framework.authtoken import views as AuthView
from member.views import UserSignUpView, UserRetrieveUpdateDestroyView, UserLogoutView
# ViewSet을 사용할 경우 router를 지정해주어야 한다.
router = routers.DefaultRouter()
router.register(r'message', views.MessageViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
path('signup', UserSignUpView.as_view()),
path('user_info', UserRetrieveUpdateDestroyView.as_view()),
path('login', AuthView.obtain_auth_token), # 이후 요청부터는 Authorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b 형식으로 request header에 넣어서 요청을 보내야 한다.
path('logout', UserLogoutView.as_view()),
]
|
[
"[email protected]"
] | |
28a57877699840d447b57131eafedbf97d4ffd13
|
9e15ada895e90d033bc3b65c2666065bddd62605
|
/08/8.1.repr_test.py
|
06248a91f25680a03388cc1f6d0487d858914dcf
|
[] |
no_license
|
zhyErick/fengkuang_python
|
b0f0c78273420fd862691799bfd7e4f1b6eadf80
|
6d50ad3b7d4ae05d06379c2dc87d91081964ec6d
|
refs/heads/master
| 2021-02-14T08:23:26.616211 | 2020-05-06T13:08:07 | 2020-05-06T13:08:07 | 244,788,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
class Apple:
# 实现构造器
def __init__(self, color, weight):
self.color = color
self.weight = weight
# 重写__repr__()方法,用于实现Apple对象的自我描述
def __repr__(self):
return "Apple[color=" + self.color + ", weight=" + str(self.weight) + "]"
a = Apple("红色", 5.68)
print(a)
|
[
"[email protected]"
] | |
387f78efedf54707074b3d54e433ca863301716b
|
9c9701f79c8eeaa05f684442d2d03f7de4bba1f1
|
/Korpora/korpus_namuwiki.py
|
2e75dd8d2a48c3a78b50a187af42870c394678d6
|
[
"CC-BY-4.0"
] |
permissive
|
hank110/Korpora
|
e54708fe2d7910df4e6ec5cff1cf1ca0696636bf
|
b0e014f5c8c4ba71aba335285d0be48cbb802a0d
|
refs/heads/master
| 2023-01-10T04:24:14.386097 | 2020-09-21T03:42:25 | 2020-09-21T03:42:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,007 |
py
|
import os
from .korpora import Korpus, SentencePairKorpusData
from .utils import fetch, default_korpora_path, load_wikitext
NAMUWIKI_FETCH_INFORMATION = [
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.train.zip',
'destination': 'namuwikitext/namuwikitext_20200302.train.zip',
'method': 'download & unzip'
},
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.test.zip',
'destination': 'namuwikitext/namuwikitext_20200302.test.zip',
'method': 'download & unzip'
},
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.dev.zip',
'destination': 'namuwikitext/namuwikitext_20200302.dev.zip',
'method': 'download & unzip'
}
]
description = """ Author : Hyunjoong Kim lovit@github
Repository : https://github.com/lovit/namuwikitext
References :
나무위키의 덤프 데이터를 바탕을 제작한 wikitext 형식의 텍스트 파일입니다.
학습 및 평가를 위하여 위키페이지 별로 train (99%), dev (0.5%), test (0.5%) 로 나뉘어져있습니다.
"""
license = " CC BY-NC-SA 2.0 KR which Namuwiki dump dataset is licensed"
class NamuwikiTextKorpusData(SentencePairKorpusData):
"""
Args:
description (str) : data description
texts (list of str) : namuwiki contents including '\n'
pairs (list of str) : title
"""
def __init__(self, description, texts, pairs):
super().__init__(description, texts, pairs)
class NamuwikiTextKorpus(Korpus):
def __init__(self, root_dir=None, force_download=False):
super().__init__(description, license)
if root_dir is None:
root_dir = default_korpora_path
fetch_namuwikitext(root_dir, force_download)
for information in NAMUWIKI_FETCH_INFORMATION:
destination = information['destination']
local_path = os.path.join(os.path.abspath(root_dir), destination[:-4])
if 'train' in destination:
response = input(
'NamuwikiText.train text file is large (5.3G).'
'If you want to load text in your memory, please insert `yes`').lower()
if (len(response) == 1 and response == 'y') or (response == 'yes'):
texts, titles = self.load(local_path)
self.train = NamuwikiTextKorpusData(description, texts, titles)
else:
dirname = os.path.abspath(f'{root_dir}/namuwikitext')
self.train = f'Namuwikitext corpus is downloaded. Open local directory {dirname}'
print('Continue to load `dev` and `test`')
continue
texts, titles = self.load(local_path)
if 'dev' in destination:
self.dev = NamuwikiTextKorpusData(description, texts, titles)
elif 'test' in destination:
self.test = NamuwikiTextKorpusData(description, texts, titles)
else:
raise ValueError(f'Check local files')
def load(self, path):
def split_title_text(wikitext):
lines = wikitext.split('\n')
title = lines[0]
text = '\n'.join([line.strip() for line in lines[2:] if line.strip()])
return title, text
wikitexts = load_wikitext(path)
wikitexts = [split_title_text(wikitext) for wikitext in wikitexts]
titles, texts = zip(*wikitexts)
# swap position
return texts, titles
def fetch_namuwikitext(root_dir, force_download):
for information in NAMUWIKI_FETCH_INFORMATION:
url = information['url']
destination = information['destination']
local_path = os.path.join(os.path.abspath(root_dir), destination)
fetch(url, local_path, 'namuwikitext', force_download, information['method'])
|
[
"[email protected]"
] | |
d60e972614e566bef7cbc20eb726db3227df9346
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/pointcloud/_y.py
|
dfc46fe42092562d7111c7b05c2ec21d2a386694
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 |
MIT
| 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null |
UTF-8
|
Python
| false | false | 400 |
py
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name='y', parent_name='pointcloud', **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc+clearAxisTypes',
role='data',
**kwargs
)
|
[
"[email protected]"
] | |
7a218a01ecbfc594cc00ff334d30ebe2489e5c13
|
c324a6d923bae3a00bd1dc69a43d0e5c707a104a
|
/addons-vauxoo/hr_expense_replenishment/__openerp__.py
|
f75805678aedf47bc322db43b9213897c5e35bdc
|
[] |
no_license
|
meswapnilwagh/odoo-adr
|
5c593c2240d23b79811ccd7b5297b634e5ffe19d
|
442c8d5fa52cab30028a26dd93bd8eae88d58fed
|
refs/heads/master
| 2020-01-27T10:03:27.142715 | 2015-09-04T14:36:59 | 2015-09-04T14:36:59 | 50,238,226 | 0 | 4 | null | 2016-01-23T12:53:28 | 2016-01-23T12:53:25 | null |
UTF-8
|
Python
| false | false | 2,537 |
py
|
# -*- encoding: utf-8 -*-
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Katherine Zaoral <[email protected]>
# Planified by: Humberto Arocha <[email protected]>
# Audited by: Humberto Arocha <[email protected]>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
{
"name": "Expenses Replenishment",
"version": "0.1",
"author": "Vauxoo",
"category": "HR Module",
"description": """
Expenses Replenishment
======================
This module add the functionality to the HR Expense module to manage deductible
expenses by using invoices asociated to an expense document. Also make an
automation of the reconciliation process for the expense and the employee
payment.
Dependencies information
------------------------
- You can download the *account_invoice_line_currency* module from::
bzr branch lp:addons-vauxoo/7.0
""",
"website": "http://openerp.com.ve",
"license": "",
"depends": [
"hr_expense",
"account_invoice_line_currency",
"hr_expense_analytic",
"account_move_report"
],
"demo": [],
"data": [
"security/hr_security.xml",
"wizard/hr_expense_wizard_view.xml",
"view/account_invoice_view.xml",
"view/hr_expense_view.xml",
"workflow/workflow.xml"
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
"active": False
}
|
[
"[email protected]"
] | |
b51ac12b70717c54b15760648a95a50bb8013523
|
b36c065d9fe10a6a9bf42415f3a716565ba26756
|
/old_code/basicdatas/dicts.py
|
e4c860a52e2b343ae12c7d32c9bedfb1cc78cc21
|
[] |
no_license
|
fanghongbin/nmc_met_class
|
a447255ce43b2b8f33ee2db584e55483ce68d82c
|
b59e5ab68c47d83c70c0d7081ca23dce72bf8c75
|
refs/heads/master
| 2022-02-13T05:25:40.201333 | 2019-05-09T06:54:58 | 2019-05-09T06:54:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,942 |
py
|
#!/usr/bin/python3.6
# -*- coding:UTF-8 -*-
import mymethods.str_finder as finder
gds_station_data_element_name_id = {
"经度":1,
"纬度":2,
"测站高度":3,
"测站级别(short)":4,
"测站类型(short)":5,
"气压传感器海拔高度":6,
"温湿传感器离地面高度":7,
"温湿传感器距水面高度":8,
"风速传感器距地面高度":9,
"风传感器距甲板平台高度":10,
"风速传感器距水面高度":11,
"移动平台移动方向":12,
"移动平台移动速度":13,
"海盐传感器距海面深度":14,
"浪高传感器距海面高度":15,
"浮标方位":16,
"总水深":17,
"海面/水面以下深度":18,
"船面距海面高度":19,
"方位或方位角":20,
"字符型站名":21,
"风向":201,
"风速":203,
"1分钟平均风向":205,
"1分钟平均风速":207,
"2分钟平均风向":209,
"2分钟平均风速":211,
"10分钟平均风向":213,
"10分钟平均风速":215,
"最大风速的风向":217,
"最大风速":219,
"瞬时风向":221,
"瞬时风速":223,
"极大风速的风向":225,
"极大风速":227,
"过去6小时极大瞬时风速的风向":229,
"过去6小时极大瞬时风速":231,
"过去12小时极大瞬时风速的风向":233,
"过去12小时极大瞬时风速":235,
"风力(short)":237,
"海平面气压":401,
"3小时变压":403,
"24小时变压":405,
"本站气压":407,
"最高气压":409,
"最低气压":411,
"气压":413,
"日平均气压":415,
"日平均海平面气压":417,
"高度(探空)":419,
"位势高度(探空)":421,
"温度":601,
"最高气温":603,
"最低气温":605,
"24小时变温":607,
"过去24小时最高气温":609,
"过去24小时最低气温":611,
"日平均气温":613,
"露点温度":801,
"温度露点差":803,
"相对湿度":805,
"最小相对湿度":807,
"日平均相对湿度":809,
"水汽压":811,
"日平均水汽压":813,
"降水量":1001,
"1小时降水":1003,
"3小时降水":1005,
"6小时降水":1007,
"12小时降水":1009,
"24小时降水":1011,
"日总降水":1013,
"20-08时降水量":1015,
"08-20时降水量":1017,
"20-20时降水量":1019,
"08-08时降水量":1021,
"蒸发":1023,
"蒸发(大型)":1025,
"可降水分(预报降水量)":1027,
"1分钟平均水平能见度":1201,
"10分钟平均水平能见度":1203,
"最小水平能见度":1205,
"水平能见度(人工)":1207,
"总云量":1401,
"低云量":1403,
"云底高度":1405,
"低云状(short)":1407,
"中云状(short)":1409,
"高云状(short)":1411,
"日平均总云量":1413,
"日平均低云量":1415,
"云量(低云或中云)":1417,
"云类型(short)":1419,
"现在天气(short)":1601,
"过去天气1(short)":1603,
"过去天气2(short)":1605,
"龙卷类型(short)":1801,
"龙卷所在方位(short)":1803,
"最大冰雹直径":1805,
"雷暴(short)":1807,
"电流强度(闪电定位)":1809,
"地面温度":2001,
"最高地面温度":2003,
"最低地面温度":2005,
"过去12小时最低地面温度":2007,
"5cm地温":2009,
"10cm地温":2011,
"15cm地温":2013,
"20cm地温":2015,
"40cm地温":2017,
"80cm地温":2019,
"160cm地温":2021,
"320cm地温":2023,
"草面(雪面)温度":2025,
"草面(雪面)最高温度":2027,
"草面(雪面)最低温度":2029,
"日平均地面温度":2031,
"日平均5cm地温":2033,
"日平均10cm地温":2035,
"日平均15cm地温":2037,
"日平均20cm地温":2039,
"日平均40cm地温":2041,
"日平均80cm地温":2043,
"日平均160cm地温":2045,
"日平均320cm地温":2047,
"日平均草面(雪面)温度":2049,
"地面状态(short)":2201,
"积雪深度":2203,
"雪压":2205,
"电线积冰直径":2207,
"电线积冰-现象(short)":2209,
"电线积冰-南北方向直径":2211,
"电线积冰-南北方向厚度":2213,
"电线积冰-南北方向重量":2215,
"电线积冰-东西方向直径":2217,
"电线积冰-东西方向厚度":2219,
"电线积冰-东西方向重量":2221,
"船上结冰原因(short)":2223,
"船上结冰厚度":2225,
"船上结冰速度(short)":2227,
"海冰密集度(short)":2229,
"冰情发展(short)":2231,
"冰总量和类型(short)":2233,
"冰缘方位":2235,
"冰情(short)":2237,
"最高气压出现时间":10001,
"最低气压出现时间":10003,
"最高气温出现时间":10005,
"最低气温出现时间":10007,
"最小相对湿度出现时间":10009,
"最大风速出现时间":10011,
"极大风速出现时间":10013,
"最高地面温度出现时间":10015,
"最低地面温度出现时间":10017,
"草面(雪面)最低温度出现时间":10019,
"草面(雪面)最高温度出现时间":10021,
"最小水平能见度出现时间":10023,
"天气出现时间":10025,
"海表最高温度出现时间":10027,
"海表最低温度出现时间":10029,
"最大波高出现时间":10031,
"风速表类型":2401,
"湿球温度测量方法":2403,
"海面温度测量方法":2405,
"洋流测量方法":2407,
"气压倾向特征":2409,
"海面温度":2601,
"湿球温度":2603,
"海面盐度":2605,
"海表最高温度":2607,
"海表最低温度":2609,
"海水温度":2611,
"海水盐度":2613,
"海面海流方向":2801,
"海面海流速度":2803,
"洋流方向和速度的平均周期(short)":2805,
"表层海洋面流速":2807,
"表层海洋面波向":2809,
"海流方向":2811,
"海流速度":2813,
"波浪方向":3001,
"波浪周期":3003,
"波浪高度":3005,
"风浪方向":3007,
"风浪周期":3009,
"风浪高度":3011,
"第一涌浪方向":3013,
"第一涌浪周期":3015,
"第一涌浪高度":3017,
"第二涌浪方向":3019,
"第二涌浪周期":3021,
"第二涌浪高度":3023,
"有效波高":3025,
"有效波高的周期":3027,
"平均波高":3029,
"平均波周期":3031,
"最大波高":3033,
"最大波高的周期":3035,
"人工测量浪高":3037,
"仪器测量浪高":3039,
"浪级代码(short)":3041
}
gds_station_data_element_id_name = dict(zip(gds_station_data_element_name_id.values(),gds_station_data_element_name_id.keys()))
def gds_station_data_element_id_finder(input_strs):
ele_names = finder.muti_strs_finder(input_strs,gds_station_data_element_name_id)
names_ids = {}
for names in ele_names:
names_ids[names] = gds_station_data_element_name_id[names]
print(names + " : " + str(names_ids[names]))
return names_ids
class m1_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
站点级别 = 4
总云量 =5
风向 = 6
风速 = 7
气压 = 8
小时变压 = 9
过去天气1 = 10
过去天气2 =11
降水6小时 =12
低云状 =13
低云量 =14
低云高 =15
露点 =16
能见度 =17
现在天气 =18
温度 =19
中云状 =20
高云状 =21
标志1 =22
标志2 =23
日变温 = 24
日变压 =25
class m2_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
位势高度 = 4
温度 = 5
温度露点差 = 6
风向 = 7
风速 = 8
class m8_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
天气现象1 = 4
风向1 = 5
风速1 = 6
最低温度 = 7
最高温度 = 8
天气现象2 = 9
风向2 = 10
风速2 = 11
|
[
"[email protected]"
] | |
1e14a12fb0353af32a9218ab79645ee9b390dfb1
|
51554f9c49231e4a0c7a0356456050e927ce2884
|
/accounts/views.py
|
901b9709e55d1d06d857e863436a139628cc653d
|
[
"Apache-2.0"
] |
permissive
|
geoffreynyaga/ANGA-UTM
|
10a2958e172faad66e414b561ec035a2162571e7
|
68d3033529490d3fb57ac727c8c2a2f77fcffae6
|
refs/heads/master
| 2022-12-09T18:30:25.622423 | 2022-01-10T18:07:29 | 2022-01-10T18:07:29 | 232,053,896 | 8 | 3 |
Apache-2.0
| 2022-11-22T03:59:59 | 2020-01-06T08:10:06 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,576 |
py
|
from django.shortcuts import render
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.urls import reverse_lazy
from django.forms.models import inlineformset_factory
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from flight_plans.models import FlightLog
from rpas.models import Rpas
from . import (
forms,
) # TODO: where is this needed? see line below and resolve to use just one in this doc
from .forms import UserForm
from .models import UserProfile
# Create your views here.
class LoginView(generic.FormView):
form_class = AuthenticationForm
success_url = reverse_lazy("view_airspace")
template_name = "accounts/login.html"
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(self.request, **self.get_form_kwargs())
def form_valid(self, form):
login(self.request, form.get_user())
return super().form_valid(form)
def logout_view(request):
logout(request)
return HttpResponseRedirect("/account/login")
class SignUp(generic.CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy("login")
template_name = "accounts/signup.html"
@login_required() # only logged in users should access this
def edit_user(request, pk):
# querying the User object with pk from url
user = User.objects.get(pk=pk)
# prepopulate UserProfileForm with retrieved user values from above.
user_form = UserForm(instance=user)
# The sorcery begins from here, see explanation below
ProfileInlineFormset = inlineformset_factory(
User,
UserProfile,
fields=(
"phone_number",
"organization",
"bio",
"profile_pic",
"location",
"birth_date",
),
)
formset = ProfileInlineFormset(instance=user)
if request.user.is_authenticated and request.user.id == user.id:
if request.method == "POST":
user_form = UserForm(request.POST, request.FILES, instance=user)
formset = ProfileInlineFormset(request.POST, request.FILES, instance=user)
if user_form.is_valid():
created_user = user_form.save(commit=False)
formset = ProfileInlineFormset(
request.POST, request.FILES, instance=created_user
)
if formset.is_valid():
created_user.save()
formset.save()
# return HttpResponseRedirect('/account/profile/')
return HttpResponseRedirect(
reverse("accounts:view_profile", args=(user.id,))
)
return render(
request,
"accounts/edit_profile.html",
{"noodle": pk, "noodle_form": user_form, "formset": formset},
)
else:
raise PermissionDenied
# class view_profile(generic.TemplateView):
# template_name = "accounts/profile.html"
# # model = UserProfile
#
# def get(self, request):
# myrpas = Rpas.objects.filter(organization = request.user.userprofile.organization)
# myflightlogs = FlightLog.objects.filter(user = request.user)
# args = {'myrpas': myrpas, 'myflightlogs':myflightlogs}
# return render(request, self.template_name ,args)
class ViewProfile(LoginRequiredMixin, generic.DetailView):
template_name = "accounts/profile.html"
model = UserProfile
def get_context_data(self, *args, **kwargs):
context = super(ViewProfile, self).get_context_data(**kwargs)
pk = self.kwargs["pk"]
thisuser = User.objects.get(pk=pk)
org = thisuser.userprofile.organization
context["myrpas"] = Rpas.objects.filter(organization=org)
context["myflightlogs"] = FlightLog.objects.filter(user=thisuser)
return context
def error_404(request, exception):
data = {}
return render(request, "errors/404.html", data)
def error_500(request):
data = {}
return render(request, "errors/500.html", data)
|
[
"[email protected]"
] | |
23dc496b373f870ec52009d414579d71d99fa082
|
8807958eab34f289cc8b1b07e180af757bde7124
|
/design2/test_LineClassifier.py
|
75cb3a10fcec6095c64c62655aa304d8f43531da
|
[
"BSD-2-Clause"
] |
permissive
|
davidjamesbeck/IJAL-interlinear
|
4f34cbb8626403f7bc52db96f0349d10ca2ce674
|
cb5dbb1d6aea98cce76668aa868a9189f31baf3f
|
refs/heads/master
| 2020-03-30T11:00:46.001171 | 2018-10-01T13:50:02 | 2018-10-01T13:50:02 | 151,148,840 | 0 | 0 |
BSD-2-Clause
| 2018-10-01T19:45:38 | 2018-10-01T19:45:37 | null |
UTF-8
|
Python
| false | false | 3,028 |
py
|
import re
import sys
import unittest
from Line import *
from LineClassifier import *
import importlib
pd.set_option('display.width', 1000)
import pdb
def runTests():
test_recognizeDegenerateLine()
test_recognizeCanonicalLine()
test_recognizeWordsAsElementsLine()
test_MonkeyAndThunder_allLinesRecognized()
test_LOKONO_allLinesRecognized()
def test_recognizeDegenerateLine():
"""
MonkeyAndThunder starts off with a few introductory lines in Spanish, with English translation.
No words, no glosses, just a line with time slots, and one child
"""
print("--- test_recognizeDegenerateLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x0 = Line(xmlDoc, 0)
assert(x0.getTierCount() == 2)
classifier = LineClassifier(x0.getTable())
assert(classifier.run() == "DegenerateLine")
def test_recognizeCanonicalLine():
"""
MonkeyAndThunder line 6 fits the canonical form:
1) a time line
"""
print("--- test_recognizeCanonicalLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 6)
assert(x.getTierCount() == 4)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "CanonicalLine")
def test_recognizeWordsAsElementsLine():
"""
LOKONO has the canonical spokenText tier, its translation, but each word in the
spokenText is its own element, each with two children: morpheme and gloss
"""
print("--- test_recognizeWordsAsElementsLine")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 1)
# print(x.getTable())
assert(x.getTierCount() == 20)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "WordsAsElementsLine")
def test_MonkeyAndThunder_allLinesRecognized():
print("--- test_MonkeyAndThunder_allLinesRecognized")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 41)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["DegenerateLine", "CanonicalLine"])
def test_LOKONO_allLinesRecognized():
print("--- test_LOKONO_allLinesRecognized")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 344)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["WordsAsElementsLine"])
#x = Line(xmlDoc, 28)
#x.getTable()
|
[
"[email protected]"
] | |
2674d077dcd3e48cf5445537f600e6171777c48d
|
3f7c4de996894d83f0e999ab9e60302be5ab195f
|
/tests/test_fleet_telematics_api.py
|
08355ed115f5e5d2e7b808128cec81a2981e98ee
|
[
"MIT"
] |
permissive
|
tungson-pm/HerePy
|
3f18ffddd181434c63f94abe67844c0fcb02747d
|
a9e2797f251ff157cf89cfae7c1605833bfee75f
|
refs/heads/master
| 2022-12-25T06:08:21.880054 | 2020-10-05T19:54:51 | 2020-10-05T19:54:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,376 |
py
|
#!/usr/bin/env python
import os
import time
import unittest
import json
import responses
import herepy
class FleetTelematicsApiTest(unittest.TestCase):
def setUp(self):
api = herepy.FleetTelematicsApi('api_key')
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.FleetTelematicsApi)
self.assertEqual(self._api._api_key, 'api_key')
self.assertEqual(self._api._base_url, 'https://wse.ls.hereapi.com/2/')
@responses.activate
def test_find_sequence_whensucceed(self):
with open('testdata/models/fleet_telematics_find_sequence.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
response = self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_sequence_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
with self.assertRaises(herepy.HEREError):
self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
@responses.activate
def test_find_pickups_whensucceed(self):
with open('testdata/models/fleet_telematics_find_pickups.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
response = self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_pickups_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
with self.assertRaises(herepy.HEREError):
self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
|
[
"[email protected]"
] | |
a85cc8cf2b49e89ca79b5d93c0af0d7e1dcec4ee
|
c55083d8a23a9d093b677066a5a827634c09357b
|
/chstrings/__init__.py
|
39796432eff779705b6f260f03ae6661e1d07d2b
|
[
"MIT"
] |
permissive
|
earwig/citationhunt
|
211a44c7bdb67e675872ca44aeae982d33fcf359
|
b6084d2958989c9082db7a8d4556a4e51b78bdb3
|
refs/heads/master
| 2021-01-15T16:11:11.563650 | 2016-07-21T11:08:43 | 2016-07-21T11:08:43 | 62,332,946 | 1 | 0 | null | 2016-06-30T18:16:46 | 2016-06-30T18:16:46 | null |
UTF-8
|
Python
| false | false | 1,902 |
py
|
import flask
import os
import json
def _preprocess_variables(config, strings):
in_page_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>')
strings['in_page'] = \
flask.Markup(strings['in_page']) % in_page_link
if config.lead_section_policy_link:
lead_section_policy_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.lead_section_policy_link,
config.lead_section_policy_link_title)
strings['lead_section_hint'] = \
flask.Markup(strings['lead_section_hint']) % \
lead_section_policy_link
else:
strings['lead_section_hint'] = ''
beginners_hint_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.beginners_link,
config.beginners_link_title)
strings['beginners_hint'] = \
flask.Markup(strings['beginners_hint']) % beginners_hint_link
if '404' not in config.flagged_off:
page_not_found_link = flask.Markup('<a href=%s>Citation Hunt</a>') % (
config.lang_code)
strings['page_not_found_text'] = \
flask.Markup(strings['page_not_found_text']) % page_not_found_link
strings.setdefault('instructions_goal', '')
strings.setdefault('instructions_details', '')
if strings['instructions_details']:
strings['instructions_details'] = flask.Markup(
strings['instructions_details']) % (
flask.Markup('<b>' + strings['button_wikilink'] + '</b>'),
flask.Markup('<b>' + strings['button_next'] + '</b>'),
beginners_hint_link)
return strings
def get_localized_strings(config, lang_code):
strings_dir = os.path.dirname(__file__)
strings = json.load(file(os.path.join(strings_dir, lang_code + '.json')))
return _preprocess_variables(config, strings)
|
[
"[email protected]"
] | |
7f2bc13f3b49ac4bb99cd8a03c9d886de3c9552c
|
a59d55ecf9054d0750168d3ca9cc62a0f2b28b95
|
/.install/.backup/platform/gsutil/gslib/help_provider.py
|
adf4c90d50cad5e50dfe990e242fb236c5bc9fdd
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/google-cloud-sdk
|
bb2746ff020c87271398196f21a646d9d8689348
|
b34e6a18f1e89673508166acce816111c3421e4b
|
refs/heads/master
| 2022-11-26T07:33:32.877033 | 2014-06-29T20:43:23 | 2014-06-29T20:43:23 | 282,306,367 | 0 | 0 |
NOASSERTION
| 2020-07-24T20:04:47 | 2020-07-24T20:04:46 | null |
UTF-8
|
Python
| false | false | 3,604 |
py
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.exception import CommandException
class HelpType(object):
COMMAND_HELP = 'command_help'
ADDITIONAL_HELP = 'additional_help'
ALL_HELP_TYPES = [HelpType.COMMAND_HELP, HelpType.ADDITIONAL_HELP]
# help_spec key constants.
HELP_NAME = 'help_name'
HELP_NAME_ALIASES = 'help_name_aliases'
HELP_TYPE = 'help_type'
HELP_ONE_LINE_SUMMARY = 'help_one_line_summary'
HELP_TEXT = 'help_text'
SUBCOMMAND_HELP_TEXT = 'subcommand_help_text'
# Constants enforced by SanityCheck
MAX_HELP_NAME_LEN = 15
MIN_ONE_LINE_SUMMARY_LEN = 10
MAX_ONE_LINE_SUMMARY_LEN = 80 - MAX_HELP_NAME_LEN
REQUIRED_SPEC_KEYS = [HELP_NAME, HELP_NAME_ALIASES, HELP_TYPE,
HELP_ONE_LINE_SUMMARY, HELP_TEXT]
DESCRIPTION_PREFIX = """
<B>DESCRIPTION</B>"""
SYNOPSIS_PREFIX = """
<B>SYNOPSIS</B>"""
class HelpProvider(object):
"""Interface for providing help."""
# Each subclass must define the following map.
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : None,
# List of help name aliases.
HELP_NAME_ALIASES : None,
# HelpType.
HELP_TYPE : None,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : None,
# The full help text.
HELP_TEXT : None,
}
# This is a static helper instead of a class method because the help loader
# (gslib.commands.help._LoadHelpMaps()) operates on classes not instances.
def SanityCheck(help_provider, help_name_map):
"""Helper for checking that a HelpProvider has minimally adequate content."""
for k in REQUIRED_SPEC_KEYS:
if k not in help_provider.help_spec or help_provider.help_spec[k] is None:
raise CommandException('"%s" help implementation is missing %s '
'specification' % (help_provider.help_name, k))
# Sanity check the content.
assert (len(help_provider.help_spec[HELP_NAME]) > 1
and len(help_provider.help_spec[HELP_NAME]) < MAX_HELP_NAME_LEN)
for hna in help_provider.help_spec[HELP_NAME_ALIASES]:
assert len(hna) > 0
one_line_summary_len = len(help_provider.help_spec[HELP_ONE_LINE_SUMMARY])
assert (one_line_summary_len > MIN_ONE_LINE_SUMMARY_LEN
and one_line_summary_len < MAX_ONE_LINE_SUMMARY_LEN)
assert len(help_provider.help_spec[HELP_TEXT]) > 10
# Ensure there are no dupe help names or aliases across commands.
name_check_list = [help_provider.help_spec[HELP_NAME]]
name_check_list.extend(help_provider.help_spec[HELP_NAME_ALIASES])
for name_or_alias in name_check_list:
if help_name_map.has_key(name_or_alias):
raise CommandException(
'Duplicate help name/alias "%s" found while loading help from %s. '
'That name/alias was already taken by %s' % (name_or_alias,
help_provider.__module__, help_name_map[name_or_alias].__module__))
def CreateHelpText(synopsis, description):
"""Helper for adding help text headers given synopsis and description."""
return SYNOPSIS_PREFIX + synopsis + DESCRIPTION_PREFIX + description
|
[
"[email protected]"
] | |
9e86b3518912ee7ce4ce5497fb45ab9c6eb765ab
|
295ecf4f254c42e9201657ef0a13ec2c68c40c9b
|
/info/views.py
|
6a2850c2b723ff267061ff6b95988447a8586342
|
[] |
no_license
|
zwolf21/StockAdmin-pre2
|
0236061284a6fe8801591608591d21129d4ea7c0
|
b21d069ff215c17ce3bca040ecf9b8f48b452ed4
|
refs/heads/master
| 2021-05-01T09:28:59.818469 | 2016-11-30T17:33:30 | 2016-11-30T17:33:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,923 |
py
|
from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import FormView
from django.views.generic import ListView, DetailView, CreateView, TemplateView
from django.conf import settings
from django.db.models import Q
import os, sys
from .models import Info
from .forms import XlFileForm
from .modules.utils import xlDB2DicIter, is_xlfile
from django.utils import simplejson
from django.http import HttpResponse
# Create your views here.
class DrugInfoFromXlFile(FormView):
form_class = XlFileForm
template_name = 'info/get_xlfile_form.html'
def form_valid(self, form):
recreate = form.cleaned_data['recreate']
xlfile = self.request.FILES['xlfile']
if not is_xlfile(xlfile.name):
context = {
'error_message': '파일 형식이 일치하지 않습니다',
'file_name' : xlfile.name
}
return render_to_response('info/update_failure.html', context)
temp_file = os.path.join(settings.MEDIA_ROOT,'temp.xls')
with open(temp_file, 'wb') as fp:
fp.write(xlfile.read())
di_table = xlDB2DicIter(temp_file)
os.remove(temp_file)
src_field_set = set(di_table[0])
essential_field_set = {'약품코드','EDI코드','약품명(한글)','제약회사명','일반단가','수가명','규격단위'}
if not essential_field_set < src_field_set:
context = {
'error_message' : '엑셀파일에 지정된 필수 컬럼(열) 항목이 없습니다',
'essential_fields' : essential_field_set,
'missing_fields' : essential_field_set - src_field_set,
'input_file_fields': src_field_set,
'file_name' : xlfile.name
}
return render_to_response('info/update_failure.html', context)
if recreate:
Info.objects.all().delete()
context = {
'success_count' : 0,
'failure_count' : 0,
'failures' : [],
'why' : ''
}
success_count = 0
for row in di_table:
try:
Info.objects.create(
edi = int(row['EDI코드']),
code = row['약품코드'],
name = row['약품명(한글)'],
name_as = row['수가명'],
firm = row['제약회사명'],
price = row['일반단가'],
pkg_amount = row.get('포장단위') or 1,
standard_unit = row['규격단위'],
narcotic_class = int(row.get('약품법적구분') or 0)
)
except:
exception = {}
type_err, val_err, trcbk = sys.exc_info()
context['failures'].append({
'error_type': type_err.__name__,
'error_value': val_err,
'error_drug_name': row.get('약품명(한글)','약품명 미지정'),
'error_drug_code': row.get('약품코드','약품코드 미지정')
})
context['failure_count']+=1
else:
context['success_count']+=1
context['total_count'] = context['failure_count']+context['success_count']
return render_to_response('info/update_result.html', context)
class IndexTV(TemplateView):
template_name = "info/drug_info.html"
|
[
"[email protected]"
] | |
50f14085ebf1fa050502627f08de7bacfbbf9444
|
74c04ef3ed2bc71e728b3bb840c927a86352c6e1
|
/djangotesting/jango/resturant/forms.py
|
226cb77012fce96d306543ca927164a3764be1ac
|
[] |
no_license
|
zamanehsani/restaurant
|
06b658b277dda8fa8d4f5b598d389767ab61f876
|
0f21ce268fdc21402c32dee1ecc64850a24fcc2a
|
refs/heads/main
| 2023-01-12T04:52:09.541112 | 2020-11-16T05:44:04 | 2020-11-16T05:44:04 | 313,192,805 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from resturant.models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
first_name = forms.CharField(max_length=150)
last_name = forms.CharField(max_length=150)
class Meta:
model = User
fields =['first_name','last_name','username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
# email = forms.EmailField()
# first_name = forms.CharField(max_length=150)
# last_name = forms.CharField(max_length=150)
class Meta:
model = User
fields =['first_name','last_name','username', 'email']
class UserProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields =['image', 'gender']
|
[
"[email protected]"
] | |
d27d3af2bdcba02d17e1eab4c19c711a2074e5b4
|
e45d2faad9389886a82ff5176853b1ff6e37caae
|
/argparse/055_argparse_add_argument_nargs_questionMark.py
|
c28aa7baa511d22b3e9de4adc7c5adf3ead24488
|
[] |
no_license
|
allenmo/python_study
|
6320aa4cd80fe46ccf73076015c67bdcb6338d30
|
7aff5d810ca6e791d62235d57c072a8dc14457ca
|
refs/heads/master
| 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--foo', nargs='?', const='c', default='d')
parser.add_argument('bar', nargs='?', default='d')
print parser.parse_args('XX --foo YY'.split())
print parser.parse_args('XX --foo'.split())
print parser.parse_args(''.split())
|
[
"[email protected]"
] | |
1a3e9cae56843f2b9167840ccb12e915ec8f7161
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/SqbyWYwqChQroXfhu_10.py
|
ada02ff817a159ee3621064c542db76a15142950
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 159 |
py
|
def lower_triang(arr):
for i in range(len(arr)):
for j in range(len(arr[0])):
if j > i:
arr[i][j] = 0
return arr
|
[
"[email protected]"
] | |
78ad653bda4fe0bb743b72f0f9938f240fc28738
|
1929ce01411908ebe5f04f9db4ae1c7afef085e1
|
/home/migrations/0002_load_initial_data.py
|
ab7698763aa5dbf140368dc4b936b332fafab746
|
[] |
no_license
|
crowdbotics-apps/wislister-15050
|
81ad2bc3b804de790adb0606c0902915269c4990
|
2e7d08e9359d011448187a428a90ef21638ade5f
|
refs/heads/master
| 2022-12-10T08:08:17.564321 | 2020-03-25T06:57:42 | 2020-03-25T06:57:42 | 249,908,377 | 0 | 0 | null | 2022-12-08T06:06:15 | 2020-03-25T06:57:27 |
Python
|
UTF-8
|
Python
| false | false | 1,290 |
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "wislister"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">wislister</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wislister-15050.botics.co"
site_params = {
"name": "wislister",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.