Dataset schema (each row below is one source file):

| Column | Type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3–281) |
| content_id | string (length 40) |
| detected_licenses | list (length 0–57) |
| license_type | string (2 classes) |
| repo_name | string (length 6–116) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k–668M, nullable) |
| star_events_count | int64 (0–102k) |
| fork_events_count | int64 (0–38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4–6.02M) |
| extension | string (78 classes) |
| content | string (length 2–6.02M) |
| authors | list (length 1) |
| author | string (length 0–175) |
f0714282ca1bed1a0bc706dfd5e96c9a2e87dc47 | a94770c70704c22590c72d7a90f38e3a7d2e3e5c | /Algo/Leetcode/123BestTimeToBuyAndSellStockIII.py | 2a292d28fef14431391bc62620bd69b4e46bf158 | [] | no_license | lawy623/Algorithm_Interview_Prep | 00d8a1c0ac1f47e149e95f8655d52be1efa67743 | ca8b2662330776d14962532ed8994dfeedadef70 | refs/heads/master | 2023-03-22T16:19:12.382081 | 2023-03-21T02:42:05 | 2023-03-21T02:42:05 | 180,056,076 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
# Four running maxima track the best cash position after each action of the
# two-transaction state machine: first buy, first sell, second buy, second sell.
buy1 = -2**31
buy2 = -2**31
sell1 = 0
sell2 = 0
for p in prices:
buy1 = max(buy1, -p) # best cash after the first buy
sell1 = max(sell1, buy1+p) # best cash after the first sell
buy2 = max(buy2, sell1-p) # best cash after the second buy
sell2 = max(sell2, buy2+p) # best cash after the second sell
return sell2 | [
"[email protected]"
] | |
35d65c1e31e62c13f7fa5ec8809270c77652898d | bbfd23efb084d9c13add9a36744cad94224762d2 | /octoprint_raisecloud/raisecloud.py | 39b54af88113f6dd8252d1b8bc41cc8f11e0320c | [] | no_license | Raise3D/Octoprint-Raisecloud | a68842dd8839a12640ac86ae8cfd529fcb2b8742 | 35975de651e15ac8fdb4a2eecea1f7ab8e25038d | refs/heads/master | 2023-08-31T09:41:39.477679 | 2023-08-17T07:52:43 | 2023-08-17T07:52:43 | 255,273,781 | 7 | 7 | null | 2023-08-17T07:52:44 | 2020-04-13T08:40:52 | Python | UTF-8 | Python | false | false | 3,237 | py | # coding=utf-8
from __future__ import absolute_import, unicode_literals
import json
import base64
import logging
import requests
from Crypto.Cipher import AES
_logger = logging.getLogger('octoprint.plugins.raisecloud')
class RaiseCloud(object):
def __init__(self, machine_id, printer_name, machine_type):
self.endpoint = "https://api.raise3d.com/octoprod-v1.1"
self.url = "/user/keyLogin"
self.machine_id = machine_id
self.machine_type = machine_type
self.machine_name = printer_name
def login_cloud(self, content):
body = {
"machine_id": self.machine_id,
"machine_type": self.machine_type,
"machine_name": self.machine_name,
"key": content
}
url = "{}{}".format(self.endpoint, self.url)
try:
result = requests.post(url=url, json=body, verify=True)
if result.status_code == 200:
data = json.loads(result.text)
state = data["state"] # state 0-绑定到达上线, 1-正常返回token, 3-用户名密码不匹配
message = data["msg"]
if state == 1:
token = data["data"]["token"]
group_name = data["data"]["group_name"]
if data["data"]["team_owner"]:
group_owner = data["data"]["team_owner"]
else:
group_owner = ""
return {"state": 1, "msg": message, "token": token, "group_name": group_name,
"machine_id": self.machine_id, "group_owner": group_owner}
return {"state": state, "msg": message}
return {"state": -1, "msg": "Login error"}
except Exception as e:
return {"state": -1, "msg": "Login error"}
class Util(object):
@staticmethod
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ["raisepem"]
@staticmethod
def decrypt(content):
if content:
secret = 'raiseqwertyuiopa'
key = secret.encode("utf8") # 兼容 python3
decode = base64.b64decode(content)
cryptor = AES.new(key, AES.MODE_ECB)
plain_text = cryptor.decrypt(decode)
unpad = lambda s: s[0:-ord(s[-1:])]
data = json.loads(bytes.decode(unpad(plain_text)))
return {"user_name": data["user_name"]}
return {"user_name": ""}
def access_key(self, file_name, file_path):
"""
:return: (user_name, content)
"""
try:
if self.allowed_file(file_name):
with open(file_path, 'r') as load_f:
content = json.load(load_f)["content"] # to bytes
# content = str.encode(content)
result = self.decrypt(content)
return result["user_name"], content
return "", ""
except Exception as e:
_logger.error(e)
_logger.error("Get access key error...")
return "", ""
def get_access_key(file_name, file_path):
util = Util()
return util.access_key(file_name, file_path)
| [
"[email protected]"
] | |
b2c759567b93cac768c610e6337ebe2ca19626e0 | 735a315ea82893f2acd5ac141f1a9b8be89f5cb9 | /pylib/v6.1.84/mdsscalar.py | 7cf7fe6e0ba174ecd9dc55b37dbdca77b5786088 | [] | no_license | drsmith48/pppl-mdsplus-python | 5ce6f7ccef4a23ea4b8296aa06f51f3a646dd36f | 0fb5100e6718c8c10f04c3aac120558f521f9a59 | refs/heads/master | 2021-07-08T02:29:59.069616 | 2017-10-04T20:17:32 | 2017-10-04T20:17:32 | 105,808,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,108 | py | if '__package__' not in globals() or __package__ is None or len(__package__)==0:
def _mimport(name,level):
return __import__(name,globals())
else:
def _mimport(name,level):
return __import__(name,globals(),{},[],level)
import numpy,copy
_dtypes=_mimport('_mdsdtypes',1)
_data=_mimport('mdsdata',1)
def makeScalar(value):
if isinstance(value,str):
return String(value)
if isinstance(value,Scalar):
return copy.deepcopy(value)
if isinstance(value,numpy.generic):
if isinstance(value,numpy.string_):
return String(value)
try:
if isinstance(value,numpy.bytes_):
return String(str(value,encoding='utf8'))
except:
pass
if isinstance(value,numpy.bool_):
return makeScalar(int(value))
return globals()[value.__class__.__name__.capitalize()](value)
try:
if isinstance(value,long):
return Int64(value)
if isinstance(value,int):
return Int32(value)
except:
if isinstance(value,int):
return Int64(value)
if isinstance(value,float):
return Float32(value)
if isinstance(value,str):
return String(value)
if isinstance(value,bytes):
return String(value.decode())
if isinstance(value,bool):
return Int8(int(value))
if isinstance(value,complex):
return Complex128(numpy.complex128(value))
if isinstance(value,numpy.complex64):
return Complex64(value)
if isinstance(value,numpy.complex128):
return Complex128(value)
raise TypeError('Cannot make Scalar out of '+str(type(value)))
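# Illustrative conversions, readable from the branches above: makeScalar(1.5)
# -> Float32, makeScalar('text') -> String; a plain int maps to Int32 on
# Python 2 and to Int64 on Python 3.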
class Scalar(_data.Data):
def __new__(cls,value=0):
try:
import numpy
_array=_mimport('mdsarray',1)
if (isinstance(value,_array.Array)) or isinstance(value,list) or isinstance(value,numpy.ndarray):
return _array.__dict__[cls.__name__+'Array'](value)
except:
pass
return super(Scalar,cls).__new__(cls)
def __init__(self,value=0):
if self.__class__.__name__ == 'Scalar':
raise TypeError("cannot create 'Scalar' instances")
if self.__class__.__name__ == 'String':
self._value=numpy.string_(value)
return
self._value=numpy.__dict__[self.__class__.__name__.lower()](value)
def __getattr__(self,name):
return self._value.__getattribute__(name)
def _getValue(self):
"""Return the numpy scalar representation of the scalar"""
return self._value
value=property(_getValue)
def __str__(self):
formats={'Int8':'%dB','Int16':'%dW','Int32':'%d','Int64':'0X%0xQ',
'Uint8':'%uBU','Uint16':'%uWU','Uint32':'%uLU','Uint64':'0X%0xQU',
'Float32':'%g'}
ans=formats[self.__class__.__name__] % (self._value,)
if ans=='nan':
ans="$ROPRAND"
elif isinstance(self,Float32) and ans.find('.')==-1:
ans=ans+"."
return ans
def decompile(self):
return str(self)
def __int__(self):
"""Integer: x.__int__() <==> int(x)
@rtype: int"""
return self._value.__int__()
def __long__(self):
"""Long: x.__long__() <==> long(x)
@rtype: int"""
return self._value.__long__()
def _unop(self,op):
return _data.makeData(getattr(self.value,op)())
def _binop(self,op,y):
try:
y=y.value
except AttributeError:
pass
ans=getattr(self.value,op)(y)
return _data.makeData(ans)
def _triop(self,op,y,z):
try:
y=y.value
except AttributeError:
pass
try:
z=z.value
except AttributeError:
pass
return _data.makeData(getattr(self.value,op)(y,z))
def _getMdsDtypeNum(self):
# The DTYPE_* constants live in the _mdsdtypes module imported above as _dtypes.
d = _dtypes
return {'Uint8':d.DTYPE_BU,'Uint16':d.DTYPE_WU,'Uint32':d.DTYPE_LU,'Uint64':d.DTYPE_QU,
'Int8':d.DTYPE_B,'Int16':d.DTYPE_W,'Int32':d.DTYPE_L,'Int64':d.DTYPE_Q,
'String':d.DTYPE_T,
'Float32':d.DTYPE_FS,
'Float64':d.DTYPE_FT,'Complex64':d.DTYPE_FSC,'Complex128':d.DTYPE_FTC}[self.__class__.__name__]
mdsdtype=property(_getMdsDtypeNum)
def all(self):
return self._unop('all')
def any(self):
return self._unop('any')
def argmax(self,*axis):
if axis:
return self._binop('argmax',axis[0])
else:
return self._unop('argmax')
def argmin(self,*axis):
if axis:
return self._binop('argmin',axis[0])
else:
return self._unop('argmin')
def argsort(self,axis=-1,kind='quicksort',order=None):
return _data.makeData(self.value.argsort(axis,kind,order))
def astype(self,type):
return _data.makeData(self.value.astype(type))
def byteswap(self):
return self._unop('byteswap')
def clip(self,y,z):
return self._triop('clip',y,z)
class Int8(Scalar):
"""8-bit signed number"""
class Int16(Scalar):
"""16-bit signed number"""
class Int32(Scalar):
"""32-bit signed number"""
class Int64(Scalar):
"""64-bit signed number"""
class Uint8(Scalar):
"""8-bit unsigned number"""
class Uint16(Scalar):
"""16-bit unsigned number"""
class Uint32(Scalar):
"""32-bit unsigned number"""
class Uint64(Scalar):
"""64-bit unsigned number"""
def _getDate(self):
return _data.Data.execute('date_time($)',self)
date=property(_getDate)
class Float32(Scalar):
"""32-bit floating point number"""
class Complex64(Scalar):
"""32-bit complex number"""
def __str__(self):
return "Cmplx(%g,%g)" % (self._value.real,self._value.imag)
class Float64(Scalar):
"""64-bit floating point number"""
def __str__(self):
return ("%E" % self._value).replace("E","D")
class Complex128(Scalar):
"""64-bit complex number"""
def __str__(self):
return "Cmplx(%s,%s)" % (str(Float64(self._value.real)),str(Float64(self._value.imag)))
class String(Scalar):
"""String"""
def __radd__(self,y):
"""Reverse add: x.__radd__(y) <==> y+x
@rtype: Data"""
return self.execute('$//$',y,self)
def __add__(self,y):
"""Add: x.__add__(y) <==> x+y
@rtype: Data"""
return self.execute('$//$',self,y)
def __str__(self):
"""String: x.__str__() <==> str(x)
@rtype: String"""
if len(self._value) > 0:
return str(self.value.tostring().decode())
else:
return ''
def __len__(self):
return len(str(self))
def decompile(self):
if len(self._value) > 0:
return repr(self._value.tostring())
else:
return "''"
class Int128(Scalar):
"""128-bit number"""
def __init__(self):
raise TypeError("Int128 is not yet supported")
class Uint128(Scalar):
"""128-bit unsigned number"""
def __init__(self):
raise TypeError("Uint128 is not yet supported")
| [
"[email protected]"
] | |
db5ec5010620f9dd0771c6bf99d56cad7dbaf3df | f39d030a25f63f5e60098b7069bfe129d7ffaa4e | /mapwidget/urls.py | 820a130c81cb55f15e47e30f360d29486c1ace39 | [] | no_license | RHoK-November-2012/zelenamapa | d248c4104b7ed497ca29aee1759567db7370f009 | 93080a67107dedeca1c9bc28177cdce4b2e7bc17 | refs/heads/master | 2021-01-15T22:09:16.899268 | 2012-12-10T12:57:26 | 2012-12-10T12:57:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.views.generic.simple import redirect_to
from views import *
urlpatterns = patterns("",
url(r"^(?P<w>\d+)x(?P<h>\d+)/(?P<pk>\d+)/$",
map_view,
name="zm.mapwidget.map", ),
url(r"^(?P<w>\d+)x(?P<h>\d+)/(?P<pk>\d+)/div/$",
map_div_view,
name="zm.mapwidget.map.div", ),
url(r"^mapconfig(?P<pk>\d+).js$",
mapconfig_js_view,
name="zm.mapwidget.map_js", ),
url(r"^mapconfig_div(?P<pk>\d+).js$",
mapconfig_js_view,
{"template": "mapwidget/mapconfig_div.js"},
name="zm.mapwidget.map_div_js", ),
) | [
"[email protected]"
] | |
8cd9345ff2229d47ac210b638d0b05a9febb0fd4 | ec0678f8077702d9a8a1b4abd3e6a41de5ffab39 | /projects/migrations/0002_auto_20210513_0047.py | 135a127972b150d6369ccb74906fb7eabfc1a5c8 | [
"CC0-1.0"
] | permissive | pmburu/brenda-maxi | 17955b50eca09a4362853e0a20f8bbe82bdfa2e7 | 756a813e6d8207ea5fbf6a57097094601d565e8e | refs/heads/main | 2023-05-06T16:04:12.738807 | 2021-05-31T01:44:28 | 2021-05-31T01:44:28 | 366,536,520 | 0 | 0 | CC0-1.0 | 2021-05-15T14:59:58 | 2021-05-11T23:21:20 | null | UTF-8 | Python | false | false | 732 | py | # Generated by Django 3.2.2 on 2021-05-13 00:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'verbose_name': 'Project', 'verbose_name_plural': 'Projects'},
),
migrations.AlterModelOptions(
name='projectdetail',
options={'verbose_name': 'Project Detail', 'verbose_name_plural': 'Project Details'},
),
migrations.AlterField(
model_name='projectdetail',
name='project_detail_description',
field=models.TextField(),
),
]
| [
"[email protected]"
] | |
c1854c18c3be9690a31572a2e09f19b60d45d596 | c5174b07aa1f2eefc9d1aa156973a947c7906591 | /Utilities/custon_logging.py | 78939a565ff30237090a2d3b3b35c5bddab56235 | [] | no_license | rajabhinav02/selenium-python-framework | 5e3a749fda815bbb9eea2a7479e7f80f301bd77c | 69e464fdfcb054b2688291522e28f35b85c0b2e7 | refs/heads/master | 2023-06-07T10:49:55.314883 | 2021-07-13T15:37:33 | 2021-07-13T15:37:33 | 385,606,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import inspect
import logging
def loggingtest(loglevel):
tcname = inspect.stack()[1][3]
logger= logging.getLogger(tcname)
logger.setLevel(logging.DEBUG)
#filehandler = logging.FileHandler("{0}.log".format(tcname))
filehandler = logging.FileHandler("auto.log", mode="a")
filehandler.setLevel(loglevel)
formatter = logging.Formatter("%(asctime)s : %(levelname)s : %(name)s : %(message)s") # 'formatter' avoids shadowing the builtin format()
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
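# Note: each call attaches another FileHandler to the logger named after the
# calling test, so repeated calls from the same test duplicate log lines.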
return logger | [
"[email protected]"
] | |
1bcde3653da01f434076651467117b7d65577dc5 | 5f4707160434d4c01e1839ca9054604643f93e77 | /tests/test.py | acf8637b4d3a6862f059c65b44e2b2a70a24d548 | [
"MIT"
] | permissive | blockspacer/pimpl-cpp-generator | 7e04dc62b2c85b6dfe1b9ea40974453ad3152e1b | e6afe37b6034e613b706b60a80e8daf93f33c1d3 | refs/heads/master | 2021-05-29T20:09:47.297778 | 2015-11-12T08:42:43 | 2015-11-12T08:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | # Copyright (c) 2015 Noto, Yuta
# Released under the MIT license
# http://opensource.org/licenses/mit-license.php
import sys
import os
import pprint
sys.path.append(os.path.dirname(__file__) + '/..')
import pimplgen
from nose.tools import ok_, eq_
BASE_DIR = os.path.dirname(__file__)
def setup_module(module):
# static library path for travis ci (trusty)
pimplgen.cl.Config.set_compatibility_check(False)
pimplgen.cl.Config.set_library_file('/usr/lib/x86_64-linux-gnu/libclang-3.4.so.1')
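# Pinning the libclang shared object by absolute path ties these tests to the
# CI image's clang 3.4 install; other hosts would need their own path here.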
TEST_PARAMETER = [
# 0
({
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic1',
'output_class': 'TBasic1'
},
{'class_decl': 'template < typename U = int > struct',
'class_name': 'Basic1',
'class_sig': 'template < typename U = int > struct Basic1',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [{'name': 'pimplvar0', 'sig': 'int pimplvar0'}, {'name': 'y', 'sig': 'float y'}, {'name': 'pimplvar1', 'sig': 'double pimplvar1'}], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'z', 'sig': 'int z = ( 42 )'}], 'const': False, 'func_name': 'baz', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [], 'const': False, 'func_name': 'qux', 'is_void': False, 'restrict': False, 'result': 'double', 'template_args': [], 'volatile': False},
{'args': [{'name': 't', 'sig': 'T t'}], 'const': False, 'func_name': 'norf', 'is_void': False, 'restrict': False, 'result': 'T', 'template_args': [{'name': 'T', 'sig': 'typename T'}, {'name': 'N', 'sig': 'long N'}], 'volatile': False}],
'template_args': [{'name': 'U', 'sig': 'typename U = int'}]}
),
# 1
({
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic2',
'output_class': 'TBasic2'
},
{'class_decl': 'struct',
'class_name': 'Basic2',
'class_sig': 'struct Basic2',
'constructor_info': [],
'func_info': [{'args': [], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'x', 'sig': 'int x'}], 'const': False, 'func_name': 'baz', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False}],
'template_args': []}
),
# 2
(),
# 3
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic4',
'output_class': 'TBasic4'
},
{'class_decl': 'struct',
'class_name': 'Basic4',
'class_sig': 'struct Basic4',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foofoo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [], 'const': False, 'func_name': 'bar', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': True},
{'args': [{'name': 'a', 'sig': 'char a'}], 'const': True, 'func_name': 'baz', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [], 'const': True, 'func_name': 'qux', 'is_void': False, 'restrict': False, 'result': 'double', 'template_args': [], 'volatile': True}],
'template_args': []}
),
# 4
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'a::Basic5',
'output_class': 'TBasic5'
},
{'class_decl': 'struct',
'class_name': 'Basic5',
'class_sig': 'struct Basic5',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False}],
'template_args': []}
),
# 5
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'a::b::Basic6',
'output_class': 'TBasic6'
},
{'class_decl': 'struct',
'class_name': 'Basic6',
'class_sig': 'struct Basic6',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'int x'}], 'const': False, 'func_name': 'foo', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'b5', 'sig': 'Basic5 & b5'}], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'other', 'sig': 'const Basic6 & other'}], 'const': False, 'func_name': 'operator=', 'is_void': False, 'restrict': False, 'result': 'a::b::Basic6 &', 'template_args': [], 'volatile': False}],
'template_args': []}
)
]
def check_pattern(idx):
pattern = TEST_PARAMETER[idx]
command = '{src_file} -t {target_class} -o {output_class}'.format(**pattern[0])
args = pimplgen.parse_args(command)
generator = pimplgen.PimplGenerator(args)
class_info = generator.parse()
eq_(pattern[1], class_info)
def test_0() : check_pattern(0)
def test_1() : check_pattern(1)
def test_3() : check_pattern(3)
def test_4() : check_pattern(4)
def test_5() : check_pattern(5)
def run_pattern(idx):
pattern = TEST_PARAMETER[idx]
command = '{src_file} -t {target_class} -o {output_class}'.format(**pattern[0])
args = pimplgen.parse_args(command)
generator = pimplgen.PimplGenerator(args)
class_info = generator.parse()
pprint.pprint(class_info, width=300)
print '/////////////////////////////////////////////////////////////////////'
codes = generator.generate_code()
print codes[0]
print ''
print codes[1]
if __name__ == '__main__':
setup_module(None)
run_pattern(5)
| [
"[email protected]"
] | |
af73d519f490016f2e04c80e9be69b7f30392f9c | 329d80ba2b792864aef583fa9ba0f8579ed96f46 | /src/timeslice/viz.py | 726ce7cae1fbce19cf716bf10cd873b63aeecab0 | [
"MIT"
] | permissive | spencerzhang91/GSPNet | fce229b11b23597375abbbe5a8e8bffaa4310551 | ff165de95ec0f258ba444ff343d18d812a066b8f | refs/heads/master | 2022-01-14T15:47:29.409475 | 2019-06-17T03:01:24 | 2019-06-17T03:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | '''
Copyright <2019> <COPYRIGHT Pingcheng Zhang>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
| [
"[email protected]"
] | |
fcabbaa316e2739642e5b6863421db370606196e | 172e4cd05ec0bdf676444f24043791a931b660ce | /polls/tests.py | e52bbc00987c0a2aca7ebe80820d760ebef8ceed | [] | no_license | si-tm/tes_jdango | 61137109f394d68591df872ee14721a54392a4f1 | 88d65337f6959c0b46178dad3d7ec4e9f826737e | refs/heads/main | 2023-01-05T05:42:20.464471 | 2020-10-31T07:48:10 | 2020-10-31T07:48:10 | 308,815,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,635 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from .models import Question
from django.urls import reverse
"""
Split the TestClass per model or view.
Create a separate test method for each set of conditions you want to test.
Give each test method a name that describes what it checks.
"""
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
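# Helper semantics: days > 0 publishes the question in the future, days < 0 in
# the past, matching the offsets used by the view tests below.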
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text) | [
"[email protected]"
] | |
fce4178c6cb9494989c22ab01f8bd58fe80f1f06 | 3f2a1b1298883a147217fac62abf3e447f3328bd | /生成数据/scatter_squares.py | b3435ed3575348c54d41f4fc363df6554455bc17 | [] | no_license | wuxudongxd/python_code | c23a5e665d2f74b0b50839f3230b343b914d4a27 | 4a7bacc8061797c1344d9216813cdc02985fb0a3 | refs/heads/master | 2022-04-09T02:42:30.702003 | 2019-06-27T14:43:35 | 2019-06-27T14:43:35 | 234,686,253 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import matplotlib.pyplot as plt
x_values = list(range(1, 1001))
y_values = [x ** 2 for x in x_values]
plt.scatter(x_values, y_values, c = y_values, cmap = plt.cm.Blues, edgecolor = 'none', s = 40)
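# c=y_values with cmap=plt.cm.Blues shades each point by its y value, while
# edgecolor='none' removes the per-point outline and s=40 sets the marker size.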
# Set the chart title and label the axes.
plt.title("Square Numbers", fontsize = 24)
plt.xlabel("Value", fontsize = 14)
plt.ylabel("Square of Value", fontsize = 14)
# Set the size of the tick labels.
plt.tick_params(axis = 'both', which = 'major', labelsize = 14)
# Set the range of each axis.
plt.axis([0, 1100, 0, 1100000])
plt.savefig('squares_plot.png', bbox_inches = 'tight')
| [
"[email protected]"
] | |
9e8e5607a62fa19a162b1026aab6e20e14275de9 | 1a2bf34d7fc1d227ceebf05edf00287de74259c5 | /Django/Test/LuZhenNan/APP/views.py | 7e131b7ef59abef0102ca853aada9b3ad236a88c | [] | no_license | lzn9423362/Django- | de69fee75160236e397b3bbc165281eadbe898f0 | 8c1656d20dcc4dfc29fb942b2db54ec07077e3ae | refs/heads/master | 2020-03-29T18:03:47.323734 | 2018-11-28T12:07:12 | 2018-11-28T12:07:12 | 150,192,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | import hashlib
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from .models import *
# Create your views here.
def index(request):
username = request.session.get('username')
# .first() returns None when no user matches, so a single render covers both
# the logged-in and anonymous cases.
user = User.objects.filter(username=username).first()
girl = Girl.objects.all()
man = Man.objects.all()
return render(request, 'index.html', {'user': user, 'girl1': girl[0:1], 'girl2': girl[1:6], 'man1': man[0:2], 'man2': man[2:11]})
# 'girl1': girl[0:2], 'girl2':girl[2:7], 'man1': Man[0:2], 'man2': Man[2:11]
def register(request):
return render(request, 'register.html')
def registerhandle(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
email = request.POST.get('email')
User.objects.create(username=username, password=password, email=email)
return redirect(reverse('APP:login'))
def login(request):
return render(request, 'login.html')
def loginhandle(request):
if request.method == 'POST':
username = request.POST.get('phone')
password = request.POST.get('password')
users = User.objects.filter(username=username, password=password)
if users.exists():
request.session['username'] = users.first().username
return redirect(reverse('APP:index'))
else:
return HttpResponse('账号密码错误')
else:
return HttpResponse('请求方式错误')
def logout(request):
request.session.clear()
return redirect(reverse("APP:index"))
def loginajax(request):
username = request.POST.get('value')
try:
user = User.objects.get(username=username)
return JsonResponse({'status': 0})
except:
return JsonResponse({'status': 1})
def my_md5(string):
m = hashlib.md5()
m.update(string.encode())
return m.hexdigest()
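# Note: my_md5 above is never called by these views, so passwords are stored
# and compared as plain text in the register/login handlers.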
| [
"[email protected]"
] | |
780073cc16c8f338f3195e45934b88dd0709ef5b | f777b5e4a98c40f4bfc5c5c9e326faa09beb2d53 | /projects/DensePose/densepose/modeling/cse/utils.py | 18480db5e485dec3bd0daf3cae69263a6abdde4f | [
"Apache-2.0"
] | permissive | alekseynp/detectron2 | 04ae9a47d950ea4c737715b5f2aa7637d3742264 | 2409af0bf0d4bdcc685feb6d2c7fd659828acac4 | refs/heads/master | 2022-05-30T09:13:26.438077 | 2022-04-11T20:59:40 | 2022-04-11T20:59:40 | 254,280,315 | 0 | 1 | Apache-2.0 | 2020-04-09T05:34:15 | 2020-04-09T05:34:14 | null | UTF-8 | Python | false | false | 3,538 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor:
"""
Get squared Euclidean Distance Matrix
Computes pairwise squared Euclidean distances between points
Args:
pts1: Tensor [M x D], M is the number of points, D is feature dimensionality
pts2: Tensor [N x D], N is the number of points, D is feature dimensionality
Return:
Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n)
it contains || pts1[m] - pts2[n] ||^2
"""
edm = torch.mm(-2 * pts1, pts2.t())
edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t()
return edm.contiguous()
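# The computation above uses the expansion ||p - q||^2 = p.p - 2 p.q + q.q:
# torch.mm(-2 * pts1, pts2.t()) supplies the cross terms and the two keepdim
# sums broadcast the squared norms across rows and columns.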
def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(
embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon # pyre-ignore[6]
)
def get_closest_vertices_mask_from_ES(
E: torch.Tensor,
S: torch.Tensor,
h: int,
w: int,
mesh_vertex_embeddings: torch.Tensor,
device: torch.device,
):
"""
Interpolate Embeddings and Segmentations to the size of a given bounding box,
and compute closest vertices and the segmentation mask
Args:
E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the
default-sized box
S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the
default-sized box
h (int): height of the target bounding box
w (int): width of the target bounding box
mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh
N is the number of vertices in the mesh, D is feature dimensionality
device (torch.device): device to move the tensors to
Return:
Closest Vertices (tensor [h, w]), int, for every point of the resulting box
Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box
"""
# pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device)
# pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device)
mask = coarse_segm_resized.argmax(0) > 0
closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device)
all_embeddings = embedding_resized[:, mask].t()
size_chunk = 10_000 # Chunking to avoid possible OOM
edm = []
if len(all_embeddings) == 0:
return closest_vertices, mask
for chunk in range((len(all_embeddings) - 1) // size_chunk + 1):
chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)]
edm.append(
torch.argmin(
squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1
)
)
closest_vertices[mask] = torch.cat(edm)
return closest_vertices, mask
| [
"[email protected]"
] | |
de41515bdfe3faa82c3ce8ed5c220f24b123aac9 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/pythonMongoDB部分/day39/code/mongo1.py | 5b9aef2d4d0dc19f114aaca150810694bc086161 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #coding:utf8
# Index and aggregation operations
from pymongo import MongoClient,IndexModel
conn = MongoClient('localhost',27017)
db = conn.stu
my_set = db.class4
# Create an index and return its name
#index = my_set.ensure_index('name')
#print(index)
# Compound index
#index = my_set.ensure_index([('name',1),('king',-1)])
#print(index)
# Unique and sparse indexes
cls = db.class0
# Unique index
#index = cls.ensure_index('name',unique=True)
# Sparse index
#index = my_set.ensure_index('king_name',sparse=True)
# Drop an index
#my_set.drop_index('name_1')
#my_set.drop_indexes() # drop all indexes
# Create several indexes at once
#index1 = IndexModel([('name',1),('king',-1)])
#index2 = IndexModel([('king_name',1)])
#indexes = my_set.create_indexes([index1,index2])
# List the indexes on a collection
#for i in my_set.list_indexes():
# print(i)
# Aggregation pipeline
l = [{'$group':{'_id':'$king','count':{'$sum':1}}},{'$match':{'count':{'$gt':1}}}]
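# The pipeline groups documents by their 'king' field, counts each group with
# $sum, and keeps only the groups whose count exceeds 1 (field names are from
# this demo collection).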
cursor = my_set.aggregate(l)
for i in cursor:
print(i)
| [
"[email protected]"
] | |
f3b6f4f9df6a54c847740fed5d78cd37e29a0824 | a66a0152c7b37e7aa74ab7fff38704fb45fe3b9c | /img_util/img_test.py | 2673a916c9d93711fe949f572cba75bec8433b21 | [] | no_license | guoyy2017/python_util | 9c7a1351a0fd20fe187ac39972832abffbce3bad | ec90baacd7ca79420a598b701ae960d9337772b8 | refs/heads/master | 2021-06-25T23:47:10.056534 | 2020-10-09T01:01:45 | 2020-10-09T01:01:45 | 129,709,653 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author:maidou
@contact:QQ4113291000
@time: 18/4/23, 4:46 PM
'''
from gevent import monkey
monkey.patch_socket()
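# Patching the socket module before requests is imported below makes its
# blocking network I/O cooperative under gevent's greenlets.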
from gevent.pool import Group
import gevent
import requests
import time
import functools
import traceback
def no_error(func):
@functools.wraps(func)
def wapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
traceback.print_exc()
return None
return wapper
pass
@no_error
def get_urls():
# url = ''
# res = requests.get(url)
# if res.status_code == requests.codes.ok:
# pass
return ["", "", ""]
@no_error
def get_img(url):
print 'get begin url %s ' % url
res = requests.get(url)
print 'get end url %s ' % url
urls = get_urls()
if urls and len(urls) > 0:
group = Group()
start = time.time()
for url in urls:
# get_img(url)
g = gevent.spawn(get_img, url)
group.add(g)
group.join()
end = time.time()
print 'get cost %f, begin %f, end %f' % (end - start, start , end)
if __name__ == '__main__':
pass | [
"[email protected]"
] | |
b605974ab6d3d89ba69d3248a135c89cc71111ec | 5ca02343c366662b60966e060e50e9d6960c0531 | /TX/TX/settings.py | 45f686302330fb2cfde0ecc003da2686115a362c | [] | no_license | yyzhu0817/scrapy | eff5cc68ab25c89fe01c62e2c94e5511dad3fc34 | 9186b127bf49450850028c76142262c6f2c935da | refs/heads/master | 2020-12-10T01:00:34.924969 | 2020-01-20T02:54:58 | 2020-01-20T02:54:58 | 233,465,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,174 | py | # -*- coding: utf-8 -*-
# Scrapy settings for TX project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'TX'
SPIDER_MODULES = ['TX.spiders']
NEWSPIDER_MODULE = 'TX.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'TX (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
}
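# Overriding the defaults mainly swaps in a desktop Chrome User-Agent so target
# sites serve the crawler the same markup as a regular browser.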
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'TX.middlewares.TxSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'TX.middlewares.TxDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'TX.pipelines.TxPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
e94e7557d33b1909175c4988faa350e67ef00853 | 5024bd5585b193310405b8cbcf9f87bef68c8b32 | /processing/process_synthesized_images.py | 6117162e38f0ad66cb043a4c4c2c8e0a6c3749f5 | [] | no_license | san-soucie/NetDissect-Lite | 7374e8a03bca7102cd1cbc081dea019548079267 | 73c744f5eac68fc8a9db73576242ec8bdfb49fa4 | refs/heads/master | 2020-04-08T17:24:04.872861 | 2018-12-07T08:51:44 | 2018-12-07T08:51:44 | 159,565,020 | 0 | 0 | null | 2018-11-28T21:07:55 | 2018-11-28T21:07:54 | null | UTF-8 | Python | false | false | 8,385 | py | import torch
import torch.nn as nn
import torchvision
import os
import argparse
from matplotlib import pyplot as plt
class NetSoftmax(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(NetSoftmax, self).__init__()
self.net = torchvision.models.alexnet(
pretrained=True, num_classes=num_classes)
# aux_logits=aux_logits,
# transform_input=transform_input)
# self.net.classifier = nn.Linear(self.net.classifier.in_features*81, num_classes)
self.sm = nn.Softmax()
def forward(self, x):
x = self.net(x)
x = self.sm(x)
return x
def get_params(filename):
# str.rstrip strips a trailing *character set*, not a suffix, so trim the
# literal '__0.png' ending explicitly before splitting on underscores.
name = os.path.split(filename)[1]
if name.endswith('__0.png'):
name = name[:-len('__0.png')]
v = name.split('_')
unit, category, label, score, layer = v[0:5]
hp = v[5:]
params = {'unit': unit,
'category': category,
'label': label,
'score': score,
'layer': layer,
'hyperparameters': hp}
return params
def process_target_class(filename):
return get_params(filename)['label']
def process_score(filename):
return get_params(filename)['score']
def find_classes(class_filename):
with open(class_filename) as f:
classes = [x.strip() for x in f.readlines()]
class_to_idx = {y: i for i, x in enumerate(classes) for y in x.split(', ')}
full_class_to_idx = {y: i for i, y in enumerate(classes)}
return classes, {**class_to_idx, **full_class_to_idx}
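# The merged dict resolves both each comma-separated synonym and the full class
# line to the same index; entries from full_class_to_idx (merged last) win ties.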
class CustomDataset(torch.utils.data.Dataset):
def __init__(self,
folder_name,
transform=None,
class_filename="imagenet_classes.txt",
stopwords=None):
classes, class_to_idx = find_classes(class_filename)
self.filenames = [os.path.join(folder_name, x) for x in os.listdir(folder_name)]
removes = set()
for stopword in stopwords:
for f in self.filenames:
if stopword in f:
removes |= {f}
self.filenames = [f for f in self.filenames if f not in removes]
self.root = folder_name
samples = [filename for filename in self.filenames]
scores = {filename: float(process_score(filename))
for filename in self.filenames}
self.loader = torchvision.datasets.folder.pil_loader
self.extensions = torchvision.datasets.folder.IMG_EXTENSIONS
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.transform = transform
self.scores = scores
def __getitem__(self, index):
path = self.samples[index]
sample = self.loader(path)
return self.transform(sample) if self.transform else sample, self.get_score(index), path
def get_score(self, index):
path = self.samples[index]
return self.scores[path]
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def main():
parser = argparse.ArgumentParser(description="Creates a scatterplot of scores vs. softmax probabilities")
parser.add_argument("image_folder_name", default="/home/warp/Documents/6.861/NetDissect-Lite/outputs/synthesizing-06:52:34/CORnet-S/clean",
metavar="F", help="Folder containing images to be parsed")
parser.add_argument("-T", "--network_name",
metavar="T", default="CORnet-S", help="Name of neural network generating synthesized images")
parser.add_argument("-O", "--output_image_filename",
metavar="F", default="CORnet-S_Clean_MaxSofmax_vs_IoU.pdf", help="Filename for output image")
args = parser.parse_args()
args.image_folder_names = []
if args.image_folder_name == "LIST":
args.image_folder_names = ["/home/warp/Documents/6.861/NetDissect-Lite/processing/example_synthesized_images"
"/synthesizing-02:07:37",
"/home/warp/Documents/6.861/NetDissect-Lite/processing/example_synthesized_images"
"/synthesizing-02:20:25",
"/home/warp/Documents/6.861/NetDissect-Lite/processing/example_synthesized_images"
"/synthesizing-02:24:08",
"/home/warp/Documents/6.861/NetDissect-Lite/processing/example_synthesized_images"
"/synthesizing-02:25:48",
"/home/warp/Documents/6.861/NetDissect-Lite/processing/example_synthesized_images"
"/synthesizing-04:25:46"]
if not args.image_folder_names:
dataset = CustomDataset(args.image_folder_name,
transform=torchvision.transforms.ToTensor(),
stopwords={})
dataloader = torch.utils.data.DataLoader(dataset)
net = NetSoftmax()
softmax_scores_object = []
iou_scores_object = []
softmax_scores_bad = []
iou_scores_bad = []
softmax_scores_texture = []
iou_scores_texture = []
with torch.no_grad():
for image, score, path in dataloader:
softmaxes = net.forward(image)
if '_bad.' in path[0]:
softmax_scores_bad.append(max(softmaxes.tolist()[0]))
iou_scores_bad.append(score[0].item())
elif "_object_" in path[0]:
softmax_scores_object.append(max(softmaxes.tolist()[0]))
iou_scores_object.append(score[0].item())
else:
softmax_scores_texture.append(max(softmaxes.tolist()[0]))
iou_scores_texture.append(score[0].item())
else:
datasets = [CustomDataset(x,
transform=torchvision.transforms.ToTensor(),
stopwords={'CORnet'}) for x in args.image_folder_names]
dataloaders = [torch.utils.data.DataLoader(d) for d in datasets]
net = NetSoftmax()
softmax_scores_object = []
iou_scores_object = []
softmax_scores_bad = []
iou_scores_bad = []
softmax_scores_texture= []
iou_scores_texture = []
with torch.no_grad():
for d in dataloaders:
for image, score, path in d:
softmaxes = net.forward(image)
if '_bad.' in path[0]:
softmax_scores_bad.append(max(softmaxes.tolist()[0]))
iou_scores_bad.append(score[0].item())
elif "_object_" in path[0]:
softmax_scores_object.append(max(softmaxes.tolist()[0]))
iou_scores_object.append(score[0].item())
else:
softmax_scores_texture.append(max(softmaxes.tolist()[0]))
iou_scores_texture.append(score[0].item())
plt.plot(iou_scores_object, softmax_scores_object, 'bo', label="Top-5 Unit (Object)")
plt.plot(iou_scores_texture, softmax_scores_texture, 'go', label="Top-5 Unit (Texture)")
plt.plot(iou_scores_bad, softmax_scores_bad, 'ro', label="Bottom-5 Unit")
if args.network_name:
plt.title("Maximum softmax probability vs. IoU score (" + args.network_name + ")")
else:
plt.title("Maximum softmax probability vs. IoU score")
plt.xlim(left=0, right=0.6)
plt.ylim(bottom=0, top=1)
plt.xlabel('Unit IoU Score')
plt.ylabel('Maximum Softmax Score')
plt.legend()
img_filename = (args.output_image_filename
if args.output_image_filename
else args.network_name + "softmax_vs_iou.pdf")
plt.savefig(img_filename, bbox_inches='tight')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
68c3596f7b0719e22f39a5bb9add3cf40d285973 | 893f83189700fefeba216e6899d42097cc0bec70 | /bioinformatics/photoscan-pro/python/lib/python3.5/site-packages/qtconsole/jupyter_widget.py | c2a8969866d3d2b0203d59d00013fe1e00dc58b6 | [
"GPL-3.0-only",
"Apache-2.0",
"MIT",
"Python-2.0"
] | permissive | pseudoPixels/SciWorCS | 79249198b3dd2a2653d4401d0f028f2180338371 | e1738c8b838c71b18598ceca29d7c487c76f876b | refs/heads/master | 2021-06-10T01:08:30.242094 | 2018-12-06T18:53:34 | 2018-12-06T18:53:34 | 140,774,351 | 0 | 1 | MIT | 2021-06-01T22:23:47 | 2018-07-12T23:33:53 | Python | UTF-8 | Python | false | false | 22,144 | py | """A FrontendWidget that emulates a repl for a Jupyter kernel.
This supports the additional functionality provided by the Jupyter kernel.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
import os.path
import re
from subprocess import Popen
import sys
import time
from textwrap import dedent
from qtconsole.qt import QtCore, QtGui
from qtconsole import __version__
from traitlets import Bool, Unicode
from .frontend_widget import FrontendWidget
from . import styles
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Default strings to build and display input and output prompts (and separators
# in between)
default_in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
default_out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
default_input_sep = '\n'
default_output_sep = ''
default_output_sep2 = ''
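# For example, default_in_prompt % 3 renders as "In [3]: ", with the prompt
# number wrapped in the styled <span class="in-prompt-number"> element.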
# Base path for most payload sources.
zmq_shell_source = 'ipykernel.zmqshell.ZMQInteractiveShell'
if sys.platform.startswith('win'):
default_editor = 'notepad'
else:
default_editor = ''
#-----------------------------------------------------------------------------
# JupyterWidget class
#-----------------------------------------------------------------------------
class IPythonWidget(FrontendWidget):
"""Dummy class for config inheritance. Destroyed below."""
class JupyterWidget(IPythonWidget):
"""A FrontendWidget for a Jupyter kernel."""
# If set, the 'custom_edit_requested(str, int)' signal will be emitted when
# an editor is needed for a file. This overrides 'editor' and 'editor_line'
# settings.
custom_edit = Bool(False)
custom_edit_requested = QtCore.Signal(object, object)
editor = Unicode(default_editor, config=True,
help="""
A command for invoking a system text editor. If the string contains a
{filename} format specifier, it will be used. Otherwise, the filename
will be appended to the end the command.
""")
editor_line = Unicode(config=True,
help="""
The editor command to use when a specific line number is requested. The
string should contain two format specifiers: {line} and {filename}. If
this parameter is not specified, the line number option to the %edit
magic will be ignored.
""")
style_sheet = Unicode(config=True,
help="""
A CSS stylesheet. The stylesheet can contain classes for:
1. Qt: QPlainTextEdit, QFrame, QWidget, etc
2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
3. QtConsole: .error, .in-prompt, .out-prompt, etc
""")
syntax_style = Unicode(config=True,
help="""
If not empty, use this Pygments style for syntax highlighting.
Otherwise, the style sheet is queried for Pygments style
information.
""")
# Prompts.
in_prompt = Unicode(default_in_prompt, config=True)
out_prompt = Unicode(default_out_prompt, config=True)
input_sep = Unicode(default_input_sep, config=True)
output_sep = Unicode(default_output_sep, config=True)
output_sep2 = Unicode(default_output_sep2, config=True)
# JupyterWidget protected class variables.
_PromptBlock = namedtuple('_PromptBlock', ['block', 'length', 'number'])
_payload_source_edit = 'edit_magic'
_payload_source_exit = 'ask_exit'
_payload_source_next_input = 'set_next_input'
_payload_source_page = 'page'
_retrying_history_request = False
_starting = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
super(JupyterWidget, self).__init__(*args, **kw)
# JupyterWidget protected variables.
self._payload_handlers = {
self._payload_source_edit : self._handle_payload_edit,
self._payload_source_exit : self._handle_payload_exit,
self._payload_source_page : self._handle_payload_page,
self._payload_source_next_input : self._handle_payload_next_input }
self._previous_prompt_obj = None
self._keep_kernel_on_exit = None
# Initialize widget styling.
if self.style_sheet:
self._style_sheet_changed()
self._syntax_style_changed()
else:
self.set_default_style()
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#
# For JupyterWidget, override FrontendWidget methods which implement the
# BaseFrontend Mixin abstract interface
#---------------------------------------------------------------------------
def _handle_complete_reply(self, rep):
"""Support Jupyter's improved completion machinery.
"""
self.log.debug("complete: %s", rep.get('content', ''))
cursor = self._get_cursor()
info = self._request_info.get('complete')
if info and info.id == rep['parent_header']['msg_id'] and \
info.pos == cursor.position():
content = rep['content']
matches = content['matches']
start = content['cursor_start']
end = content['cursor_end']
start = max(start, 0)
end = max(end, start)
# Move the control's cursor to the desired end point
cursor_pos = self._get_input_buffer_cursor_pos()
if end < cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Left,
n=(cursor_pos - end))
elif end > cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Right,
n=(end - cursor_pos))
# This line actually applies the move to control's cursor
self._control.setTextCursor(cursor)
offset = end - start
# Move the local cursor object to the start of the match and
# complete.
cursor.movePosition(QtGui.QTextCursor.Left, n=offset)
self._complete_with_items(cursor, matches)
def _handle_execute_reply(self, msg):
"""Support prompt requests.
"""
msg_id = msg['parent_header'].get('msg_id')
info = self._request_info['execute'].get(msg_id)
if info and info.kind == 'prompt':
content = msg['content']
if content['status'] == 'aborted':
self._show_interpreter_prompt()
else:
number = content['execution_count'] + 1
self._show_interpreter_prompt(number)
self._request_info['execute'].pop(msg_id)
else:
super(JupyterWidget, self)._handle_execute_reply(msg)
def _handle_history_reply(self, msg):
""" Handle history tail replies, which are only supported
by Jupyter kernels.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_client.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items)
def _insert_other_input(self, cursor, content):
"""Insert function for input from other frontends"""
cursor.beginEditBlock()
start = cursor.position()
n = content.get('execution_count', 0)
cursor.insertText('\n')
self._insert_html(cursor, self._make_in_prompt(n))
cursor.insertText(content['code'])
self._highlighter.rehighlightBlock(cursor.block())
cursor.endEditBlock()
def _handle_execute_input(self, msg):
"""Handle an execute_input message"""
self.log.debug("execute_input: %s", msg.get('content', ''))
if self.include_output(msg):
self._append_custom(self._insert_other_input, msg['content'], before_prompt=True)
def _handle_execute_result(self, msg):
"""Handle an execute_result message"""
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
if 'text/plain' in data:
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
text = data['text/plain']
# If the repr is multiline, make sure we start on a new line,
# so that its lines are aligned.
if "\n" in text and not self.output_sep.endswith("\n"):
self._append_plain_text('\n', True)
self._append_plain_text(text + self.output_sep2, True)
def _handle_display_data(self, msg):
"""The base handler for the ``display_data`` message."""
# For now, we don't display data from other frontends, but we
# eventually will as this allows all frontends to monitor the display
# data. But we need to figure out how to handle this in the GUI.
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# In the regular JupyterWidget, we simply print the plain text
# representation.
if 'text/plain' in data:
text = data['text/plain']
self._append_plain_text(text, True)
# This newline seems to be needed for text and html output.
self._append_plain_text(u'\n', True)
def _handle_kernel_info_reply(self, rep):
"""Handle kernel info replies."""
content = rep['content']
self.kernel_banner = content.get('banner', '')
if self._starting:
# finish handling started channels
self._starting = False
super(JupyterWidget, self)._started_channels()
def _started_channels(self):
"""Make a history request"""
self._starting = True
self.kernel_client.kernel_info()
self.kernel_client.history(hist_access_type='tail', n=1000)
#---------------------------------------------------------------------------
# 'FrontendWidget' protected interface
#---------------------------------------------------------------------------
def _process_execute_error(self, msg):
"""Handle an execute_error message"""
content = msg['content']
traceback = '\n'.join(content['traceback']) + '\n'
if False:
# FIXME: For now, tracebacks come as plain text, so we can't use
# the html renderer yet. Once we refactor ultratb to produce
# properly styled tracebacks, this branch should be the default
            traceback = traceback.replace(' ', '&nbsp;')
traceback = traceback.replace('\n', '<br/>')
ename = content['ename']
ename_styled = '<span class="error">%s</span>' % ename
traceback = traceback.replace(ename, ename_styled)
self._append_html(traceback)
else:
# This is the fallback for now, using plain text with ansi escapes
self._append_plain_text(traceback)
def _process_execute_payload(self, item):
""" Reimplemented to dispatch payloads to handler methods.
"""
handler = self._payload_handlers.get(item['source'])
if handler is None:
# We have no handler for this type of payload, simply ignore it
return False
else:
handler(item)
return True
def _show_interpreter_prompt(self, number=None):
""" Reimplemented for IPython-style prompts.
"""
# If a number was not specified, make a prompt number request.
if number is None:
msg_id = self.kernel_client.execute('', silent=True)
info = self._ExecutionRequest(msg_id, 'prompt')
self._request_info['execute'][msg_id] = info
return
# Show a new prompt and save information about it so that it can be
# updated later if the prompt number turns out to be wrong.
self._prompt_sep = self.input_sep
self._show_prompt(self._make_in_prompt(number), html=True)
block = self._control.document().lastBlock()
length = len(self._prompt)
self._previous_prompt_obj = self._PromptBlock(block, length, number)
# Update continuation prompt to reflect (possibly) new prompt length.
self._set_continuation_prompt(
self._make_continuation_prompt(self._prompt), html=True)
def _show_interpreter_prompt_for_reply(self, msg):
""" Reimplemented for IPython-style prompts.
"""
# Update the old prompt number if necessary.
content = msg['content']
# abort replies do not have any keys:
if content['status'] == 'aborted':
if self._previous_prompt_obj:
previous_prompt_number = self._previous_prompt_obj.number
else:
previous_prompt_number = 0
else:
previous_prompt_number = content['execution_count']
if self._previous_prompt_obj and \
self._previous_prompt_obj.number != previous_prompt_number:
block = self._previous_prompt_obj.block
# Make sure the prompt block has not been erased.
if block.isValid() and block.text():
# Remove the old prompt and insert a new prompt.
cursor = QtGui.QTextCursor(block)
cursor.movePosition(QtGui.QTextCursor.Right,
QtGui.QTextCursor.KeepAnchor,
self._previous_prompt_obj.length)
prompt = self._make_in_prompt(previous_prompt_number)
self._prompt = self._insert_html_fetching_plain_text(
cursor, prompt)
# When the HTML is inserted, Qt blows away the syntax
# highlighting for the line, so we need to rehighlight it.
self._highlighter.rehighlightBlock(cursor.block())
self._previous_prompt_obj = None
# Show a new prompt with the kernel's estimated prompt number.
self._show_interpreter_prompt(previous_prompt_number + 1)
#---------------------------------------------------------------------------
# 'JupyterWidget' interface
#---------------------------------------------------------------------------
def set_default_style(self, colors='lightbg'):
""" Sets the widget style to the class defaults.
Parameters
----------
colors : str, optional (default lightbg)
Whether to use the default light background or dark
background or B&W style.
"""
colors = colors.lower()
if colors=='lightbg':
self.style_sheet = styles.default_light_style_sheet
self.syntax_style = styles.default_light_syntax_style
elif colors=='linux':
self.style_sheet = styles.default_dark_style_sheet
self.syntax_style = styles.default_dark_syntax_style
elif colors=='nocolor':
self.style_sheet = styles.default_bw_style_sheet
self.syntax_style = styles.default_bw_syntax_style
else:
raise KeyError("No such color scheme: %s"%colors)
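    # e.g. set_default_style('linux') selects the dark-background palette and
    # 'nocolor' the black-and-white scheme; any other name raises KeyError.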
#---------------------------------------------------------------------------
# 'JupyterWidget' protected interface
#---------------------------------------------------------------------------
def _edit(self, filename, line=None):
""" Opens a Python script for editing.
Parameters
----------
filename : str
A path to a local system file.
line : int, optional
A line of interest in the file.
"""
if self.custom_edit:
self.custom_edit_requested.emit(filename, line)
elif not self.editor:
self._append_plain_text('No default editor available.\n'
'Specify a GUI text editor in the `JupyterWidget.editor` '
'configurable to enable the %edit magic')
else:
try:
filename = '"%s"' % filename
if line and self.editor_line:
command = self.editor_line.format(filename=filename,
line=line)
else:
try:
command = self.editor.format()
except KeyError:
command = self.editor.format(filename=filename)
else:
command += ' ' + filename
except KeyError:
self._append_plain_text('Invalid editor command.\n')
else:
try:
Popen(command, shell=True)
except OSError:
msg = 'Opening editor with command "%s" failed.\n'
self._append_plain_text(msg % command)
def _make_in_prompt(self, number):
""" Given a prompt number, returns an HTML In prompt.
"""
try:
body = self.in_prompt % number
except TypeError:
# allow in_prompt to leave out number, e.g. '>>> '
from xml.sax.saxutils import escape
body = escape(self.in_prompt)
return '<span class="in-prompt">%s</span>' % body
def _make_continuation_prompt(self, prompt):
""" Given a plain text version of an In prompt, returns an HTML
continuation prompt.
"""
end_chars = '...: '
space_count = len(prompt.lstrip('\n')) - len(end_chars)
body = ' ' * space_count + end_chars
return '<span class="in-prompt">%s</span>' % body
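    # e.g. for the prompt 'In [12]: ' (9 characters) this yields '    ...: ',
    # so the continuation prompt lines up with the end of the In prompt.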
def _make_out_prompt(self, number):
""" Given a prompt number, returns an HTML Out prompt.
"""
try:
body = self.out_prompt % number
except TypeError:
# allow out_prompt to leave out number, e.g. '<<< '
from xml.sax.saxutils import escape
body = escape(self.out_prompt)
return '<span class="out-prompt">%s</span>' % body
#------ Payload handlers --------------------------------------------------
# Payload handlers with a generic interface: each takes the opaque payload
# dict, unpacks it and calls the underlying functions with the necessary
# arguments.
def _handle_payload_edit(self, item):
self._edit(item['filename'], item['line_number'])
def _handle_payload_exit(self, item):
self._keep_kernel_on_exit = item['keepkernel']
self.exit_requested.emit(self)
def _handle_payload_next_input(self, item):
self.input_buffer = item['text']
def _handle_payload_page(self, item):
# Since the plain text widget supports only a very small subset of HTML
# and we have no control over the HTML source, we only page HTML
# payloads in the rich text widget.
data = item['data']
if 'text/html' in data and self.kind == 'rich':
self._page(data['text/html'], html=True)
else:
self._page(data['text/plain'], html=False)
#------ Trait change handlers --------------------------------------------
def _style_sheet_changed(self):
""" Set the style sheets of the underlying widgets.
"""
self.setStyleSheet(self.style_sheet)
if self._control is not None:
self._control.document().setDefaultStyleSheet(self.style_sheet)
bg_color = self._control.palette().window().color()
self._ansi_processor.set_background_color(bg_color)
if self._page_control is not None:
self._page_control.document().setDefaultStyleSheet(self.style_sheet)
def _syntax_style_changed(self):
""" Set the style for the syntax highlighter.
"""
if self._highlighter is None:
# ignore premature calls
return
if self.syntax_style:
self._highlighter.set_style(self.syntax_style)
else:
self._highlighter.set_style_sheet(self.style_sheet)
#------ Trait default initializers -----------------------------------------
def _banner_default(self):
return "Jupyter QtConsole {version}\n".format(version=__version__)
# clobber IPythonWidget above:
class IPythonWidget(JupyterWidget):
"""Deprecated class. Use JupyterWidget"""
def __init__(self, *a, **kw):
warn("IPythonWidget is deprecated, use JupyterWidget")
super(IPythonWidget, self).__init__(*a, **kw)
| [
"[email protected]"
] | |
09c5679119c5cc73c586ae0c22dbdc3ea5c11ac5 | 5f490d281ea385a692e7090a28867c47a82ccf61 | /models/base_model.py | f417254e0661a01749b032984b5da28bd79330df | [] | no_license | agoe/FlaskLogicBankSFRS_Demo | 7db0a93255ec8c4961ddcd32f030a7e369f0a8c1 | 62857caee4ba22a870f22b5f00da6f8ce1b742bd | refs/heads/master | 2023-02-12T04:56:00.325377 | 2021-01-06T15:46:37 | 2021-01-06T15:46:37 | 327,120,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import safrs
from safrs import SAFRSBase
db = safrs.DB
# Disabling db_commit enables us to handle the db session ourselves
# instead of letting SAFRS auto-commit.
SAFRSBase.db_commit = False
class BaseModel(SAFRSBase):
    def __new__(cls, *args, **kwargs):
        return object.__new__(cls)
    #__abstract__ = True
    # Optional override of the SAFRS __str__ with a custom repr:
'''
def __str__(self):
return repr(self)
def __repr__(self):
return "<{}: id={}{}>".format(
self.__class__.__name__,
self.id,
f" name={self.name}" if hasattr(self, "name") else "",
)
''' | [
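# Usage sketch (illustrative, not from this repo): concrete models mix
# BaseModel with the SQLAlchemy declarative base, e.g.
#
#   class User(BaseModel, db.Model):
#       __tablename__ = "Users"
#       id = db.Column(db.String, primary_key=True)
#       name = db.Column(db.String, default="")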
"[email protected]"
] | |
2e879594bf48e697d44aa763f51722ea69384e48 | 2e4673cea11375e9e772abd71f71e6a8d9ec69eb | /decompress.py | 770a99aea56f81b2a0d848d06d98222e436578f0 | [] | no_license | lygztq/RLE-compressor | e8ef4ef6dd7a71fd20cf0c35351f8711accedc48 | fa23aef05331dad7204f19c33450134fe20e5cbe | refs/heads/master | 2021-08-24T13:43:25.186957 | 2017-11-21T08:35:03 | 2017-11-21T08:35:03 | 111,517,524 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | #coding=utf-8
from tools import *
import cv2
import numpy as np
import argparse
def main():
compressed = ''
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--compressed",help="The path of compressed file")
arg_parser.add_argument("--window",help="size of compress window")
#arg_parser.add_argument("--output_path",help="path of output compressed file")
args = arg_parser.parse_args()
    if args.compressed:
        compressed = args.compressed
    else:
        print "no compressed file input"
        return
    if args.window:
        # argparse returns a string, but the compressor expects an int
        window_size = int(args.window)
    else:
        window_size = 50
RLE_decompressor = binary_RLE.Binary_RLE_Compressor()
LZ77_decompressor = LZ77.LZ77(window_size)
LZ77_unpack = LZ77_decompressor.decompress(fromFile=True,input_file_path=compressed)
RLE_unpack = RLE_decompressor.decompression(LZ77_unpack)
cv2.imshow('result', RLE_unpack)
cv2.waitKey(0)
save_img_path = compressed[:compressed.find('.')] + '.png'
cv2.imwrite(save_img_path, RLE_unpack)
if __name__ == '__main__':
main()
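# Example invocation (file name is illustrative):
#   python decompress.py --compressed image.lz77 --window 50
# LZ77-decompresses the file, RLE-decodes the result, displays the image
# and writes a .png with the same base name next to the input.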
| [
"[email protected]"
] | |
b51668fb9bbb19d721415138985f985fa747b4e1 | 48db7db6e065af9bfdd9226176a1147dd39b8deb | /dataportalapi.py | 117219b7e92f791e9f7e36477238329fe2dc7c49 | [] | no_license | Najah-lshanableh/data-portal-treemap | 90131f3d8ca19b916fdb1e7a8729eba2869824ad | 35b8bee2eb5384d980724a78d5e97a6c8c771b6e | refs/heads/master | 2020-12-25T22:29:40.693147 | 2013-06-24T19:10:38 | 2013-06-24T19:10:38 | 38,136,477 | 0 | 1 | null | 2015-06-26T22:12:16 | 2015-06-26T22:12:16 | null | UTF-8 | Python | false | false | 2,209 | py | #Connects to the Socrata search API and loads data describing the tabular datasets in the catalog for use by D3 tree map
#Use: python dataportalapi.py > portaldata.json
import requests, json, math, re
def check_categories(d,category):
for i in range(len(d)):
if d[i]['name'] == category: return i
return -1
#found a simpler url construct that works, keeping this here for now
def build_url(category,name,vid):
if category != "None":
category = re.sub('[^0-9a-zA-Z-\s]+', '', category)
category = category.replace(" ","-")
else:
category = "dataset"
name = re.sub('[^0-9a-zA-Z-\s]+', '', name)
name = name.replace(" ","-")
url = sURL + "/" + category + "/" + name + "/" + vid
return url
#This is the url of your Socrata domain
sURL = 'https://data.sfgov.org'
out = []
page = 1
records = 0
total = 2
rwithdata = 0
while records < total:
payload = {'limit' : 100, 'page' : page, 'limitTo' : 'TABLES'}
r = requests.get(sURL + '/api/search/views.json', params=payload)
responses = r.json()
total = responses['count']
for response in responses['results']:
view = response['view']
records += 1
if len(view['columns']) != 0:
rwithdata += 1
name = view['name']
vid = view['id']
views = view['viewCount']
size = view['columns'][0]['cachedContents']['non_null']
if size == 0:
size = 2 #probably should just skip these altogether, for now making them a tiny dataset so LOG(0) doesn't occur
logsize = math.log(size)
if 'category' in view:
category = view['category']
else:
category = "None"
if 'tags' in view:
for tag in view['tags']:
#tags aren't used in the json file yet, these could probably be used to do alternate visualizations or in a companion list, this is just a placeholder for now
foo = tag
index = check_categories(out,category)
url = sURL + '/d/' + vid
if index == -1:
out.append({"name": category, "children": [ {"name": name, "value": size, "url": url, "log": logsize } ] })
else:
out[index]["children"].append({"name": name, "value": size, "url": url, "log": logsize })
page += 1
final = {"name" :" San Francisco Data Portal", "count" : rwithdata, "children" : out}
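# The emitted JSON matches the shape the D3 treemap expects, e.g.:
#   {"name": "...", "count": N, "children": [
#     {"name": "<category>", "children": [
#       {"name": "<dataset>", "value": <rows>, "url": "...", "log": <ln rows>}]}]}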
print json.dumps(final) | [
"[email protected]"
] | |
27caf49568beef7598fd633ed60baf0c983961c5 | 8ed3b7d6f2afcba0162481235c67a51c6a0b0d83 | /apps/stocks/models.py | aec1f7e449a3c1a602dd7b0a1041ae6249895ac6 | [] | no_license | jeszytanada/management_inventory_pjt | 200c12d06f25e19a78c926e81d6ae05fcaa874d3 | c2bf33cf5ea1e3194a4c5705c69296204a11ebd2 | refs/heads/master | 2021-01-12T16:37:23.976797 | 2016-10-26T05:40:03 | 2016-10-26T05:40:03 | 71,419,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | from django.db import models
from django.utils import timezone
from products.models import Product
class StocksAdjustment(models.Model):
BOOL_CHOICES = ((True, 'adding'), (False, 'removing'))
SA = 'Stock Adjustment'
PO = 'Purchase Order'
SO = 'Sales Order'
NO = 'None'
REFERENCE_CHOICES = (
(SA, 'Stock Adjustment'),
(PO, 'Purchase Order'),
(SO, 'Sales Order'),
(NO, 'None'),
)
    # max_length must fit the longest stored choice value, 'Stock Adjustment' (16 chars)
    reference_type = models.CharField(max_length=16, choices=REFERENCE_CHOICES, default=NO)
reference = models.IntegerField(default=0, null=True)
reason = models.TextField(blank=True, null=True)
type = models.BooleanField(choices=BOOL_CHOICES, default=True)
remarks = models.TextField(blank=True, null=True)
date_created = models.DateTimeField(default=timezone.now)
date_updated = models.DateTimeField(auto_now=True)
modified_by = models.ForeignKey('auth.User', null=True)
def __unicode__(self):
return self.reference_type
def __str__(self):
return self.reference_type
def get_fields(self):
return [(field.name, field.value_to_string(self)) for field in StocksAdjustment._meta.fields]
class StocksAdjustmentItems(models.Model):
stock_adjustment = models.ForeignKey(StocksAdjustment, related_name='stock_adjustments', on_delete=models.SET_NULL, null=True)
product = models.ForeignKey(Product, related_name='stock_products', on_delete=models.SET_NULL, null=True)
quantity = models.IntegerField(default=0, null=False)
remarks = models.TextField(blank=True, null=True)
date_created = models.DateTimeField(default=timezone.now)
    def __unicode__(self):
        return str(self.stock_adjustment)
    def __str__(self):
        # must return a string, not the related StocksAdjustment instance
        return str(self.stock_adjustment)
def get_fields(self):
return [(field.name, field.value_to_string(self)) for field in StocksAdjustmentItems._meta.fields] | [
"[email protected]"
] | |
72ff753a9ba4196f39464a93290728c75816d6aa | 5623771414b26c021be54facaaaefbd9314b389d | /week7/DS/DP/Min_sum_path.py | ae37aa64e4f0f1e20de2069fd94641db8a4796da | [] | no_license | saxenasamarth/BootCamp_PythonLearning | 36b705b83c7f0e297931bb8d75cb541088690248 | d5b8fe2d6fcfe54c5a7393f218414b1122f3e49e | refs/heads/master | 2023-04-17T15:29:05.402863 | 2019-08-29T08:46:34 | 2019-08-29T08:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | # Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.
def find_min_sum_path(matrix):
out = [[0 for i in range(len(matrix[0]))] for j in range(len(matrix))]
out[0][0] = matrix[0][0]
for i in range(1, len(matrix)):
out[i][0] = out[i-1][0]+matrix[i][0]
for i in range(1, len(matrix[0])):
out[0][i] = out[0][i-1]+matrix[0][i]
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
out[i][j] = matrix[i][j] + min(out[i-1][j], out[i][j-1])
return out[-1][-1]
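# Worked example for the matrix below: the DP table `out` fills in as
#   1 4 5
#   2 7 6
#   6 8 7
# so the minimum path sum is 7 (path 1 -> 3 -> 1 -> 1 -> 1).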
matrix = [[1,3,1],[1,5,1],[4,2,1]]
print(find_min_sum_path(matrix)) | [
"[email protected]"
] | |
95612c8e2207355469ab70ff6f985fb9fef74ba0 | d6ca0b326f1bd0ce381c6db611f6331096bf4187 | /pypet/tests/_atworema.py | 5862c910f1aa08c6ff96162a56510430111ec8f6 | [
"BSD-3-Clause"
] | permissive | SmokinCaterpillar/pypet | aa35355d70e8f44be015313494376d993f645d80 | 3d454ac65f89e7833baaf89510f73c546e90d8f6 | refs/heads/develop | 2023-08-08T16:01:54.087819 | 2023-02-14T14:59:32 | 2023-02-14T14:59:32 | 12,901,526 | 89 | 22 | BSD-3-Clause | 2023-07-24T00:46:12 | 2013-09-17T17:06:00 | Python | UTF-8 | Python | false | false | 352 | py | __author__ = 'Robert Meyer'
from pypet.tests.testutils.ioutils import run_suite, discover_tests, TEST_IMPORT_ERROR
if __name__ == '__main__':
suite = discover_tests(predicate= lambda class_name, test_name, tags:
class_name != TEST_IMPORT_ERROR)
run_suite(remove=False, folder=None, suite=suite) | [
"[email protected]"
] | |
001fb2cc5db5996dfd5928fc977b7574860dffc1 | b349869bcd55098ee58bfe9f0bb2eb66e6f4dc71 | /MyLearning/ChatBot/google_docs_api.py | cbd4113695e5a9cfe384ed5c96182acf33f4e8e8 | [] | no_license | niteshsrivastava21/MyLearning | ec5206552c461f69d95750f2c1849ceeb56ff896 | 4d0561f360b0dd1b38554c6924cc9eaa9e1167b6 | refs/heads/master | 2020-06-21T10:58:15.590671 | 2020-04-23T12:16:17 | 2020-04-23T12:16:17 | 197,429,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import gspread
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
# https://docs.google.com/spreadsheets/d/e/2PACX-1vSc_2y5N0I67wDU38DjDh35IZSIS30rQf7_NYZhtYYGU1jJYT6_kDx4YpF-qw0LSlGsBYP8pqM_a1Pd/pubhtml#
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
SAMPLE_RANGE_NAME = 'Class Data!A2:E'
def main():
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=SAMPLE_RANGE_NAME).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Name, Major:')
for row in values:
# Print columns A and E, which correspond to indices 0 and 4.
print('%s, %s' % (row[0], row[4]))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
66998385cfd63d8da3209d0c2b35fbf502de5205 | 309b4412972a36ec3a36ee3489f5ecca66894c5e | /quad96/training/actor_critic.py | bb798d5aa0320b73432876907158c04f4f664e60 | [
"BSD-3-Clause"
] | permissive | Genix-Gu/quad96 | 84c11b70c2dd43145c849bafa0bcd661243c3869 | f30929342da4c44b1c05ce03a5b484be2eac7925 | refs/heads/main | 2023-02-02T21:36:28.182229 | 2020-12-23T13:05:25 | 2020-12-23T13:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,747 | py | import numpy as np
import tensorflow as tf
from keras import backend as K
from keras import layers, models, optimizers, regularizers
__author__ = "Ussama Zahid"
__email__ = "[email protected]"
# this file contains the model definitions, i.e. Actor and Critic, and the special actor loss function
# which uses the feedback from the position controller
neurons = 128
class Actor:
"""Actor (Policy) Model."""
def __init__(self, args, state_size, action_size, name):
"""Initialize parameters and build model."""
self.args = args
self.state_size = state_size
self.action_size = action_size
self.name = name
self.lr_actor = 1e-3
self.loss_mse_scale = 0.5
self.build_model()
def build_model(self):
"""Build an actor (policy) network that maps states to actions."""
# special loss for quad
def quadloss(action_grad, actions):
action_gradients, outputs = action_grad[:,:self.action_size], action_grad[:,self.action_size:]
loss1 = K.mean(-action_gradients * actions)
loss2 = K.mean(K.square(outputs - actions))
loss = (1-self.loss_mse_scale)*loss1 + (self.loss_mse_scale)*loss2
return loss
# for gym environments
def gymloss(action_grad, actions):
action_gradients, outputs = action_grad[:,:self.action_size], action_grad[:,self.action_size:]
loss = K.mean(-action_gradients * actions)
return loss
last_activation = None if self.args.export else 'tanh'
states = layers.Input(shape=(1,1, self.state_size,), name='input_data_actor_{}'.format(self.name))
net = layers.Conv2D(neurons, 1)(states)
net = layers.Activation(tf.nn.relu6)(net)
net = layers.Flatten()(net)
net = layers.Dense(neurons)(net)
net = layers.Activation(tf.nn.relu6)(net)
net = layers.Dense(self.action_size)(net)
outputs = layers.Activation(last_activation, name='output_logits_actor_{}'.format(self.name))(net)
self.model = models.Model(inputs=[states], outputs=outputs)
self.model.compile(optimizer=optimizers.Adam(lr=self.lr_actor), loss=quadloss)
class Critic:
"""Critic (Value) Model."""
def __init__(self, state_size, action_size):
"""Initialize parameters and build model."""
self.lr_critic = 0.001
self.state_size = state_size
self.action_size = action_size
self.build_model()
def build_model(self):
# Define input layers
states = layers.Input(shape=(1,1, self.state_size,), name='states')
actions = layers.Input(shape=(self.action_size,), name='actions')
# states
net_states = layers.Flatten()(states)
net = layers.Concatenate()([net_states, actions])
net = layers.Dense(neurons)(net)
net = layers.Activation('relu')(net)
net = layers.Dense(neurons)(net)
net = layers.Activation('relu')(net)
# Add final output layer to produce action values (Q values)
Q_values = layers.Dense(units=1, name='q_values')(net)
# Create Keras model
self.model = models.Model(inputs=[states, actions], outputs=Q_values)
# self.optimizer = optimizers.Adam(self.lr_critic)
self.model.compile(optimizer=optimizers.Adam(self.lr_critic), loss='mse')
action_gradients = K.gradients(Q_values, actions)
# Define an additional function to fetch action gradients (to be used by actor model)
self.get_action_gradients = K.function(
inputs=[*self.model.input, K.learning_phase()],
outputs=action_gradients)
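# Training-loop sketch (an assumption, not part of this file): quadloss
# expects y_true to be the critic's action gradients concatenated with the
# reference controller's outputs, so one update step could look like
#   grads = critic.get_action_gradients([states, actions, 0])[0]
#   y_true = np.concatenate([grads, controller_actions], axis=1)
#   actor.model.train_on_batch(states, y_true)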
| [
"[email protected]"
] | |
d7cab272034def647cc8d74d213a5bd61f80a1cd | 3f5a1ef51620fd8c35ef38064ca5aa00776ab6f4 | /full_speed_educative/dictionary/defination.py | e920f8dbd52c14cb5029ac0ed167f195ae926aff | [] | no_license | poojagmahajan/python_exercises | 1b290a5c0689f703538caf89bca5bc6c1fdb392a | 65539cf31c5b2ad5768d652ed5fe95054ce5f63f | refs/heads/master | 2022-11-12T03:52:13.533781 | 2020-07-04T20:50:29 | 2020-07-04T20:54:46 | 263,151,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py |
### Dictionaries are data structures that index values by a given key (key-value pairs).
ages = {
"purvansh" : 3,
"Pooja" : 28,
"Gaurav" : 30,
"swarit" : 1 ,
"meet" : 10,
"ishan" : 6
}
print("print age of any one -")
print (ages ["purvansh"])
print(ages["Gaurav"])
print("\n print ages of all -")
for name,age in ages.items() :
print(name,age)
address = {} # create an empty dictionary with the {} literal
pincode = dict() # or with the dict() constructor
address["pooja"] = "pune" ## add entries to the empty dictionary
address["purvansh"] = "chinchwad"
for name, city in address.items() : # avoid rebinding the dict's own name
    print("\n", name, city)
d = { ## dictionary keys can be any immutable object and don't necessarily need to be strings
0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2, 2],
}
print ( d[2] )
##You can create an ordered dictionary which preserves the order in which the keys are inserted.
# This is done by importing OrderedDict from the collections module and calling the OrderedDict() constructor.
from collections import OrderedDict
ages = OrderedDict()
ages["ram"] = 20
ages["sham"] = 40
for key, value in ages.items():
print(key, value)
#####Loop to Get All Keys
for key in ages: #for name in ages
print(key)
####or
print(ages.keys())
##### Loop to Get All Values
for name in ages : # iterating a dict yields its keys
    print(ages[name])
#### or
print (ages.values())
######################################
Dict1 = {
"FruitName": "Mango",
"season": "Spring",
}
Dict1.pop("season") ## pop removes the entry for the given key
print(Dict1.values())
print (Dict1) ## print whole dictionary
print (Dict1.values())
print(Dict1.keys())
Dict1.clear() # remove all entries (the dict itself still exists)
print(Dict1) # will print empty braces {}
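###### Safe lookups with get() - returns a default instead of raising KeyError
Dict2 = {"FruitName": "Mango"}
print(Dict2.get("FruitName")) # Mango
print(Dict2.get("season", "unknown")) # unknown, since the key is absent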
| [
"[email protected]"
] | |
caa0850988b9faedb6726b682fb5a8154116b383 | ddd4edc45481e6a7c7141b93e47b974634506d2d | /tradgram/relations/admin.py | 4dbfea14ffdda1239afd8dbc98f9c3eba2c6aaf4 | [
"MIT"
] | permissive | didils/tradgram | 407de9d05d01bc840c5c165155d370f092d82f0d | 4868ca082ab78a1b5b96f25ee9f958567bd1bb1e | refs/heads/master | 2021-11-19T02:47:02.224088 | 2019-04-05T08:19:14 | 2019-04-05T08:19:14 | 148,162,588 | 0 | 0 | MIT | 2021-09-08T00:57:43 | 2018-09-10T13:49:57 | Python | UTF-8 | Python | false | false | 439 | py | from django.contrib import admin
from . import models
# Register your models here.
@admin.register(models.Relation)
class RelationAdmin(admin.ModelAdmin):
search_fields =(
'product1',
'product2',
)
list_filter = (
'product1',
'product2',
'count',
'created_at'
)
list_display = (
'product1',
'product2',
'count',
'created_at'
) | [
"[email protected]"
] | |
cbf46f36fb4141439838b27db0ec705a606c5468 | e905814918e46b2132d1e0b98c11fb53299b2138 | /gather_seq_comps.py | b179b6e0ce64eb034fb84c50e3e1a584a11e3e80 | [] | no_license | HanLabUNLV/HIDL | 0c013a1872be1e3a13960ce0bc2c4fbdf3f0baf9 | a27e01972b734dd1ae4d8c8a6054827139281050 | refs/heads/master | 2021-06-18T02:59:49.330038 | 2021-03-31T23:45:02 | 2021-03-31T23:45:02 | 190,291,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from os import listdir as ls
def readFasta(fn):
fasta = {}
for line in fn:
if line[0]=='>':
fasta[line[1:-1]] = ''
key = line[1:-1]
else:
fasta[key] += line.strip()
return fasta
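# Example input parsed by readFasta (illustrative):
#   >taxonA
#   MKLV
#   GGA
# yields {'taxonA': 'MKLVGGA'}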
aas = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
linker_comp = {'total':0}
domain_comp = {'total':0}
for x in aas:
linker_comp[x] = 0
domain_comp[x] = 0
for fn in ls('LinkerFastasNewest'):
if 'fasta.fas' in fn:
fasta = readFasta(open('LinkerFastasNewest/'+fn,'r'))
for taxa in fasta:
for aa in aas:
linker_comp[aa] += fasta[taxa].count(aa)
linker_comp['total'] += fasta[taxa].count(aa)
for fn in ls('DomainFastasNewest'):
if 'fasta.fas' in fn:
fasta = readFasta(open('DomainFastasNewest/'+fn,'r'))
for taxa in fasta:
for aa in aas:
domain_comp[aa] += fasta[taxa].count(aa)
domain_comp['total'] += fasta[taxa].count(aa)
print('\t'.join(['AA','Linker','Domain']))
for aa in aas:
print('\t'.join([aa,str(float(linker_comp[aa])/linker_comp['total']), str(float(domain_comp[aa])/domain_comp['total'])]))
| [
"[email protected]"
] | |
806e3cd0e19b3608d616b002a8bb2b876ca9e51a | d564c1dcde3a139960e441a732f308dee7bac268 | /code/run5All_Content_PlainUD.py | 517e77577c22a0ae492044444a377776233b03a6 | [] | no_license | m-hahn/left-right-asymmetries | 9b5142dcf822194068feea2ccc0e8cc3b0573bbe | 45e5b40a145e2a9d51c12617dc76be5a49ddf43e | refs/heads/master | 2020-04-26T11:47:57.781431 | 2019-03-22T01:00:48 | 2019-03-22T01:00:48 | 173,528,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from ud_languages import languages
import subprocess
languages = sorted(languages, reverse=True)
for language in languages:
for model in ["REAL_REAL", "REVERSE"]: #, "GROUND"] + (["RANDOM_BY_TYPE"] * 5):
command = ["./python27", "testLeftRightEntUniHDCond3FilterMIWord5_Content_PlainUD_Bugfix.py", language, model]
subprocess.call(command)
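# Each call is equivalent to running, e.g.:
#   ./python27 testLeftRightEntUniHDCond3FilterMIWord5_Content_PlainUD_Bugfix.py <language> REAL_REAL
# with <language> drawn from ud_languages.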
| [
"[email protected]"
] | |
bd0b390b25581621d8345e96bce69c41a2766451 | 8d4472817c83e2279794ed7a6856276e298a0d12 | /karmin/users/apps.py | 7a604544ca3274f9a47d58bb17ad671998520141 | [] | no_license | smirnoffs/karmin | cf2fa257e7a2f09b2b675d30347e557ae87445c8 | 4c1744a38468189f383ac26d9da870f8318a2789 | refs/heads/master | 2021-01-16T00:28:46.414881 | 2017-08-11T20:27:17 | 2017-08-11T20:27:17 | 99,963,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'karmin.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
| [
"[email protected]"
] | |
c692cadb2a52ac7d2cf357c396e4317491d8d0fd | c356b0f0d380b3dee5bf38334e6f162875ea23d9 | /05_scrapy/facebook/facebook/items.py | d9ec76da64665a791194c0c6bc4336f5be1baf87 | [
"MIT"
] | permissive | AlexaVane/Velasco-Yepez-Andres-David-Python | d29bd7f22a1dc69e3e106fd75811aecd129fe23a | 0c017d6e5f169f31207ddec5ceffc8dd82d327eb | refs/heads/master | 2020-08-23T15:16:54.268707 | 2019-02-09T08:52:23 | 2019-02-09T08:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ComentarioItem(scrapy.Item):
username = scrapy.Field()
content = scrapy.Field()
class FacebookItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
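# Usage sketch inside a spider callback (illustrative, not from this project):
#   yield ComentarioItem(username=..., content=...)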
| [
"[email protected]"
] | |
a7aebd8b565455d1449dd73646c2fa0fca0ce815 | c5463bb1f8e883498c627a0212a937818375110c | /volttron/bootstrap.py | 476a977d8aa53d4ec56e2dae8c822d507776cbdc | [
"BSD-2-Clause-Views"
] | permissive | cyrus19901/VOLTTRON-azure | 170a63eb04a40abb6658d22f4eed1e855986d393 | 9422d9dff9deb845d2f0b893199d84aa9dfa05b5 | refs/heads/master | 2021-01-16T00:03:33.069676 | 2017-08-10T19:00:17 | 2017-08-10T19:00:17 | 99,954,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,305 | py | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
'''bootstrap - Prepare a VOLTTRON virtual environment.
Bootstrapping is broken into two stages. The first stage should only be
invoked once per virtual environment. It downloads virtualenv and
creates a virtual Python environment in the virtual environment
directory (defaults to a subdirectory named env in the same directory as
this script). It then executes stage two using the newly installed
virtual environment. Stage two uses the new virtual Python environment
to install VOLTTRON and its dependencies.
If a new dependency is added, this script may be run again using the
Python executable in the virtual environment to re-run stage two:
env/bin/python bootstrap.py
To speed up bootstrapping in a test environment, use the --wheel
feature, which might look something like this:
$ export PIP_WHEEL_DIR=/path/to/cache/wheelhouse
$ export PIP_FIND_LINKS=file://$PIP_WHEEL_DIR
$ mkdir -p $PIP_WHEEL_DIR
$ python2.7 bootstrap.py -o
$ env/bin/python bootstrap.py --wheel
$ env/bin/python bootstrap.py
Instead of setting the environment variables, a pip configuration file
may be used. Look here for more information on configuring pip:
https://pip.pypa.io/en/latest/user_guide.html#configuration
'''
from __future__ import print_function
import argparse
import errno
import logging
import os
import subprocess
import sys
import json
_log = logging.getLogger(__name__)
_WINDOWS = sys.platform.startswith('win')
def shescape(args):
'''Return a sh shell escaped string composed of args.'''
return ' '.join('{1}{0}{1}'.format(arg.replace('"', '\\"'),
'"' if ' ' in arg else '') for arg in args)
def bootstrap(dest, prompt='(volttron)', version=None, verbose=None):
'''Download latest virtualenv and create a virtual environment.
The virtual environment will be created in the given directory. The
shell prompt in the virtual environment can be overridden by setting
prompt and a specific version of virtualenv can be used by passing
the version string into version.
'''
# Imports used only for bootstrapping the environment
import contextlib
import re
import shutil
import tarfile
import tempfile
import urllib2
class EnvBuilder(object):
'''Virtual environment builder.
The resulting python executable will be set in the env_exe
attribute.
'''
__slots__ = ['version', 'prompt', 'env_exe']
def __init__(self, version=None, prompt=None):
'''Allow overriding version and prompt.'''
self.version = version
self.prompt = prompt
self.env_exe = None
def _fetch(self, url):
'''Open url and return the response object (or bail).'''
_log.debug('Fetching %s', url)
response = urllib2.urlopen(url)
if response.getcode() != 200:
_log.error('Server response is %s %s',
response.code, response.msg)
_log.fatal('Download failed!')
sys.exit(1)
return response
def get_version(self):
'''Return the latest version from virtualenv DOAP record.'''
_log.info('Downloading virtualenv DOAP record')
doap_url = ('https://pypi.python.org/pypi'
'?:action=doap&name=virtualenv')
with contextlib.closing(self._fetch(doap_url)) as response:
doap_xml = response.read()
self.version = re.search(
r'<revision>([^<]*)</revision>', doap_xml).group(1)
return self.version
def download(self, directory):
'''Download the virtualenv tarball into directory.'''
if self.version is None:
self.get_version()
url = ('https://pypi.python.org/packages/source/v/virtualenv/'
'virtualenv-{}.tar.gz'.format(self.version))
_log.info('Downloading virtualenv %s', self.version)
tarball = os.path.join(directory, 'virtualenv.tar.gz')
with contextlib.closing(self._fetch(url)) as response:
with open(tarball, 'wb') as file:
shutil.copyfileobj(response, file)
with contextlib.closing(tarfile.open(tarball, 'r|gz')) as archive:
archive.extractall(directory)
def create(self, directory, verbose=None):
'''Create a virtual environment in directory.'''
tmpdir = tempfile.mkdtemp()
try:
self.download(tmpdir)
args = [sys.executable]
args.append(os.path.join(tmpdir, 'virtualenv-{}'.format(
self.version), 'virtualenv.py'))
if verbose is not None:
args.append('--verbose' if verbose else '--quiet')
if self.prompt:
args.extend(['--prompt', prompt])
args.append(directory)
_log.debug('+ %s', shescape(args))
subprocess.check_call(args)
if _WINDOWS:
self.env_exe = os.path.join(
directory, 'Scripts', 'python.exe')
else:
self.env_exe = os.path.join(directory, 'bin', 'python')
assert(os.path.exists(self.env_exe))
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
_log.info('Creating virtual Python environment')
builder = EnvBuilder(prompt=prompt, version=version)
builder.create(dest, verbose)
return builder.env_exe
def pip(operation, args, verbose=None, upgrade=False, offline=False):
'''Call pip in the virtual environment to perform operation.'''
cmd = ['pip', operation]
if verbose is not None:
cmd.append('--verbose' if verbose else '--quiet')
if upgrade and operation == 'install':
cmd.append('--upgrade')
if offline:
cmd.extend(['--retries', '0', '--timeout', '1'])
cmd.extend(args)
_log.info('+ %s', shescape(cmd))
cmd[:0] = [sys.executable, '-m']
subprocess.check_call(cmd)
def update(operation, verbose=None, upgrade=False, offline=False):
'''Install dependencies in setup.py and requirements.txt.'''
from setup import (option_requirements, local_requirements,
optional_requirements)
assert operation in ['install', 'wheel']
wheeling = operation == 'wheel'
path = os.path.dirname(__file__) or '.'
_log.info('%sing required packages', 'Build' if wheeling else 'Install')
if wheeling:
try:
import wheel
except ImportError:
pip('install', ['wheel'], verbose, offline=offline)
# Build option_requirements separately to pass install options
build_option = '--build-option' if wheeling else '--install-option'
for requirement, options in option_requirements:
args = []
for opt in options:
args.extend([build_option, opt])
args.extend(['--no-deps', requirement])
pip(operation, args, verbose, upgrade, offline)
# Build the optional requirements that the user specified via the command
# line.
for requirement in optional_requirements:
pip('install', [requirement], verbose, upgrade, offline)
# Install local packages and remaining dependencies
args = []
for _, location in local_requirements:
args.extend(['--editable', os.path.join(path, location)])
args.extend(['--editable', path])
requirements_txt = os.path.join(path, 'requirements.txt')
if os.path.exists(requirements_txt):
args.extend(['--requirement', requirements_txt])
pip(operation, args, verbose, upgrade, offline)
def main(argv=sys.argv):
'''Script entry point.'''
# Refuse to run as root
if not getattr(os, 'getuid', lambda: -1)():
sys.stderr.write('%s: error: refusing to run as root to prevent '
'potential damage.\n' % os.path.basename(argv[0]))
sys.exit(77)
# Unfortunately, many dependencies are not yet available in Python3.
if sys.version_info[:2] != (2, 7):
sys.stderr.write('error: Python 2.7 is required\n')
sys.exit(1)
# Build the parser
python = os.path.join('$VIRTUAL_ENV',
'Scripts' if _WINDOWS else 'bin', 'python')
if _WINDOWS:
python += '.exe'
parser = argparse.ArgumentParser(
description='Bootstrap and update a virtual Python environment '
'for VOLTTRON development.',
usage='\n bootstrap: python2.7 %(prog)s [options]'
'\n update: {} %(prog)s [options]'.format(python),
prog=os.path.basename(argv[0]),
epilog='''
The first invocation of this script, which should be made
using the system Python, will create a virtual Python
environment in the 'env' subdirectory in the same directory as
this script or in the directory given by the --envdir option.
Subsequent invocations of this script should use the Python
executable installed in the virtual environment.'''
)
verbose = parser.add_mutually_exclusive_group()
verbose.add_argument(
'-q', '--quiet', dest='verbose', action='store_const', const=False,
help='produce less output')
verbose.add_argument(
'-v', '--verbose', action='store_const', const=True,
help='produce extra output')
bs = parser.add_argument_group('bootstrap options')
bs.add_argument(
'--envdir', default=None, metavar='VIRTUAL_ENV',
help='alternate location for virtual environment')
bs.add_argument(
'--force', action='store_true', default=False,
help='force installing in non-empty directory')
bs.add_argument(
'-o', '--only-virtenv', action='store_true', default=False,
help='create virtual environment and exit (skip install)')
bs.add_argument(
'--prompt', default='(volttron)', help='provide alternate prompt '
'in activated environment (default: %(default)s)')
bs.add_argument('--force-version', help=argparse.SUPPRESS)
# allows us to look and see if any of the dynamic optional arguments
# are on the command line. We check this during the processing of the args
# variable at the end of the block. If the option is set then it needs
# to be passed on.
optional_args = []
if os.path.exists('optional_requirements.json'):
po = parser.add_argument_group('Extra packaging options')
with open('optional_requirements.json', 'r') as optional_arguments:
data = json.load(optional_arguments)
for arg, vals in data.items():
optional_args.append(arg)
if 'help' in vals.keys():
po.add_argument(arg, action='store_true', default=False,
help=vals['help'])
else:
po.add_argument(arg, action='store_true', default=False)
# Update options
up = parser.add_argument_group('update options')
up.add_argument(
'--offline', action='store_true', default=False,
help='install from cache without downloading')
ex = up.add_mutually_exclusive_group()
ex.add_argument(
'-u', '--upgrade', action='store_true', default=False,
help='upgrade installed packages')
ex.add_argument(
'-w', '--wheel', action='store_const', const='wheel', dest='operation',
help='build wheels in the pip wheelhouse')
path = os.path.dirname(__file__) or os.getcwd()
parser.set_defaults(envdir=os.path.join(path, 'env'), operation='install')
options = parser.parse_args(argv[1:])
# Route errors to stderr, info and debug to stdout
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setLevel(logging.WARNING)
error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
info_handler = logging.StreamHandler(sys.stdout)
info_handler.setLevel(logging.DEBUG)
info_handler.setFormatter(logging.Formatter('%(message)s'))
root = logging.getLogger()
root.setLevel(logging.DEBUG if options.verbose else logging.INFO)
root.addHandler(error_handler)
root.addHandler(info_handler)
# Main script logic to perform bootstrapping or updating
if hasattr(sys, 'real_prefix'):
# The script was called from a virtual environment Python, so update
update(options.operation, options.verbose,
options.upgrade, options.offline)
else:
# The script was called from the system Python, so bootstrap
try:
# Refuse to create environment in existing, non-empty
# directory without the --force flag.
if os.listdir(options.envdir):
if not options.force:
parser.print_usage(sys.stderr)
print('{}: error: directory exists and is not empty: {}'
.format(parser.prog, options.envdir), file=sys.stderr)
print('Use the virtual Python to update or use '
'the --force option to overwrite.', file=sys.stderr)
parser.exit(1)
_log.warning('using non-empty environment directory: %s',
options.envdir)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
env_exe = bootstrap(options.envdir, options.prompt,
options.force_version, options.verbose)
if options.only_virtenv:
return
# Run this script within the virtual environment for stage2
args = [env_exe, __file__]
if options.verbose is not None:
args.append('--verbose' if options.verbose else '--quiet')
# Transfer dynamic properties to the subprocess call 'update'.
# Clip off the first two characters expecting long parameter form.
for arg in optional_args:
if getattr(options, arg[2:]):
args.append(arg)
subprocess.check_call(args)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
except subprocess.CalledProcessError as exc:
sys.exit(exc.returncode)
| [
"[email protected]"
] | |
9101f98269a25bbd0d6a6a65bc58c9e4c128d376 | 443927accc10828ea1e81c46c38f4f7b0815390c | /armstrong.py | d3a9905eed995beb6394b333b62043b5a5161e61 | [] | no_license | 1PrathameshMore/PPL-assignments | 690042bceba55731c01e99903dd8cb3fc7e50f97 | 7633a001b9bebc7ab2057fa9ab72a9c1fdbaeda3 | refs/heads/master | 2022-10-14T22:00:25.658406 | 2020-06-10T16:55:49 | 2020-06-10T16:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | def armstrong():
d = input()
e = input()
d = int(d)
e = int(e)
if d < e:
for i in range(d,e):
z = noofplaces(i)
c = i
sum = 0
while c > 0 :
r = c % 10
f = pow(r,z)
sum = sum + f
c = c // 10
if sum == i:
print(sum)
return 0
def noofplaces(x):
j = 0
while x > 0:
x = x // 10
j += 1
return j
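# e.g. for inputs 100 and 999 the loop prints the three-digit Armstrong
# numbers 153, 370, 371 and 407 (153 = 1**3 + 5**3 + 3**3).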
armstrong() | [
"[email protected]"
] | |
2e6fadca5613e103fb85f5c3b055556a12ea5d25 | 840f44f053283d19357b611d30e3c0077c9c06ab | /Back-End Projects/Python/django/Getting Started/Survey/Survey/urls.py | 290d51c4778fd62ac5910f027e5e4b576167346c | [] | no_license | Siahnide/All_Projects | a790700eb4ae66a9eb16098d09249666a403df21 | 91aa259fba145af2f972fdb5285d3ee1e7a02d7b | refs/heads/master | 2020-03-26T09:49:30.932028 | 2018-08-14T20:01:49 | 2018-08-14T20:01:49 | 144,766,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | """Survey URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^words', include('apps.words.urls')),
url(r'^$', include('apps.form.urls')),
url(r'^process$', include('apps.form.urls')),
url(r'^card$', include('apps.form.urls')),
]
| [
"[email protected]"
] | |
bfe97c1c044bc1c39b5f9594fdec9c287f465d1d | 407d52d5d9cfb8f04861a92bc97724bf2e07857d | /utils/views.py | 352d299563eb6e04d754988a7d942141e8516f1e | [] | no_license | buddy-israel/win_crawler | 43b6e60d5008d6138f8fea07f56633acced29ff4 | e2daf56e6c7be803146623a2c56f2e46cfc63909 | refs/heads/main | 2023-05-09T03:30:05.238468 | 2021-06-01T17:52:46 | 2021-06-01T17:52:46 | 372,916,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import sqlite3
from sqlite3 import Error
database = "../spider.db"
def create_connection():
conn = None
try:
conn = sqlite3.connect(database)
except Error as e:
print(e)
return conn
def create_views():
    conn = create_connection()
    # Top 10 users by post count (each row is a (UserName, PostCount) tuple)
    view_1 = """SELECT DISTINCT UserName, PostCount FROM User ORDER BY PostCount DESC LIMIT 10;"""
    result = []
    try:
        with conn:
            c = conn.cursor()
            c.execute(view_1)
            result = [row for row in c.fetchall()]
    except Error as e:
        print(e)
for x, y in result:
print(f'{y}\t\t{x}')
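# Output sketch (values illustrative): one "<PostCount>\t\t<UserName>" line
# per row, e.g. "412\t\talice".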
if __name__ == '__main__':
create_views() | [
"[email protected]"
] | |
225ee20b98820cbb8db5fb60d084738de16a3dfd | b0c407f17764bceac693bd84e025f1a1c9231925 | /scrap/tmomanga_com.py | 26b65e1a4cf7d627bd67f0cc58c7991f8543da9d | [] | no_license | fotosycaptura/smg | aa5bbba0f4bdbc0ce077d4f5a2eaa303f26e6adf | af4e9d12c585c274c70e77b75ca059d2c7e02bc5 | refs/heads/master | 2023-08-08T15:30:38.461043 | 2023-06-23T19:22:37 | 2023-06-23T19:22:37 | 227,163,364 | 0 | 0 | null | 2022-03-18T19:27:10 | 2019-12-10T16:12:44 | Python | UTF-8 | Python | false | false | 5,252 | py | import bs4
import requests
import os
from varios import utilerias
import numpy as np
class tmomanga_com:
def __init__(self, version, home_mangas, opciones, listado_mangas):
self.home_mangas = home_mangas
self.opciones = opciones
self.listado_mangas = listado_mangas
self.version = version
def __descargar_imagenes_del_capitulo(self, ruta_manga, url_del_capitulo, capitulo):
Varios = utilerias.utilerias(self.version)
resultado = requests.get(url_del_capitulo, headers=Varios.get_headers())
sopa = bs4.BeautifulSoup(resultado.text, 'lxml')
#imagenes = sopa.select(".tab-content")
imagenes = sopa.find("div", {"id": "images_chapter"})
        # Download all images of the chapter
contador_imagen = 0
for imagen in imagenes.select('img'):
            # Extract the image (url and name)
nombre_file = str(imagen['data-src'])
# print(imagen['data-src'])
nombre_file = str(contador_imagen).zfill(3) + '.jpg'
if (imagen['data-src'] != '/discord.jpg'):
                # Build the final on-disk path
ruta = os.path.join(ruta_manga, capitulo, nombre_file)
ruta = os.path.abspath(ruta)
                # Skip images that already exist on disk
if not(os.path.exists(ruta)):
                    # Fetch the raw image bytes from the web
                    imagen_save = requests.get(imagen['data-src'], headers=Varios.get_headers())
                    # Write the image to disk
f = open (ruta, 'wb')
f.write(imagen_save.content)
f.close()
contador_imagen = contador_imagen + 1
def __get_capitulos(self, url_del_manga, nombre_manga) -> list:
Varios = utilerias.utilerias(self.version)
print(f"url del manga: {url_del_manga}")
        # Collect the chapter numbers and their URLs
resultado = requests.get(url_del_manga, headers=Varios.get_headers())
sopa = bs4.BeautifulSoup(resultado.text, 'lxml')
#capitulos = sopa.find("div", {"id": "chapterlist"})
capitulos = sopa.select('.sub-chap.list-chap')
#print(capitulos[0].select('a')[0]['href'])
listado = []
for capitulo in capitulos:
cap = capitulo.select('a')
for item in cap:
#print(item['href'])
#print(item.getText().strip())
nombre_capitulo = item.getText().strip().replace(nombre_manga, '').strip()
# nombre_capitulo = nombre_capitulo.replace('.', '')
nombre_capitulo = nombre_capitulo.replace('-', '')
nombre_capitulo = nombre_capitulo.replace('?', '')
#nombre_capitulo = nombre_capitulo.replace('.', '')
nombre_capitulo = nombre_capitulo.replace('Capítulo', '').strip()
url = item['href']
try:
nombre_float = float(nombre_capitulo)
if (nombre_float.is_integer()):
listado.append([str(int(nombre_float)).zfill(2), url])
else:
listado.append([str(nombre_float).zfill(4), url])
except:
pass
return listado
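    # __get_capitulos returns entries like ['01', url] or ['04.5', url]:
    # integer chapter numbers are zero-padded to 2 digits, fractional ones to 4.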
def descargar_solo(self, url_manga):
Varios = utilerias.utilerias(self.version)
if(url_manga.startswith("https://tmomanga.com/manga/")):
            # Look up the name of the manga to download
resultado = requests.get(url_manga, headers=Varios.get_headers())
sopa = bs4.BeautifulSoup(resultado.text, 'lxml')
manhwas = sopa.select('h1')
#print(f"Test: {manhwas}")
nombre_manga = manhwas[0].getText()
print(f"Descargando {nombre_manga}")
ruta_manga = Varios.crear_carpeta_manga(self.home_mangas, nombre_manga)
lst_capitulos_del_manga = self.__get_capitulos(url_manga, nombre_manga)
lst_capitulos_del_manga.sort()
# print(lst_capitulos_del_manga)
            # Loop over the chapters and download each one
inicial = 1
final = len(lst_capitulos_del_manga)
for capitulo, url_capitulo in lst_capitulos_del_manga:
                # For each chapter:
                # - create the chapter folder if needed
                # - download the chapter's images if needed
Varios.printProgressBar(inicial, final, prefix = 'Descarga:', suffix = 'Completado', length = 30)
inicial = inicial + 1
if (len(capitulo) > 0):
Varios.crear_carpeta_capitulo(ruta_manga, capitulo)
self.__descargar_imagenes_del_capitulo(ruta_manga, url_capitulo, capitulo)
print('Finalizado.')
input("> ") | [
"[email protected]"
] | |
3d130e94b284e910f8bf1d35df948b77df1983ef | e9ce73e5c3301705b63df42524ee12cff1e32fa9 | /libs/layers/nms/nms_layer.py | a95c95c835bb92ed21877e285ae5f153e5c28fba | [
"Apache-2.0"
] | permissive | tanghanlin/Detectron-PYTORCH | 374e800f888f3b485c7475efb10daa698ed7a23e | 468a8050330db4de1e22509c8b741ad236a55d88 | refs/heads/master | 2023-03-16T06:54:12.937167 | 2018-08-23T03:06:26 | 2018-08-23T03:06:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
from torch.autograd import Function
import torch.nn as nn
from ._ext import nms
class NMSFunction(Function):
def __init__(self, overlap_threshold):
super(NMSFunction, self).__init__()
self.overlap_threshold = overlap_threshold
def forward(self, boxes, scores):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.sort(0, descending=True)[1]
        # boxes (N, 4) and scores (N, 1) are concatenated column-wise into (N, 5) dets
        dets = torch.cat((boxes, scores), dim=1)
keep = torch.LongTensor(dets.size(0))
num_out = torch.LongTensor(1)
if not dets.is_cuda:
nms.cpu_nms(keep, num_out, dets, order, areas, self.overlap_threshold)
return keep[:num_out[0]]
else:
dets = dets[order].contiguous()
nms.gpu_nms(keep, num_out, dets, self.overlap_threshold)
return order[keep[:num_out[0]].cuda()].contiguous()
def backward(self, grad_top):
raise ValueError('backward on NMSFunction should never be called')
class NMSLayer(nn.Module):
def __init__(self, overlap_threshold):
super(NMSLayer, self).__init__()
self.overlap_threshold = overlap_threshold
def forward(self, boxes, scores):
keeps = NMSFunction(self.overlap_threshold)(boxes, scores)
return keeps
def backward(self, grad_top):
raise ValueError('backward on NMSLayer should never be called')
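# Usage sketch (shapes are assumptions):
#   boxes: FloatTensor (N, 4) as x1, y1, x2, y2; scores: FloatTensor (N, 1)
#   layer = NMSLayer(overlap_threshold=0.5)
#   keep = layer(boxes, scores)  # indices of the boxes to keep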
| [
"[email protected]"
] | |
a4cce41e3df9414b4d0faa27cb4e7dc024befcb8 | 4c5328381f53d8b77b56a597cc39a32b55a0c4c2 | /Cura/gui/view3D/printableObjectRenderer.py | 88a57fe5161dce1651e1ffc756679a55a1b9d57a | [] | no_license | sanyaade-iot/Cura2 | 47fc18a8886dcc8537439b699cdb201d92e68683 | b8655a20ca4a03acaa2ada555f57fe415264d944 | refs/heads/master | 2021-01-16T20:06:18.885340 | 2014-06-06T12:51:10 | 2014-06-06T12:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | __author__ = 'Jaime van Kessel'
from OpenGL.GL import *
from Cura.gui import openGLUtils
from Cura.resources import getMesh
from Cura.gui.view3D.renderer import Renderer
class PrintableObjectRenderer(Renderer):
def __init__(self):
super(PrintableObjectRenderer,self).__init__()
self._shader = openGLUtils.GLShader(filename='objectShader.glsl')
def render(self):
self._shader.bind()
for obj in self.scene.getObjects():
mesh = obj.getMesh()
glPushMatrix()
offset = obj.getDrawOffset()
glTranslatef(obj.getPosition()[0], obj.getPosition()[1], obj.getSize()[2] / 2.0)
openGLUtils.glMultiplyMatrix(obj.getTempMatrix())
glTranslatef(offset[0], offset[1], offset[2] - obj.getSize()[2] / 2.0)
openGLUtils.glMultiplyMatrix(obj.getMatrix())
colorStrength = 0.8
if obj.isSelected():
colorStrength = 1.0
if mesh is not None:
for v in mesh.getVolumes():
if 'VertexRenderer' not in v.metaData:
v.metaData['VertexRenderer'] = openGLUtils.VertexRenderer(GL_TRIANGLES, v.vertexData)
glColor3f(1 * colorStrength, 0.5 * colorStrength, 1 * colorStrength)
v.metaData['VertexRenderer'].render()
else:
mesh = getMesh('loading_mesh.stl')
for v in mesh.getVolumes():
if 'VertexRenderer' not in v.metaData:
v.metaData['VertexRenderer'] = openGLUtils.VertexRenderer(GL_TRIANGLES, v.vertexData)
glColor3f(0.5 * colorStrength, 0.5 * colorStrength, 0.5 * colorStrength)
v.metaData['VertexRenderer'].render()
glPopMatrix()
self._shader.unbind()
def focusRender(self):
objIdx = 0
for obj in self.scene.getObjects():
glPushMatrix()
offset = obj.getDrawOffset()
glTranslatef(obj.getPosition()[0], obj.getPosition()[1], obj.getSize()[2] / 2.0)
openGLUtils.glMultiplyMatrix(obj.getTempMatrix())
glTranslatef(offset[0], offset[1], offset[2] - obj.getSize()[2] / 2.0)
openGLUtils.glMultiplyMatrix(obj.getMatrix())
self.setCurrentFocusRenderObject(obj)
mesh = obj.getMesh()
if mesh is not None:
volumeIdx = 0
for v in mesh.getVolumes():
if 'VertexRenderer' not in v.metaData:
v.metaData['VertexRenderer'] = openGLUtils.VertexRenderer(GL_TRIANGLES, v.vertexData)
v.metaData['VertexRenderer'].render()
volumeIdx += 1
else:
volumeIdx = 0
mesh = getMesh('loading_mesh.stl')
for v in mesh.getVolumes():
if 'VertexRenderer' not in v.metaData:
v.metaData['VertexRenderer'] = openGLUtils.VertexRenderer(GL_TRIANGLES, v.vertexData)
v.metaData['VertexRenderer'].render()
volumeIdx += 1
objIdx += 1
glPopMatrix()
| [
"[email protected]"
] | |
0d5fb3722a72d746607b18d92434d47ef39879d8 | c6f15aa103de030f7eea6c1aaf6e7ad0ec88dbc1 | /add/AppMcsv/storage/volume/Volume.py | b21fe01e6712fe2709429c0d0eb031b3f2a0eedd | [] | no_license | sysdeep/dcat | 6f3478348113b0d1206f82456f5bd80431282daf | f8c801173ace4447018c3034c56254ab1a6d4089 | refs/heads/master | 2023-05-03T16:04:28.027335 | 2023-04-17T15:04:04 | 2023-04-17T15:04:04 | 320,551,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gzip
from enum import Enum
import time
DET = "-"*10
class Sections(Enum):
header = 0
body = 1
class Record(object):
def __init__(self):
self.name = ""
self.uuid = ""
self.parent = ""
self.childrens = []
def append(self, ch):
self.childrens.append(ch)
def parse_record(line: str) -> Record:
r = Record()
try:
fields = line.split("|")
r.name = fields[0]
r.uuid = fields[10]
r.parent = fields[9]
except Exception as e:
# also catches records with too few fields (IndexError)
print(e)
return None
return r
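# Record layout assumed from the indices above: a '|'-separated line where
# field 0 is the name, field 9 the parent uuid ("0" for roots) and field 10
# the node's own uuid, e.g.
#   photos|...|...|...|...|...|...|...|...|0|a1b2-c3d4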
class Volume(object):
def __init__(self, full_path):
self.path = full_path
self.name = "undefined"
self.__tree = []
self.__tmap = {}
self.__roots = []
def read_header(self):
fd = gzip.open(self.path, "rt", encoding="utf-8")
sheader = []
section = Sections.header
c = 1000
while True:
line = fd.readline().strip()
if not line:
print("null line")
break
if section == Sections.header:
if line == DET:
break
else:
sheader.append(line)
c -= 1
if c < 0:
print("emerg")
break
fd.close()
for line in sheader:
print(line)
chunks = line.split(":")
if chunks[0] == "name":
self.name = chunks[1]
break
def read_body(self):
self.__tree = []
self.__tmap = {}
fd = gzip.open(self.path, "rt", encoding="utf-8")
t1 = time.time()
section = Sections.header
c = 10000000000
while True:
line = fd.readline().strip()
if not line:
print("null line")
break
if section == Sections.header:
if line == DET:
section = Sections.body
else:
# pass header
pass
elif section == Sections.body:
# print(line)
record = parse_record(line)
if record is None:
# skip lines that fail to parse instead of crashing below
continue
# self.__tree.append(record)
self.__tmap[record.uuid] = record
if record.parent == "0":
self.__roots.append(record)
else:
pass
c -= 1
if c < 0:
print("emerg")
break
print("*"*20)
print("files: ", c)
print("*"*20)
fd.close()
t2 = time.time()
print("parse body time: ", t2-t1)
self.__link()
def __link(self):
for r in self.__tmap.values():
if r.parent == "0":
continue
parent_node = self.__tmap.get(r.parent)
if parent_node:
parent_node.append(r)
def get_root(self) -> list:
# return [r for r in self.__tree if r.parent == "0"]
return self.__roots
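# Usage sketch (the file path is hypothetical; any gzip-compressed catalog
# in the format parsed above will do):
#
#   vol = Volume('/path/to/volume.gz')
#   vol.read_header()
#   vol.read_body()
#   for root in vol.get_root():
#       print(root.name, len(root.childrens))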
| [
"[email protected]"
] | |
c9e49c4e0852af965d1c1a324079541d2dbfbac0 | bf593db51f9a3ef9a4cd8235466e4dd138ee61f2 | /IT 310 - Data Structures and Algorithms/inclass 11_22.py | 1fe740c2e86c0ce9ebd384074173365ffd0ddf94 | [] | no_license | TylerBade/Classwork | 101e35d006cdf5d52079270c0f745a87a3142774 | 43ecc9b46d2d7fe604eac33ca064e9bc1c23302c | refs/heads/master | 2020-03-22T04:55:15.210658 | 2018-10-22T06:53:05 | 2018-10-22T06:53:05 | 137,776,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,693 | py | import sys
from functools import total_ordering
import heapq
class Vertex:
def __init__(self, node):
self.id = node
self.adjacent = {}
# Set distance to infinity for all nodes
self.distance = sys.maxsize
# Mark all nodes unvisited
self.visited = False
# Predecessor
self.previous = None
def add_neighbor(self, neighbor, weight=0):
self.adjacent[neighbor] = weight
def get_connections(self):
return self.adjacent.keys()
def get_id(self):
return self.id
def get_weight(self, neighbor):
return self.adjacent[neighbor]
def set_distance(self, dist):
self.distance = dist
def get_distance(self):
return self.distance
def set_previous(self, prev):
self.previous = prev
def set_visited(self):
self.visited = True
def __hash__(self):
return hash(str(self.id))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.distance == other.distance
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.distance < other.distance
return NotImplemented
def __str__(self):
return str(self.id) + ' adjacent: ' + str([x.id for x in self.adjacent])
class Graph:
def __init__(self):
self.vert_dict = {}
self.num_vertices = 0
def __iter__(self):
return iter(self.vert_dict.values())
def add_vertex(self, node):
self.num_vertices = self.num_vertices + 1
new_vertex = Vertex(node)
self.vert_dict[node] = new_vertex
return new_vertex
def get_vertex(self, n):
if n in self.vert_dict:
return self.vert_dict[n]
else:
return None
def add_edge(self, frm, to, cost = 0):
if frm not in self.vert_dict:
self.add_vertex(frm)
if to not in self.vert_dict:
self.add_vertex(to)
self.vert_dict[frm].add_neighbor(self.vert_dict[to], cost)
self.vert_dict[to].add_neighbor(self.vert_dict[frm], cost)
def get_vertices(self):
return self.vert_dict.keys()
def set_previous(self, current):
self.previous = current
def get_previous(self, current):
return self.previous
def shortest(v, path):
''' make shortest path from v.previous'''
if v.previous:
path.append(v.previous.get_id())
shortest(v.previous, path)
return
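# The path is collected from the target back to the source by following
# `previous` pointers, so callers reverse it for display (see path[::-1]
# in __main__ below).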
def dijkstra(aGraph, start):
print('''Dijkstra's shortest path''')
# Set the distance for the start node to zero
start.set_distance(0)
# Put tuple pair into the priority queue
unvisited_queue = [(v.get_distance(),v) for v in aGraph]
heapq.heapify(unvisited_queue)
while len(unvisited_queue):
# Pops a vertex with the smallest distance
uv = heapq.heappop(unvisited_queue)
current = uv[1]
current.set_visited()
#for next in v.adjacent:
for next in current.adjacent:
# if visited, skip
if next.visited:
continue
new_dist = current.get_distance() + current.get_weight(next)
if new_dist < next.get_distance():
next.set_distance(new_dist)
next.set_previous(current)
print( 'updated : current = %s next = %s new_dist = %s' \
%(current.get_id(), next.get_id(), next.get_distance()))
else:
print('not updated : current = %s next = %s new_dist = %s' \
%(current.get_id(), next.get_id(), next.get_distance()))
# Rebuild heap
# 1. Pop every item
while len(unvisited_queue):
heapq.heappop(unvisited_queue)
# 2. Put all vertices not visited into the queue
unvisited_queue = [(v.get_distance(),v) for v in aGraph if not v.visited]
heapq.heapify(unvisited_queue)
if __name__ == '__main__':
g = Graph()
## g.add_vertex('a')
## g.add_vertex('b')
## g.add_vertex('c')
## g.add_vertex('d')
## g.add_vertex('e')
## g.add_vertex('f')
##
## g.add_edge('a', 'b', 7)
## g.add_edge('a', 'c', 9)
## g.add_edge('a', 'f', 14)
## g.add_edge('b', 'c', 10)
## g.add_edge('b', 'd', 15)
## g.add_edge('c', 'd', 11)
## g.add_edge('c', 'f', 2)
## g.add_edge('d', 'e', 6)
## g.add_edge('e', 'f', 9)
vertadd = ""
nodenum = int(input("How many verteces: "))
while nodenum > 0:
vertadd = input("Add a vertex value: ")
g.add_vertex(vertadd)
nodenum -= 1
firstnode = ""
secondnode = ""
edge = 0
connects = int(input("How many connections: "))
while connects > 0:
firstnode = input("First node to connect: ")
while not g.get_vertex(firstnode):
firstnode = input("Vertex value not found, try again: ")
secondnode = input("Second node to connect: ")
while not g.get_vertex(secondnode):
secondnode = input("Vertex value not found, try again: ")
edge = int(input("How long is the edge: "))
g.add_edge(firstnode, secondnode, edge)
connects -= 1
print('Graph data:')
for v in g:
for w in v.get_connections():
vid = v.get_id()
wid = w.get_id()
print('( %s , %s, %3d)' % ( vid, wid, v.get_weight(w)))
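# NOTE: the source ('a') and target ('e') below are hard-coded; they must be
# among the vertex values entered above, otherwise get_vertex() returns None
# and the calls below fail.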
dijkstra(g, g.get_vertex('a'))
target = g.get_vertex('e')
path = [target.get_id()]
shortest(target, path)
print('The shortest path : %s' %(path[::-1]))
| [
"[email protected]"
] | |
e792f1f203c2d7d9d4238ae0cab0a1923a889d5d | e3fb3fe973bb7801ff6799a8cc2db974bc47a599 | /truck/signals.py | 9dbf3a0b0de9d43d4afad5208d57c98a670bccd9 | [] | no_license | Meals-on-Wheels/food_truck_locator | 734e398218bbccfda8904ef3e7c6ae40ac2f64ec | c34461d609cc433cc2aada56ffb9d6c2fbac057c | refs/heads/main | 2023-03-15T22:49:02.615658 | 2021-03-04T18:19:46 | 2021-03-04T18:19:46 | 329,754,601 | 0 | 0 | null | 2021-03-04T18:19:47 | 2021-01-14T22:58:42 | JavaScript | UTF-8 | Python | false | false | 699 | py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Account(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="profile")
signup_confirmation = models.BooleanField(default=False)
firstName = models.CharField(max_length=40, blank = True)
lastName = models.CharField(max_length=40, blank= True)
email = models.EmailField(max_length=250, blank = True)
@receiver(post_save, sender=User)
def update_profile(sender, instance, created, **kwargs):
if created:
Account.objects.create(user=instance)
instance.profile.save() | [
"[email protected]"
] | |
b174764fa0bbacb443dfd94c15d00bdf50133e72 | 50346c15b656d354b69d390b268bf1d93665d23a | /getliveconfig | e7fe696af3e872999005b38a010f6cab63fa7ab7 | [] | no_license | avongluck-r1soft/production | 9baf007724e2b9d7188b0b77b9ba37c557fe9a2d | ffa2af7e30d08f7a8c050dead5534c3620c1d066 | refs/heads/master | 2021-01-11T21:09:28.804283 | 2016-12-27T22:51:15 | 2016-12-27T22:51:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | #!/usr/bin/env python
import urllib2
import sys
def getLiveConfig(csbm):
url = "http://10.80.65.31:57988/r1rmGA/csbm/"+csbm+"/liveConfig"
print url
response = urllib2.urlopen(url)
content = response.read()
return content
def usage():
print " usage: " + sys.argv[0] + " <CSBM GUID>"
print "example: " + sys.argv[0] + " f8981d09-bb9a-4dad-8947-75ea5bd3a4dc"
def main():
if len(sys.argv) != 2:
usage()
sys.exit(2)
csbm = sys.argv[1]
lc = getLiveConfig(csbm)
print lc
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit()
| [
"[email protected]"
] | ||
78e8717353029c7bc31c28856cf0bd047306f771 | d7e9c29b0ee5383e0b80c8c2da62d6f20991a616 | /core/logger.py | a6c1a022e0ba4b880a1bbb19efbefd8447876032 | [] | no_license | evescn/evescn_hosts_management_tools | f3e046868a6c7b497fd06872006465d47a5d4763 | c9f1e67284e73a729b219a7c6bdd9905ff7c15fb | refs/heads/main | 2023-01-24T20:56:19.860882 | 2020-11-18T06:11:47 | 2020-11-18T06:11:47 | 313,810,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | # -*- coding: utf-8 -*-
# @Author : Evescn
# @time : 2020/11/18 10:06
# @File : logger.py
# @Software : PyCharm
import logging
from conf import setting
def logger(log_type):
logger = logging.getLogger(log_type)
logger.setLevel(setting.LOG_LEVEL)
ch = logging.StreamHandler()
ch.setLevel(setting.LOG_LEVEL)
# log_file = "%s/log/%s" % (setting.BASE_DIR, setting.LOG_TYPES[log_type])
# fh = logging.StreamHandler(log_file)
# fh.setLevel(setting.Log_LEVEL)
# create file handler and set its level from settings
log_file = "%s/log/%s" % (setting.BASE_DIR, setting.LOG_TYPES[log_type])
fh = logging.FileHandler(log_file)
fh.setLevel(setting.LOG_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# logger.addHandler(ch)
logger.addHandler(fh)
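# Usage sketch: log = logger('access'); log.info('hello') — assuming
# setting.LOG_TYPES maps 'access' to a file name such as 'access.log',
# the message is appended to <BASE_DIR>/log/access.log.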
return logger | [
"[email protected]"
] | |
d3ed2e74b0e9dba9944dd11ca896b5016acd263d | 154fd16fe7828cb6925ca8f90e049b754ce06413 | /lino_book/projects/lydia/tests/dumps/18.12.0/teams_team.py | e3d2d31af2866296e853ea6765cf5e65fe6a2a6c | [
"BSD-2-Clause"
] | permissive | lino-framework/book | 68de2f8d130266bd9d9de7576d30597b3cde1c91 | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | refs/heads/master | 2021-03-27T16:16:55.403940 | 2021-03-15T02:53:50 | 2021-03-15T02:53:50 | 58,830,342 | 3 | 9 | BSD-2-Clause | 2021-03-09T13:11:27 | 2016-05-14T21:02:17 | Python | UTF-8 | Python | false | false | 254 | py | # -*- coding: UTF-8 -*-
logger.info("Loading 2 objects to table teams_team...")
# fields: id, ref, name
loader.save(create_teams_team(1,u'E',['Eupen', '', '']))
loader.save(create_teams_team(2,u'S',['St. Vith', '', '']))
loader.flush_deferred_objects()
| [
"[email protected]"
] | |
6b6280abd4bf186e5c161efa413338f2de21d3a0 | 8f75faeed726ff79b36769898f7c81adafaf8553 | /project/models.py | e799b0d9547d080fa7e994e944a85faea9c28ad6 | [
"MIT"
] | permissive | jeremywgleeson/Corec-AutoSchedule-Avail | b20cadc9e0851663b8b06b541603854068dc6cfb | 39084fa2abcad4739a7df6ecffd65803a6c3b33b | refs/heads/master | 2022-12-28T00:33:27.325617 | 2020-10-01T02:36:41 | 2020-10-01T02:36:41 | 296,524,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | from .extensions import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import pyotp
import base64
class User(UserMixin, db.Model):
# __tablename__ = 'flasklogin-users'
id = db.Column(
db.Integer,
primary_key=True
)
username = db.Column(
db.String(40),
unique=True,
nullable=False
)
password = db.Column(
db.String(200),
primary_key=False,
unique=False,
nullable=False
)
hotp_secret = db.Column(
db.String(20),
primary_key=False,
unique=False,
nullable=False
)
counter = db.Column(
db.Integer,
primary_key=False,
unique=False,
nullable=False
)
pin = db.Column(
db.Integer,
primary_key=False,
unique=False,
nullable=False
)
# reference to reservations table
reservations = db.relationship(
'Reservation',
backref=db.backref('user', lazy='joined'),
cascade="all, delete-orphan",
lazy=True
)
# reference to repeating reservations table
repeating_reservations = db.relationship(
'RepeatingReservation',
backref=db.backref('user', lazy='joined'),
cascade="all, delete-orphan, delete",
lazy=True
)
def set_password(self, password):
"""Create hashed password."""
self.password = generate_password_hash(
password,
method='sha256'
)
def check_password(self, password):
"""Check hashed password."""
return check_password_hash(self.password, password)
def get_login_pass(self):
"""Generate login pass for purdue sites"""
hotp = pyotp.HOTP(base64.b32encode(self.hotp_secret.encode()))
hotpPassword = hotp.at(self.counter)
self.counter += 1
password = "{},{}".format(self.pin, hotpPassword)
return password
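# Worked example (hypothetical values): with pin=1234 and counter=0 this
# returns a "pin,OTP" string such as "1234,755224"; the OTP value depends on
# hotp_secret, and the counter is bumped so the next call yields a fresh OTP.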
def __repr__(self):
return '<User {}>'.format(self.username)
class Reservation(db.Model):
# __tablename__ = 'reservation-events'
id = db.Column(
db.Integer,
primary_key=True
)
start_time = db.Column(
db.DateTime,
primary_key=False,
unique=False,
nullable=False
)
end_time = db.Column(
db.DateTime,
primary_key=False,
unique=False,
nullable=False
)
repeating_weekly = db.Column(
db.Boolean,
primary_key=False,
unique=False,
nullable=False
)
# use enum for this, dont know where to define though
# 0 = queued
# 1 = success
# 2 = failed
# 3 = deleted
status = db.Column(
db.Integer,
primary_key=False,
unique=False,
nullable=False
)
user_id = db.Column(
db.Integer,
db.ForeignKey('user.id'),
nullable=False
)
def __repr__(self):
return '<Reservation {}>'.format(self.start_time.strftime("%m/%d/%Y, %H:%M:%S"))
class RepeatingReservation(db.Model):
# __tablename__ = 'reservation-events'
id = db.Column(
db.Integer,
primary_key=True
)
start_time = db.Column(
db.DateTime,
primary_key=False,
unique=False,
nullable=False
)
end_time = db.Column(
db.DateTime,
primary_key=False,
unique=False,
nullable=False
)
user_id = db.Column(
db.Integer,
db.ForeignKey('user.id'),
nullable=False
)
def __repr__(self):
# no `weekday` column exists on this model, so derive it from start_time
return '<RepeatingReservation {} {}>'.format(self.start_time.strftime("%A"), self.start_time.strftime("%H:%M:%S"))
| [
"[email protected]"
] | |
12f14216b2b4a57ff01c2b1c049c8688d0d4cbf8 | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/plot.py | e915a7b2d7e282f5c1642e4f76e9500005a923c2 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 28,013 | py | #
# Quick diagnostic plots.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
def function(f, x, lower=None, upper=None, evaluations=20):
"""
Creates 1d plots of a :class:`LogPDF` or a :class:`ErrorMeasure` around a
point `x` (i.e. a 1-dimensional plot in each direction).
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
f
A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
x
A point in the function's input space.
lower
Optional lower bounds for each parameter, used to specify the lower
bounds of the plot.
upper
Optional upper bounds for each parameter, used to specify the upper
bounds of the plot.
evaluations
The number of evaluations to use in each plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import pints
# Check function and get n_parameters
if not (isinstance(f, pints.LogPDF) or isinstance(f, pints.ErrorMeasure)):
raise ValueError(
'Given function must be pints.LogPDF or pints.ErrorMeasure.')
n_param = f.n_parameters()
# Check point
x = pints.vector(x)
if len(x) != n_param:
raise ValueError(
'Given point `x` must have same number of parameters as function.')
# Check boundaries
if lower is None:
# Guess boundaries based on point x
lower = x * 0.95
lower[lower == 0] = -1
else:
lower = pints.vector(lower)
if len(lower) != n_param:
raise ValueError('Lower bounds must have same number of'
+ ' parameters as function.')
if upper is None:
# Guess boundaries based on point x
upper = x * 1.05
upper[upper == 0] = 1
else:
upper = pints.vector(upper)
if len(upper) != n_param:
raise ValueError('Upper bounds must have same number of'
+ ' parameters as function.')
# Check number of evaluations
evaluations = int(evaluations)
if evaluations < 1:
raise ValueError('Number of evaluations must be greater than zero.')
# Create points to plot
xs = np.tile(x, (n_param * evaluations, 1))
for j in range(n_param):
i1 = j * evaluations
i2 = i1 + evaluations
xs[i1:i2, j] = np.linspace(lower[j], upper[j], evaluations)
# Evaluate points
fs = pints.evaluate(f, xs, parallel=False)
# Create figure
fig, axes = plt.subplots(n_param, 1, figsize=(6, 2 * n_param))
if n_param == 1:
axes = np.asarray([axes], dtype=object)
for j, p in enumerate(x):
i1 = j * evaluations
i2 = i1 + evaluations
axes[j].plot(xs[i1:i2, j], fs[i1:i2], c='green', label='Function')
axes[j].axvline(p, c='blue', label='Value')
axes[j].set_xlabel('Parameter ' + str(1 + j))
axes[j].legend()
plt.tight_layout()
return fig, axes
def function_between_points(f, point_1, point_2, padding=0.25, evaluations=20):
"""
Creates and returns a plot of a function between two points in parameter
space.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
f
A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
point_1
The first point in parameter space. The method will find a line from
``point_1`` to ``point_2`` and plot ``f`` at several points along it.
point_2
The second point.
padding
Specifies the amount of padding around the line segment
``[point_1, point_2]`` that will be shown in the plot.
evaluations
The number of evaluation along the line in parameter space.
"""
import matplotlib.pyplot as plt
import numpy as np
import pints
# Check function and get n_parameters
if not (isinstance(f, pints.LogPDF) or isinstance(f, pints.ErrorMeasure)):
raise ValueError(
'Given function must be pints.LogPDF or pints.ErrorMeasure.')
n_param = f.n_parameters()
# Check points
point_1 = pints.vector(point_1)
point_2 = pints.vector(point_2)
if not (len(point_1) == len(point_2) == n_param):
raise ValueError('Both points must have the same number of parameters'
+ ' as the given function.')
# Check padding
padding = float(padding)
if padding < 0:
raise ValueError('Padding cannot be negative.')
# Check evaluation
evaluations = int(evaluations)
if evaluations < 3:
raise ValueError('The number of evaluations must be 3 or greater.')
# Figure setting
fig, axes = plt.subplots(1, 1, figsize=(6, 4))
axes.set_xlabel('Point 1 to point 2')
axes.set_ylabel('Function')
# Generate some x-values near the given parameters
s = np.linspace(-padding, 1 + padding, evaluations)
# Direction
r = point_2 - point_1
# Calculate function with other parameters fixed
x = [point_1 + sj * r for sj in s]
y = pints.evaluate(f, x, parallel=False)
# Plot
axes.plot(s, y, color='green')
axes.axvline(0, color='#1f77b4', label='Point 1')
axes.axvline(1, color='#7f7f7f', label='Point 2')
axes.legend()
return fig, axes
def histogram(samples, ref_parameters=None, n_percentiles=None):
"""
Takes one or more markov chains or lists of samples as input and creates
and returns a plot showing histograms for each chain or list of samples.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of lists of samples, with shape
``(n_lists, n_samples, n_parameters)``, where ``n_lists`` is the
number of lists of samples, ``n_samples`` is the number of samples in
one list and ``n_parameters`` is the number of parameters.
ref_parameters
A set of parameters for reference in the plot. For example, if true
values of parameters are known, they can be passed in for plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib.pyplot as plt
import numpy as np
# If we switch to Python3 exclusively, bins and alpha can be keyword-only
# arguments
bins = 40
alpha = 0.5
n_list = len(samples)
_, n_param = samples[0].shape
# Check number of parameters
for samples_j in samples:
if n_param != samples_j.shape[1]:
raise ValueError(
'All samples must have the same number of parameters.'
)
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Set up figure
fig, axes = plt.subplots(
n_param, 1, figsize=(6, 2 * n_param),
squeeze=False, # Tell matplotlib to always return a 2d axes object
)
# Plot first samples
for i in range(n_param):
for j_list, samples_j in enumerate(samples):
# Add histogram subplot
axes[i, 0].set_xlabel('Parameter ' + str(i + 1))
axes[i, 0].set_ylabel('Frequency')
if n_percentiles is None:
xmin = np.min(samples_j[:, i])
xmax = np.max(samples_j[:, i])
else:
xmin = np.percentile(samples_j[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples_j[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, 0].hist(
samples_j[:, i], bins=xbins, alpha=alpha,
label='Samples ' + str(1 + j_list))
# Add reference parameters if given
if ref_parameters is not None:
# For histogram subplot
ymin_tv, ymax_tv = axes[i, 0].get_ylim()
axes[i, 0].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
if n_list > 1:
axes[0, 0].legend()
plt.tight_layout()
return fig, axes[:, 0]
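# Usage sketch (assumes `chains` is a list of (n_samples, n_parameters)
# numpy arrays, e.g. the output of a pints MCMC run):
#
#   import pints.plot
#   fig, axes = pints.plot.histogram(chains, n_percentiles=99)
#   fig.savefig('histograms.png')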
def trace(samples, ref_parameters=None, n_percentiles=None):
"""
Takes one or more markov chains or lists of samples as input and creates
and returns a plot showing histograms and traces for each chain or list of
samples.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of lists of samples, with shape
``(n_lists, n_samples, n_parameters)``, where ``n_lists`` is the
number of lists of samples, ``n_samples`` is the number of samples in
one list and ``n_parameters`` is the number of parameters.
ref_parameters
A set of parameters for reference in the plot. For example, if true
values of parameters are known, they can be passed in for plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib.pyplot as plt
import numpy as np
# If we switch to Python3 exclusively, bins and alpha can be keyword-only
# arguments
bins = 40
alpha = 0.5
n_list = len(samples)
_, n_param = samples[0].shape
# Check number of parameters
for samples_j in samples:
if n_param != samples_j.shape[1]:
raise ValueError(
'All samples must have the same number of parameters.'
)
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Set up figure
fig, axes = plt.subplots(
n_param, 2, figsize=(12, 2 * n_param),
# Tell matplotlib to return 2d, even if n_param is 1
squeeze=False,
)
# Plot first samples
for i in range(n_param):
ymin_all, ymax_all = np.inf, -np.inf
for j_list, samples_j in enumerate(samples):
# Add histogram subplot
axes[i, 0].set_xlabel('Parameter ' + str(i + 1))
axes[i, 0].set_ylabel('Frequency')
if n_percentiles is None:
xmin = np.min(samples_j[:, i])
xmax = np.max(samples_j[:, i])
else:
xmin = np.percentile(samples_j[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples_j[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, 0].hist(samples_j[:, i], bins=xbins, alpha=alpha,
label='Samples ' + str(1 + j_list))
# Add trace subplot
axes[i, 1].set_xlabel('Iteration')
axes[i, 1].set_ylabel('Parameter ' + str(i + 1))
axes[i, 1].plot(samples_j[:, i], alpha=alpha)
# Set ylim
ymin_all = ymin_all if ymin_all < xmin else xmin
ymax_all = ymax_all if ymax_all > xmax else xmax
axes[i, 1].set_ylim([ymin_all, ymax_all])
# Add reference parameters if given
if ref_parameters is not None:
# For histogram subplot
ymin_tv, ymax_tv = axes[i, 0].get_ylim()
axes[i, 0].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
# For trace subplot
xmin_tv, xmax_tv = axes[i, 1].get_xlim()
axes[i, 1].plot(
[0.0, xmax_tv],
[ref_parameters[i], ref_parameters[i]],
'--', c='k')
if n_list > 1:
axes[0, 0].legend()
plt.tight_layout()
return fig, axes
def autocorrelation(samples, max_lags=100):
"""
Creates and returns an autocorrelation plot for a given markov chain or
list of `samples`.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
``n_samples`` is the number of samples in the list and ``n_parameters``
is the number of parameters.
max_lags
The maximum autocorrelation lag to plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
fig, axes = plt.subplots(n_param, 1, sharex=True, figsize=(6, 2 * n_param))
if n_param == 1:
axes = np.asarray([axes], dtype=object)
for i in range(n_param):
axes[i].acorr(samples[:, i] - np.mean(samples[:, i]), maxlags=max_lags)
axes[i].set_xlim(-0.5, max_lags + 0.5)
axes[i].legend(['Parameter ' + str(1 + i)], loc='upper right')
# Add x-label to final plot only
axes[i].set_xlabel('Lag')
# Add vertical y-label to middle plot
# fig.text(0.04, 0.5, 'Autocorrelation', va='center', rotation='vertical')
axes[int(i / 2)].set_ylabel('Autocorrelation')
plt.tight_layout()
return fig, axes
def series(samples, problem, ref_parameters=None, thinning=None):
"""
Creates and returns a plot of predicted time series for a given list of
``samples`` and a single-output or multi-output ``problem``.
Because this method runs simulations, it can take a considerable time to
run.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
`n_samples` is the number of samples in the list and ``n_parameters``
is the number of parameters.
problem
A :class:``pints.SingleOutputProblem`` or
:class:``pints.MultiOutputProblem`` of a n_parameters equal to or
greater than the ``n_parameters`` of the `samples`. Any extra
parameters present in the chain but not accepted by the
``SingleOutputProblem`` or ``MultiOutputProblem`` (for example
parameters added by a noise model) will be ignored.
ref_parameters
A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
thinning
An integer greater than zero. If specified, only every
n-th sample (with ``n = thinning``) in the samples will be used. If
left at the default value ``None``, a value will be chosen so that
200 to 400 predictions are shown.
"""
import matplotlib.pyplot as plt
import numpy as np
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
# Get problem n_parameters
n_parameters = problem.n_parameters()
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param and \
len(ref_parameters) != n_parameters:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
ref_series = problem.evaluate(ref_parameters[:n_parameters])
# Get number of problem output
n_outputs = problem.n_outputs()
# Get thinning rate
if thinning is None:
thinning = max(1, int(n_sample / 200))
else:
thinning = int(thinning)
if thinning < 1:
raise ValueError(
'Thinning rate must be `None` or an integer greater than'
' zero.')
# Get times
times = problem.times()
# Evaluate the model for all parameter sets in the samples
i = 0
predicted_values = []
for params in samples[::thinning, :n_parameters]:
predicted_values.append(problem.evaluate(params))
i += 1
predicted_values = np.array(predicted_values)
mean_values = np.mean(predicted_values, axis=0)
# Guess appropriate alpha (0.05 worked for 1000 plots)
alpha = max(0.05 * (1000 / (n_sample / thinning)), 0.5)
# Plot prediction
fig, axes = plt.subplots(n_outputs, 1, figsize=(8, np.sqrt(n_outputs) * 3),
sharex=True)
if n_outputs == 1:
plt.xlabel('Time')
plt.ylabel('Value')
plt.plot(
times, problem.values(), 'x', color='#7f7f7f', ms=6.5, alpha=0.5,
label='Original data')
plt.plot(
times, predicted_values[0], color='#1f77b4',
label='Inferred series')
for v in predicted_values[1:]:
plt.plot(times, v, color='#1f77b4', alpha=alpha)
plt.plot(times, mean_values, 'k:', lw=2,
label='Mean of inferred series')
# Add reference series if given
if ref_parameters is not None:
plt.plot(times, ref_series, color='#d62728', ls='--',
label='Reference series')
plt.legend()
elif n_outputs > 1:
# Remove horizontal space between axes and set common xlabel
fig.subplots_adjust(hspace=0)
axes[-1].set_xlabel('Time')
# Go through each output
for i_output in range(n_outputs):
axes[i_output].set_ylabel('Output %d' % (i_output + 1))
axes[i_output].plot(
times, problem.values()[:, i_output], 'x', color='#7f7f7f',
ms=6.5, alpha=0.5, label='Original data')
axes[i_output].plot(
times, predicted_values[0][:, i_output], color='#1f77b4',
label='Inferred series')
for v in predicted_values[1:]:
axes[i_output].plot(times, v[:, i_output], color='#1f77b4',
alpha=alpha)
axes[i_output].plot(times, mean_values[:, i_output], 'k:', lw=2,
label='Mean of inferred series')
# Add reference series if given
if ref_parameters is not None:
axes[i_output].plot(times, ref_series[:, i_output],
color='#d62728', ls='--',
label='Reference series')
axes[0].legend()
plt.tight_layout()
return fig, axes
def pairwise(samples,
kde=False,
heatmap=False,
opacity=None,
ref_parameters=None,
n_percentiles=None):
"""
Takes a markov chain or list of ``samples`` and creates a set of pairwise
scatterplots for all parameters (p1 versus p2, p1 versus p3, p2 versus p3,
etc.).
The returned plot is in a 'matrix' form, with histograms of each individual
parameter on the diagonal, and scatter plots of parameters ``i`` and ``j``
on each entry ``(i, j)`` below the diagonal.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
``n_samples`` is the number of samples in the list and ``n_parameters``
is the number of parameters.
kde
Set to ``True`` to use kernel-density estimation for the
histograms and scatter plots. Cannot use together with ``heatmap``.
heatmap
Set to ``True`` to plot heatmap for the pairwise plots.
Cannot be used together with ``kde``.
Opacity
This value can be used to manually set the opacity of the
points in the scatter plots (when ``kde=False`` and ``heatmap=False``
only).
ref_parameters
A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import warnings
from distutils.version import LooseVersion
# Check matplotlib version
use_old_matplotlib = LooseVersion(matplotlib.__version__) \
< LooseVersion("2.2")
# Check options kde and heatmap
if kde and heatmap:
raise ValueError('Cannot use `kde` and `heatmap` together.')
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
# Check number of parameters
if n_param < 2:
raise ValueError('Number of parameters must be larger than 2.')
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Create figure
fig_size = (3 * n_param, 3 * n_param)
fig, axes = plt.subplots(n_param, n_param, figsize=fig_size)
bins = 25
for i in range(n_param):
for j in range(n_param):
if i == j:
# Diagonal: Plot a histogram
if n_percentiles is None:
xmin, xmax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, j].set_xlim(xmin, xmax)
if use_old_matplotlib: # pragma: no cover
axes[i, j].hist(samples[:, i], bins=xbins, normed=True)
else:
axes[i, j].hist(samples[:, i], bins=xbins, density=True)
# Add kde plot
if kde:
x = np.linspace(xmin, xmax, 100)
axes[i, j].plot(x, stats.gaussian_kde(samples[:, i])(x))
# Add reference parameters if given
if ref_parameters is not None:
ymin_tv, ymax_tv = axes[i, j].get_ylim()
axes[i, j].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
elif i < j:
# Top-right: no plot
axes[i, j].axis('off')
else:
# Lower-left: Plot the samples as density map
if n_percentiles is None:
xmin, xmax = np.min(samples[:, j]), np.max(samples[:, j])
ymin, ymax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, j],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, j],
50 + n_percentiles / 2.)
ymin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
ymax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
axes[i, j].set_xlim(xmin, xmax)
axes[i, j].set_ylim(ymin, ymax)
if not kde and not heatmap:
# Create scatter plot
# Determine point opacity
num_points = len(samples[:, i])
if opacity is None:
if num_points < 10:
opacity = 1.0
else:
opacity = 1.0 / np.log10(num_points)
# Scatter points
axes[i, j].scatter(
samples[:, j], samples[:, i], alpha=opacity, s=0.1)
elif kde:
# Create a KDE-based plot
# Plot values
values = np.vstack([samples[:, j], samples[:, i]])
axes[i, j].imshow(
np.rot90(values), cmap=plt.cm.Blues,
extent=[xmin, xmax, ymin, ymax])
# Create grid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
# Get kernel density estimate and plot contours
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
axes[i, j].contourf(xx, yy, f, cmap='Blues')
axes[i, j].contour(xx, yy, f, colors='k')
# Force equal aspect ratio
# Matplotlib raises a warning here (on 2.7 at least)
# We can't do anything about it, so no other option than
# to suppress it at this stage...
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
axes[i, j].set_aspect((xmax - xmin) / (ymax - ymin))
elif heatmap:
# Create a heatmap-based plot
# Create bins
xbins = np.linspace(xmin, xmax, bins)
ybins = np.linspace(ymin, ymax, bins)
# Plot heatmap
axes[i, j].hist2d(samples[:, j], samples[:, i],
bins=(xbins, ybins), cmap=plt.cm.Blues)
# Force equal aspect ratio
# Matplotlib raises a warning here (on 2.7 at least)
# We can't do anything about it, so no other option than
# to suppress it at this stage...
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
axes[i, j].set_aspect((xmax - xmin) / (ymax - ymin))
# Add reference parameters if given
if ref_parameters is not None:
axes[i, j].plot(
[ref_parameters[j], ref_parameters[j]],
[ymin, ymax],
'--', c='k')
axes[i, j].plot(
[xmin, xmax],
[ref_parameters[i], ref_parameters[i]],
'--', c='k')
# Set tick labels
if i < n_param - 1:
# Only show x tick labels for the last row
axes[i, j].set_xticklabels([])
else:
# Rotate the x tick labels to fit in the plot
for tl in axes[i, j].get_xticklabels():
tl.set_rotation(45)
if j > 0:
# Only show y tick labels for the first column
axes[i, j].set_yticklabels([])
# Set axis labels
axes[-1, i].set_xlabel('Parameter %d' % (i + 1))
if i == 0:
# The first one is not a parameter
axes[i, 0].set_ylabel('Frequency')
else:
axes[i, 0].set_ylabel('Parameter %d' % (i + 1))
return fig, axes
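# Usage sketch (assumes `chain` is one (n_samples, n_parameters) array with
# at least two parameters, as enforced above):
#
#   import pints.plot
#   fig, axes = pints.plot.pairwise(chain, kde=True)
#   fig.savefig('pairwise.png')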
| [
"[email protected]"
] | |
51e2d060a963f7a2fab9bc18b2a90a3b1995e848 | 750ab8655c293ead5f835a70bafccf1994f6e232 | /jade/run_manager/wandb.py | 4a8ea83f1caef81da54544cca86acfd2965b4b57 | [
"MIT"
] | permissive | mfederici/JADE | ae1281d131d073832b2846d10c548bf69b244071 | 9a581af29085d80ba2fb5292a9836b5c241aadfc | refs/heads/master | 2023-06-08T22:53:44.261365 | 2021-07-05T10:03:28 | 2021-07-05T10:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,898 | py | import wandb
import os
from jade.run_manager.base import RunManager, BACKUP_NAME, CODE_DIR, flatten_config, inflate_config
import matplotlib.pyplot as plt
class WANDBRunManager(RunManager):
def __init__(self, config=None, run_name=None, run_id=None, verbose=False,
code_dir=CODE_DIR, username=None, project=None,
wandb_dir=None, wandb_verbose=False):
self.verbose = verbose
if not wandb_verbose:
os.environ["WANDB_SILENT"] = "true"
if config is None and run_id is None:
raise Exception('Please specify an existing run_id or a configuration for a new run')
if not (run_id is None) and self.verbose and not (config is None):
print('Warning: the specified configuration will be overwritten by the one of the specified run_id')
if 'WANDB_PROJECT' in os.environ and project is None:
self.PROJECT = os.environ['WANDB_PROJECT']
elif not (project is None):
self.PROJECT = project
else:
raise Exception(
'In order to use the wandb framework the environment variable WANDB_PROJECT needs to be set')
if 'WANDB_USER' in os.environ and username is None:
self.USER = os.environ['WANDB_USER']
elif not (username is None):
self.USER = username
else:
raise Exception('In order to use the wandb framework the environment variable WANDB_USER needs to be set')
if 'WANDB_DIR' in os.environ and wandb_dir is None:
wandb_dir = os.environ['WANDB_DIR']
if verbose:
print('Weights and Biases root directory: %s' % wandb_dir)
# Instantiate api
self.api = wandb.Api()
run_exists = self.run_exists(run_id)
# If the run exists read the configuration
if run_exists:
config = self.read_config(run_id)
print('Resuming run %s' % run_id)
# Initialize wandb with the flattened configuration
flat_config = flatten_config(config) # wandb can't process nested dictionaries
resume = run_exists
wandb.init(name=run_name, project=self.PROJECT, config=flat_config, dir=wandb_dir,
resume=resume, id=(run_id if run_exists else None), save_code=False)
# Use the wandb config as the new one
flat_config = dict(wandb.config)
config = inflate_config(flat_config)
# And save the run_object
self.wandb_run = wandb.run
# If resuming a run, download the run code and set it as the code directory
if resume:
self.download_code(self.wandb_run.dir)
code_dir = os.path.join(self.wandb_run.dir, CODE_DIR)
super(WANDBRunManager, self).__init__(run_name=run_name, run_id=self.wandb_run.id, run_dir=self.wandb_run.dir,
config=config, resume=resume, verbose=verbose,
code_dir=code_dir)
def run_exists(self, run_id):
if run_id is None:
success = False
else:
try:
run = self.api.run('%s/%s/%s' % (self.USER, self.PROJECT, run_id))
success = True
if self.verbose:
print('Run %s/%s/%s has been found' % (self.USER, self.PROJECT, run_id))
except Exception as e:
print(e)
success = False
return success
def download_code(self, download_dir):
run = self.api.run('%s/%s/%s' % (self.USER, self.PROJECT, self.wandb_run.id))
for file in run.files():
if file.name.endswith('.py'):
file.download(download_dir, replace=True)
if self.verbose:
print('Downloading the code for %s in %s' % (file.name, download_dir))
def read_config(self, run_id):
run = self.api.run('%s/%s/%s' % (self.USER, self.PROJECT, run_id))
return inflate_config(run.config)
def download_checkpoint(self, checkpoint_file, download_path=None):
if self.verbose:
print("Dowloading the checkpoint: %s" % checkpoint_file)
run = self.api.run('%s/%s/%s' % (self.USER, self.PROJECT, self.run_id))
if download_path is None:
download_path = os.path.join(self.wandb_run.dir)
run.file(checkpoint_file).download(download_path, replace=True)
return os.path.join(download_path, checkpoint_file)
def load_checkpoint(self, trainer, checkpoint_file, device='cpu'):
file_path = self.download_checkpoint(checkpoint_file)
if self.verbose:
print("Resuming Training")
trainer.load(file_path, device=device)
if self.verbose:
print("Resuming Training from iteration %d" % trainer.model.iterations)
return trainer
def load_model(self, model, checkpoint_file, device='cpu'):
file_path = self.download_checkpoint(checkpoint_file)
if self.verbose:
print("Resuming Training")
model.load(file_path, device=device)
if self.verbose:
print("Resuming Training from iteration %d" % model.iterations)
return model
def load_last_trainer(self, trainer, device='cpu'):
return self.load_checkpoint(trainer, BACKUP_NAME, device=device)
def load_last_model(self, model, device='cpu'):
return self.load_model(model, BACKUP_NAME, device=device)
def log(self, name, value, type, iteration):
if type == 'scalar':
wandb.log({name: value}, step=iteration)
elif type == 'scalars':
for sub_name, v in value.items():
wandb.log({'%s/%s' % (name, sub_name): v}, step=iteration)
elif type == 'figure':
wandb.log({name: wandb.Image(value)}, step=iteration)
plt.close(value)
else:
raise Exception('Type %s is not recognized by WandBLogWriter' % type)
def make_checkpoint(self, trainer, force_upload=False):
super(WANDBRunManager, self).make_checkpoint(trainer)
if force_upload:
checkpoint_filename = os.path.join(self.run_dir, 'checkpoint_%d.pt' % trainer.model.iterations)
wandb.save(checkpoint_filename, base_path=self.run_dir)
def make_backup(self, trainer, force_upload=False):
super(WANDBRunManager, self).make_backup(trainer)
if force_upload:
model_filename = os.path.join(self.run_dir, BACKUP_NAME)
wandb.save(model_filename, base_path=self.run_dir)
def checkpoint_list(self):
checkpoints = []
run = self.api.run('%s/%s/%s' % (self.USER, self.PROJECT, self.wandb_run.id))
for file in run.files():
if file.name.endswith('.pt'):
checkpoints.append(file.name)
return checkpoints
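# Usage sketch (requires the WANDB_USER and WANDB_PROJECT environment
# variables checked above; the config keys shown are hypothetical):
#
#   manager = WANDBRunManager(config={'model': {...}}, run_name='demo')
#   manager.log('loss', 0.42, type='scalar', iteration=0)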
| [
"[email protected]"
] | |
6c5e9965271a443300889d89cd4030b10c29cfee | f290130f79bcccc3f8881707766f7833d297d9f8 | /configsetup.py | 82f5f2388ed10d127a2f44be831613a7c9e22326 | [
"MIT"
] | permissive | BunniwuvsPoni/Python-Reddit_Submission_Analysis | 906e54beccef7779d1b88ebffd3e8887f4dbcd51 | 12c41ff09954e14852363776d31100dae9d47c09 | refs/heads/master | 2023-03-09T12:39:55.949656 | 2021-02-13T23:56:13 | 2021-02-13T23:56:13 | 338,662,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # .gitignore should include reference to config.py
# configsetup.py should be duplicated to config.py to take effect
client_id = "personal use script"
secret_token = "secret token"
username = "username"
password = "password"
botname = "botname"
subreddits = ["subreddit1","subreddit2"] | [
"[email protected]"
] | |
fab4ea867b286fdb37f34d77e6e1491c80492515 | e038ce1696e7903b7a08c8644ab9a196aeb4f017 | /arangoProject/arango/__init__.py | 3cbb0c8256316a04572f24e93026bb980dd4955d | [] | no_license | Sunghyeok93/Flask_project | c351667db6b3f8337c022314b67619159f599a76 | 65eae7f8459833f7b2e25f823ea09dfb087f113c | refs/heads/master | 2021-04-15T03:36:14.726083 | 2018-05-17T03:09:58 | 2018-05-17T03:09:58 | 126,173,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,374 | py | from flask import Blueprint, request, render_template, jsonify, session, json, Response
from arango.model.users import *
from . import connection, dbResponse
blueprint = Blueprint('_arango', __name__)
arango = connection.arangoConnect()
dbresponse = dbResponse.arangoResponse()
@blueprint.route('/login')
def login():
session['ID'] = 'asdasd'
return render_template('index.html')
@blueprint.route('/')
def hello_world():
return render_template('main.html')
# Returns status 200 when a new session is created, status 401 when creation fails (a session already exists)
@blueprint.route('/addSession', methods=["POST"])
def addSession():
json = request.form
session_id = json['data[email]']
if not 'ID' in session :
print('connect new session : '+ session_id )
session['ID'] = session_id
return dbresponse.statusResponse(responseStatus=200)
else :
print('already in session : ' + session['ID'])
return dbresponse.statusResponse(responseStatus=401)
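# Client-side sketch (hypothetical host/port): the endpoints read
# jQuery-style form fields, e.g.
#   requests.post('http://localhost:5000/addSession',
#                 data={'data[email]': '[email protected]'})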
# Returns status 200 when the session is cleared successfully, status 401 on failure (no session exists)
@blueprint.route('/deleteSession', methods=["POST"])
def deleteSession():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
print(session['ID'])
session.clear()
return dbresponse.statusResponse(responseStatus=200)
@blueprint.route('/create/user', methods=["POST"])
def createUser():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
email = json['data[email]']
name = json['data[name]']
pwd = json['data[pwd]']
desc = json['data[desc]']
if arango.addVertex('Users', {"_key": email, "name": name, "pwd": pwd, "desc": desc}) :
return dbresponse.statusResponse(responseStatus=200)
else :
return dbresponse.statusResponse(responseStatus=400)
@blueprint.route('/confirm/pwd', methods=["POST"])
def confirmPwd():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
pwd = json['data[pwd]']
if arango.checkAttribute(collection='Users', document=session['ID'], attribute='pwd', checkValue=pwd ) :
return dbresponse.statusResponse(responseStatus=200)
else :
return dbresponse.statusResponse(responseStatus=400)
@blueprint.route('/return/user', methods=["POST"])
def returnUser():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
try :
document = arango.returnDocument(collection='Users', document=session['ID'])
return dbresponse.userResponse(email=document['_key'], name=document['name'], pwd=document['pwd'], desc=document['desc'], responseStatus=200)
except :
return dbresponse.statusResponse(responseStatus=400)
# Incomplete ...
@blueprint.route('/edit/user', methods=["POST"])
def editUser():
arango.patchDocument(collection='Users', document='sdsdd', jsonValue={})
return dbresponse.statusResponse(responseStatus=404)
#if not 'ID' in session :
# return dbresponse.statusResponse(responseStatus=401)
#else :
# json = request.form
# value = {
# 'name' : json['data[name]'],
# 'pwd' : json['data[pwd'],
# 'desc' : json['data[desc']
# }
# return dbresponse.statusResponse(responseStatus=404)
@blueprint.route('/delete/user', methods=["POST"])
def deleteUser():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
if arango.removeVertex('Users', session['ID']) :
del session['ID']
return dbresponse.statusResponse(responseStatus=200)
else :
return dbresponse.statusResponse(responseStatus=400)
@blueprint.route('/send/friend', methods=["POST"])
def sendFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
friendEmail = json['data[email]']
value = arango.addRelation(edgeCollection='Friends', _fromCollection='Users', _fromVertex=session['ID'], _toCollection='Users', _toVertex=friendEmail, content={"_key" : session['ID']+friendEmail})
if value == 1 : # success
return dbresponse.statusResponse(responseStatus=200)
elif value == 2 : # the relation already exists
return dbresponse.statusResponse(responseStatus=400)
else : # no such email
return dbresponse.statusResponse(responseStatus=404)
@blueprint.route('/cancel/friend', methods=["POST"])
def cancelFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
friendEmail = json['data[email]']
if not arango.hasVertex(vertexCollection='Users', vertexId=friendEmail) : # if the email does not exist
return dbresponse.statusResponse(responseStatus=404)
value = arango.removeRelation(edgeCollection='Friends', edgeKey=session['ID']+friendEmail)
if value == 1: # success
return dbresponse.statusResponse(responseStatus=200)
elif value == 2 : # no pending friend request (nothing to cancel)
return dbresponse.statusResponse(responseStatus=402)
else : # unexplained failure
return dbresponse.statusResponse(responseStatus=400)
@blueprint.route('/accept/friend', methods=["POST"])
def acceptFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
friendEmail = json['data[email]']
relation = arango.isFriend(myKey=session['ID'], friendKey=friendEmail)
if relation == 0: # the two are already friends
return dbresponse.statusResponse(responseStatus=403)
elif relation == 2 :
value = arango.addRelation(edgeCollection='Friends', _fromCollection='Users', _fromVertex=session['ID'],
_toCollection='Users', _toVertex=friendEmail,
content={"_key": session['ID'] + friendEmail})
if value == 1 : # friend request accepted successfully
return dbresponse.statusResponse(responseStatus=200)
else :
return dbresponse.statusResponse(responseStatus=400)
else : # there was no friend request
return dbresponse.statusResponse(responseStatus=402)
@blueprint.route('/decline/friend', methods=["POST"])
def declineFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
friendEmail = json['data[email]']
relation = arango.isFriend(myKey=session['ID'], friendKey=friendEmail)
if relation == 0 : # the two are already friends
return dbresponse.statusResponse(responseStatus=403)
elif relation == 2 :
value = arango.removeRelation(edgeCollection='Friends', edgeKey=friendEmail+session['ID'])
if value == 1 : # friend request declined successfully
return dbresponse.statusResponse(responseStatus=200)
else :
return dbresponse.statusResponse(responseStatus=400)
else : # no such friend request
return dbresponse.statusResponse(responseStatus=402)
@blueprint.route('/delete/friend', methods=["POST"])
def deleteFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
friendEmail = json['data[email]']
value = arango.isFriend(myKey=session['ID'], friendKey=friendEmail)
if value == 0 :
arango.removeRelation(edgeCollection='Friends', edgeKey=session['ID'] + friendEmail )
arango.removeRelation(edgeCollection='Friends', edgeKey=friendEmail + session['ID'])
return dbresponse.statusResponse(responseStatus=200)
else : # the two are not friends, or no such email
return dbresponse.statusResponse(responseStatus=400)
@blueprint.route('/search/user', methods=["POST"])
def searchUser():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
json = request.form
searchEmail = json['data[email]']
list = arango.searchUser(searchEmail)
return dbresponse.searchUserResponse(userList=list, responseStatus=200)
@blueprint.route('/list/friend', methods=["POST"])
def listFriend():
if not 'ID' in session :
return dbresponse.statusResponse(responseStatus=401)
else :
list = arango.listFriend(session['ID'])
return dbresponse.listFriendResponse(friendList=list, responseStatus=200)
@blueprint.route('/addRelation', methods=['post'])
def addRelation():
_fromVertex = {"name":"from", "_key" : "from"}
_toVertex = {"name": "to", "_key": "to"}
arango.addRelation('Members', 'Users', 'from', 'Groups', 'to', {"hello":"man"})
return "addRelation"
@blueprint.route('/removeRelation', methods=['post'])
def removeRelation():
arango.removeRelation('Members','197796')
return "removeRelation"
@blueprint.route('/returnFriends', methods=["POST"])
def returnFriends():
json = request.form
userName = json['data[name]']
#result = arango.returnVertex('Users', userName)
result = arango.getEdges('Users', 'in', userName) # query the requested user's edges instead of a hard-coded 'Yuha'
if result == False :
return jsonify(returnFriends=False, username=userName)
else :
return jsonify(returnFriends=True, username=userName) # success branch previously returned False as well
@blueprint.route('/addFriend', methods=["POST"])
def addFriend():
json = request.form
userName1 = json['data[name1]']
userName2 = json['data[name2]']
result = arango.addRelation('Friends','Users', userName1, 'Users', userName2, { "_key" : userName1+userName2} )
if result == False :
return jsonify(addFriend=False, username1=userName1, username2=userName2)
else :
return jsonify(addFriend=True, username1=userName1, username2=userName2)
@blueprint.route('/addProject', methods=["POST"])
def addProject():
json = request.form
projectName = json['data[name]']
result = arango.addVertex('Projects', { "_key" : projectName })
if result == False:
return jsonify(addProject=False, projectname=projectName)
else:
return jsonify(addProject=True, username=projectName)
@blueprint.route('/joinProject', methods=["POST"])
def joinProject():
json = request.form
projectName = json['data[project]']
userName = json['data[user]']
author = json['data[author]']
result = arango.addRelation('Members', 'Users', userName, 'Projects', projectName, {"_key":userName+projectName, "position":author})
if result == False:
return jsonify(joinProject=False, projectname=projectName,username = userName, author=author)
else:
return jsonify(joinProject=True, projectname=projectName, username=userName, author=author)
# TODO: decide a naming scheme for the _key attribute of relations
# hasUser, hasProject, my friends, my projects | [
"[email protected]"
] | |
4f93eb5a8ca11eb43e6903d4e1ccd3e91447737d | d2fe0085b52506733b72229cd1b851bfbdbfeb1f | /mean_var_std.py | b736ca0969bbde30a669f565be16ad47e4505ec5 | [] | no_license | maheshdbabar9340/Mean-Variance-Standard_Deviation_Calculator | 1b62a3203e3d20c7252f18ec5d7d05a8debb9388 | cffbc2974ead977b53fc0e6d65669b36fe7eae47 | refs/heads/main | 2023-05-12T15:48:00.725344 | 2021-06-03T10:51:40 | 2021-06-03T10:51:40 | 373,472,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import numpy as np
def calculate(list):
if len(list) != 9:
raise ValueError("List must contain nine numbers.")
array = np.array(list).reshape((3, 3))
calculations = {
"mean": [
np.mean(array, axis = 0).tolist(),
np.mean(array, axis = 1).tolist(),
np.mean(array.tolist())
],
"variance": [
np.var(array, axis = 0).tolist(),
np.var(array, axis = 1).tolist(),
np.var(array).tolist()
],
"standard deviation": [
np.std(array, axis = 0).tolist(),
np.std(array, axis = 1).tolist(),
np.std(array).tolist()
],
"max": [
np.max(array, axis = 0).tolist(),
np.max(array, axis = 1).tolist(),
np.max(array).tolist()
],
"min": [
np.min(array, axis = 0).tolist(),
np.min(array, axis = 1).tolist(),
np.min(array).tolist()
],
"sum": [
np.sum(array, axis = 0).tolist(),
np.sum(array, axis = 1).tolist(),
np.sum(array).tolist()
],
}
return calculations | [
"[email protected]"
] | |
587d3ccd32a63d4533cfe8bd0631d0c543135cd9 | bbb88937e81b29596ddc42b525bbb05fb2e55a48 | /adjutant/tasks/__init__.py | 73863576ecc3c406aa7f95ca0fb66f8c8b5c7450 | [
"Apache-2.0"
] | permissive | openstack/adjutant | f665ee6d903b560684b23462abd4c4c135863767 | df8c3e4a8b70be8697ac46f0acec9169752b4698 | refs/heads/master | 2023-09-03T11:43:39.117103 | 2022-12-07T03:27:10 | 2023-01-04T07:24:00 | 94,869,844 | 23 | 12 | Apache-2.0 | 2021-01-16T06:09:30 | 2017-06-20T08:49:01 | Python | UTF-8 | Python | false | false | 630 | py | # Copyright (C) 2019 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
TASK_CLASSES = {}
| [
"[email protected]"
] | |
5cd20bb33eb53e0632fbff667e2f70753a58513d | 35849876e426848b6985043d4a1537f4284d90eb | /icode121/dev.py | 2448218d7cc830a4797c252a4613f7968ec244fe | [] | no_license | P79N6A/icode | e2d0a982f0bd8462a5035ff802615ba961f1df22 | d13a5fcd744080099cb514d77d703384cea25ff9 | refs/heads/master | 2020-04-26T23:05:53.998314 | 2019-03-05T06:56:36 | 2019-03-05T06:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | from .settings import *
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS += [
'app.artical',
'app.comment',
'app.course',
'app.user',
'app.other',
'app.activity',
'debug_toolbar',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'django_filters',
'import_export',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Django Rest Framework
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
],
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
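# With TokenAuthentication enabled above, clients send the standard DRF token
# header, e.g.: Authorization: Token <key>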
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
| [
"[email protected]"
] | |
e591fa3628ca4a4f93b25edfbf537c50c7d91cc0 | ea17f5e7e3cfe51198bb014db152d22df827a998 | /variable_delay/third_party/mininet/node.py | 28c7db052841156d69f1f76e06cedcae849015ad | [
"LicenseRef-scancode-x11-stanford"
] | permissive | JerryLX/CoCo-beholder | e3bb15954c66e87fd209820cddea1adadc89af99 | 0c6698fcbf3134ae167e8f10a7b631b34957b726 | refs/heads/main | 2023-04-19T08:13:48.205838 | 2021-04-18T13:38:09 | 2021-04-18T13:38:09 | 357,019,424 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 61,673 | py | """
Node objects for Mininet.
Nodes provide a simple abstraction for interacting with hosts, switches
and controllers. Local nodes are simply one or more processes on the local
machine.
Node: superclass for all (primarily local) network nodes.
Host: a virtual host. By default, a host is simply a shell; commands
may be sent using Cmd (which waits for output), or using sendCmd(),
which returns immediately, allowing subsequent monitoring using
monitor(). Examples of how to run experiments using this
functionality are provided in the examples/ directory. By default,
hosts share the root file system, but they may also specify private
directories.
CPULimitedHost: a virtual host whose CPU bandwidth is limited by
RT or CFS bandwidth limiting.
Switch: superclass for switch nodes.
UserSwitch: a switch using the user-space switch from the OpenFlow
reference implementation.
OVSSwitch: a switch using the Open vSwitch OpenFlow-compatible switch
implementation (openvswitch.org).
OVSBridge: an Ethernet bridge implemented using Open vSwitch.
Supports STP.
IVSSwitch: OpenFlow switch using the Indigo Virtual Switch.
Controller: superclass for OpenFlow controllers. The default controller
is controller(8) from the reference implementation.
OVSController: The test controller from Open vSwitch.
NOXController: a controller node using NOX (noxrepo.org).
Ryu: The Ryu controller (https://osrg.github.io/ryu/)
RemoteController: a remote controller node, which may use any
arbitrary OpenFlow-compatible controller, and which is not
created or managed by Mininet.
Future enhancements:
- Possibly make Node, Switch and Controller more abstract so that
they can be used for both local and remote nodes
- Create proxy objects for remote nodes (Mininet: Cluster Edition)
"""
import os
import pty
import re
import signal
import select
from subprocess import Popen, PIPE
from time import sleep
from variable_delay.third_party.mininet.log import info, error, warn, debug
from variable_delay.third_party.mininet.util import ( quietRun, errRun, errFail, moveIntf, isShellBuiltin,
numCores, retry, mountCgroups, BaseString, decode,
encode, Python3, which )
from variable_delay.third_party.mininet.moduledeps import moduleDeps, pathCheck, TUN
from variable_delay.third_party.mininet.link import Link, Intf, TCIntf, OVSIntf
from re import findall
from distutils.version import StrictVersion
class Node( object ):
"""A virtual network node is simply a shell in a network namespace.
We communicate with it using pipes."""
portBase = 0 # Nodes always start with eth0/port0, even in OF 1.0
def __init__( self, name, inNamespace=True, **params ):
"""name: name of node
inNamespace: in network namespace?
privateDirs: list of private directory strings or tuples
params: Node parameters (see config() for details)"""
# Make sure class actually works
self.checkSetup()
self.name = params.get( 'name', name )
self.privateDirs = params.get( 'privateDirs', [] )
self.inNamespace = params.get( 'inNamespace', inNamespace )
# Python 3 complains if we don't wait for shell exit
self.waitExited = params.get( 'waitExited', Python3 )
# Stash configuration parameters for future reference
self.params = params
self.intfs = {} # dict of port numbers to interfaces
self.ports = {} # dict of interfaces to port numbers
# replace with Port objects, eventually ?
self.nameToIntf = {} # dict of interface names to Intfs
# Make pylint happy
( self.shell, self.execed, self.pid, self.stdin, self.stdout,
self.lastPid, self.lastCmd, self.pollOut ) = (
None, None, None, None, None, None, None, None )
self.waiting = False
self.readbuf = ''
# Start command interpreter shell
self.master, self.slave = None, None # pylint
self.startShell()
self.mountPrivateDirs()
# File descriptor to node mapping support
# Class variables and methods
inToNode = {} # mapping of input fds to nodes
outToNode = {} # mapping of output fds to nodes
@classmethod
def fdToNode( cls, fd ):
"""Return node corresponding to given file descriptor.
fd: file descriptor
returns: node"""
node = cls.outToNode.get( fd )
return node or cls.inToNode.get( fd )
# Command support via shell process in namespace
def startShell( self, mnopts=None ):
"Start a shell process for running commands"
if self.shell:
error( "%s: shell is already running\n" % self.name )
return
# vdlocalmnexec: (c)lose descriptors, (d)etach from tty,
# (p)rint pid, and run in (n)amespace
opts = '-cd' if mnopts is None else mnopts
if self.inNamespace:
opts += 'n'
# bash -i: force interactive
# -s: pass $* to shell, and make process easy to find in ps
# prompt is set to sentinel chr( 127 )
cmd = [ 'vdlocalmnexec', opts, 'env', 'PS1=' + chr( 127 ),
'bash', '--norc', '--noediting',
'-is', 'mininet:' + self.name ]
# Spawn a shell subprocess in a pseudo-tty, to disable buffering
# in the subprocess and insulate it from signals (e.g. SIGINT)
# received by the parent
self.master, self.slave = pty.openpty()
self.shell = self._popen( cmd, stdin=self.slave, stdout=self.slave,
stderr=self.slave, close_fds=False )
# XXX BL: This doesn't seem right, and we should also probably
# close our files when we exit...
self.stdin = os.fdopen( self.master, 'r' )
self.stdout = self.stdin
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register( self.stdout )
# Maintain mapping between file descriptors and nodes
# This is useful for monitoring multiple nodes
# using select.poll()
self.outToNode[ self.stdout.fileno() ] = self
self.inToNode[ self.stdin.fileno() ] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
# Wait for prompt
while True:
data = self.read( 1024 )
if data[ -1 ] == chr( 127 ):
break
self.pollOut.poll()
self.waiting = False
# +m: disable job control notification
self.cmd( 'unset HISTFILE; stty -echo; set +m' )
def mountPrivateDirs( self ):
"mount private directories"
# Avoid expanding a string into a list of chars
assert not isinstance( self.privateDirs, BaseString )
for directory in self.privateDirs:
if isinstance( directory, tuple ):
# mount given private directory
privateDir = directory[ 1 ] % self.__dict__
mountPoint = directory[ 0 ]
self.cmd( 'mkdir -p %s' % privateDir )
self.cmd( 'mkdir -p %s' % mountPoint )
self.cmd( 'mount --bind %s %s' %
( privateDir, mountPoint ) )
else:
# mount temporary filesystem on directory
self.cmd( 'mkdir -p %s' % directory )
self.cmd( 'mount -n -t tmpfs tmpfs %s' % directory )
def unmountPrivateDirs( self ):
"mount private directories"
for directory in self.privateDirs:
if isinstance( directory, tuple ):
self.cmd( 'umount ', directory[ 0 ] )
else:
self.cmd( 'umount ', directory )
def _popen( self, cmd, **params ):
"""Internal method: spawn and return a process
cmd: command to run (list)
params: parameters to Popen()"""
# Leave this is as an instance method for now
assert self
popen = Popen( cmd, **params )
debug( '_popen', cmd, popen.pid )
return popen
def cleanup( self ):
"Help python collect its garbage."
# We used to do this, but it slows us down:
# Intfs may end up in root NS
# for intfName in self.intfNames():
# if self.name in intfName:
# quietRun( 'ip link del ' + intfName )
if self.shell:
# Close ptys
self.stdin.close()
os.close(self.slave)
if self.waitExited:
debug( 'waiting for', self.pid, 'to terminate\n' )
self.shell.wait()
self.shell = None
# Subshell I/O, commands and control
def read( self, maxbytes=1024 ):
"""Buffered read from node, potentially blocking.
maxbytes: maximum number of bytes to return"""
count = len( self.readbuf )
if count < maxbytes:
data = decode( os.read( self.stdout.fileno(), maxbytes - count ) )
self.readbuf += data
if maxbytes >= len( self.readbuf ):
result = self.readbuf
self.readbuf = ''
else:
result = self.readbuf[ :maxbytes ]
self.readbuf = self.readbuf[ maxbytes: ]
return result
def readline( self ):
"""Buffered readline from node, potentially blocking.
returns: line (minus newline) or None"""
self.readbuf += self.read( 1024 )
if '\n' not in self.readbuf:
return None
pos = self.readbuf.find( '\n' )
line = self.readbuf[ 0: pos ]
self.readbuf = self.readbuf[ pos + 1: ]
return line
def write( self, data ):
"""Write data to node.
data: string"""
os.write( self.stdin.fileno(), encode( data ) )
def terminate( self ):
"Send kill signal to Node and clean up after it."
self.unmountPrivateDirs()
if self.shell:
if self.shell.poll() is None:
os.killpg( self.shell.pid, signal.SIGHUP )
self.cleanup()
def stop( self, deleteIntfs=False ):
"""Stop node.
deleteIntfs: delete interfaces? (False)"""
if deleteIntfs:
self.deleteIntfs()
self.terminate()
def waitReadable( self, timeoutms=None ):
"""Wait until node's output is readable.
timeoutms: timeout in ms or None to wait indefinitely.
returns: result of poll()"""
if len( self.readbuf ) == 0:
return self.pollOut.poll( timeoutms )
def sendCmd( self, *args, **kwargs ):
"""Send a command, followed by a command to echo a sentinel,
and return without waiting for the command to complete.
args: command and arguments, or string
printPid: print command's PID? (False)"""
assert self.shell and not self.waiting
printPid = kwargs.get( 'printPid', False )
# Allow sendCmd( [ list ] )
if len( args ) == 1 and isinstance( args[ 0 ], list ):
cmd = args[ 0 ]
# Allow sendCmd( cmd, arg1, arg2... )
elif len( args ) > 0:
cmd = args
# Convert to string
if not isinstance( cmd, str ):
cmd = ' '.join( [ str( c ) for c in cmd ] )
if not re.search( r'\w', cmd ):
# Replace empty commands with something harmless
cmd = 'echo -n'
self.lastCmd = cmd
# if a builtin command is backgrounded, it still yields a PID
if len( cmd ) > 0 and cmd[ -1 ] == '&':
# print ^A{pid}\n so monitor() can set lastPid
cmd += ' printf "\\001%d\\012" $! '
elif printPid and not isShellBuiltin( cmd ):
cmd = 'vdlocalmnexec -p ' + cmd
self.write( cmd + '\n' )
self.lastPid = None
self.waiting = True
def sendInt( self, intr=chr( 3 ) ):
"Interrupt running command."
debug( 'sendInt: writing chr(%d)\n' % ord( intr ) )
self.write( intr )
def monitor( self, timeoutms=None, findPid=True ):
"""Monitor and return the output of a command.
Set self.waiting to False if command has completed.
timeoutms: timeout in ms or None to wait indefinitely
findPid: look for PID from vdlocalmnexec -p"""
ready = self.waitReadable( timeoutms )
if not ready:
return ''
data = self.read( 1024 )
pidre = r'\[\d+\] \d+\r\n'
# Look for PID
marker = chr( 1 ) + r'\d+\r\n'
if findPid and chr( 1 ) in data:
# suppress the job and PID of a backgrounded command
if re.findall( pidre, data ):
data = re.sub( pidre, '', data )
# Marker can be read in chunks; continue until all of it is read
while not re.findall( marker, data ):
data += self.read( 1024 )
markers = re.findall( marker, data )
if markers:
self.lastPid = int( markers[ 0 ][ 1: ] )
data = re.sub( marker, '', data )
# Look for sentinel/EOF
if len( data ) > 0 and data[ -1 ] == chr( 127 ):
self.waiting = False
data = data[ :-1 ]
elif chr( 127 ) in data:
self.waiting = False
data = data.replace( chr( 127 ), '' )
return data
def waitOutput( self, verbose=False, findPid=True ):
"""Wait for a command to complete.
Completion is signaled by a sentinel character, ASCII(127)
appearing in the output stream. Wait for the sentinel and return
the output, including trailing newline.
verbose: print output interactively"""
log = info if verbose else debug
output = ''
while self.waiting:
data = self.monitor( findPid=findPid )
output += data
log( data )
return output
def cmd( self, *args, **kwargs ):
"""Send a command, wait for output, and return it.
cmd: string"""
verbose = kwargs.get( 'verbose', False )
log = info if verbose else debug
log( '*** %s : %s\n' % ( self.name, args ) )
if self.shell:
self.sendCmd( *args, **kwargs )
return self.waitOutput( verbose )
else:
warn( '(%s exited - ignoring cmd%s)\n' % ( self, args ) )
def cmdPrint( self, *args):
"""Call cmd and printing its output
cmd: string"""
return self.cmd( *args, **{ 'verbose': True } )
def popen( self, *args, **kwargs ):
"""Return a Popen() object in our namespace
args: Popen() args, single list, or string
kwargs: Popen() keyword args"""
defaults = { 'stdout': PIPE, 'stderr': PIPE,
'mncmd':
[ 'vdlocalmnexec', '-da', str( self.pid ) ] }
defaults.update( kwargs )
shell = defaults.pop( 'shell', False )
if len( args ) == 1:
if isinstance( args[ 0 ], list ):
# popen([cmd, arg1, arg2...])
cmd = args[ 0 ]
elif isinstance( args[ 0 ], BaseString ):
# popen("cmd arg1 arg2...")
cmd = [ args[ 0 ] ] if shell else args[ 0 ].split()
else:
raise Exception( 'popen() requires a string or list' )
elif len( args ) > 0:
# popen( cmd, arg1, arg2... )
cmd = list( args )
if shell:
cmd = [ os.environ[ 'SHELL' ], '-c' ] + [ ' '.join( cmd ) ]
# Attach to our namespace using vdlocalmnexec -a
cmd = defaults.pop( 'mncmd' ) + cmd
popen = self._popen( cmd, **defaults )
return popen
def pexec( self, *args, **kwargs ):
"""Execute a command using popen
returns: out, err, exitcode"""
popen = self.popen( *args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
**kwargs )
# Warning: this can fail with large numbers of fds!
out, err = popen.communicate()
exitcode = popen.wait()
return decode( out ), decode( err ), exitcode
# Interface management, configuration, and routing
# BL notes: This might be a bit redundant or over-complicated.
# However, it does allow a bit of specialization, including
# changing the canonical interface names. It's also tricky since
# the real interfaces are created as veth pairs, so we can't
# make a single interface at a time.
def newPort( self ):
"Return the next port number to allocate."
if len( self.ports ) > 0:
return max( self.ports.values() ) + 1
return self.portBase
def addIntf( self, intf, port=None, moveIntfFn=moveIntf ):
"""Add an interface.
intf: interface
port: port number (optional, typically OpenFlow port number)
moveIntfFn: function to move interface (optional)"""
if port is None:
port = self.newPort()
self.intfs[ port ] = intf
self.ports[ intf ] = port
self.nameToIntf[ intf.name ] = intf
debug( '\n' )
debug( 'added intf %s (%d) to node %s\n' % (
intf, port, self.name ) )
if self.inNamespace:
debug( 'moving', intf, 'into namespace for', self.name, '\n' )
moveIntfFn( intf.name, self )
def delIntf( self, intf ):
"""Remove interface from Node's known interfaces
Note: to fully delete interface, call intf.delete() instead"""
port = self.ports.get( intf )
if port is not None:
del self.intfs[ port ]
del self.ports[ intf ]
del self.nameToIntf[ intf.name ]
def defaultIntf( self ):
"Return interface for lowest port"
ports = self.intfs.keys()
if ports:
return self.intfs[ min( ports ) ]
else:
warn( '*** defaultIntf: warning:', self.name,
'has no interfaces\n' )
def intf( self, intf=None ):
"""Return our interface object with given string name,
default intf if name is falsy (None, empty string, etc).
or the input intf arg.
Having this fcn return its arg for Intf objects makes it
easier to construct functions with flexible input args for
interfaces (those that accept both string names and Intf objects).
"""
if not intf:
return self.defaultIntf()
elif isinstance( intf, BaseString):
return self.nameToIntf[ intf ]
else:
return intf
def connectionsTo( self, node):
"Return [ intf1, intf2... ] for all intfs that connect self to node."
# We could optimize this if it is important
connections = []
for intf in self.intfList():
link = intf.link
if link:
node1, node2 = link.intf1.node, link.intf2.node
if node1 == self and node2 == node:
connections += [ ( intf, link.intf2 ) ]
elif node1 == node and node2 == self:
connections += [ ( intf, link.intf1 ) ]
return connections
def deleteIntfs( self, checkName=True ):
"""Delete all of our interfaces.
checkName: only delete interfaces that contain our name"""
# In theory the interfaces should go away after we shut down.
# However, this takes time, so we're better off removing them
# explicitly so that we won't get errors if we run before they
# have been removed by the kernel. Unfortunately this is very slow,
# at least with Linux kernels before 2.6.33
for intf in list( self.intfs.values() ):
# Protect against deleting hardware interfaces
if ( self.name in intf.name ) or ( not checkName ):
intf.delete()
info( '.' )
# Routing support
def setARP( self, ip, mac ):
"""Add an ARP entry.
ip: IP address as string
mac: MAC address as string"""
result = self.cmd( 'arp', '-s', ip, mac )
return result
def setHostRoute( self, ip, intf ):
"""Add route to host.
ip: IP address as dotted decimal
intf: string, interface name"""
return self.cmd( 'route add -host', ip, 'dev', intf )
def setDefaultRoute( self, intf=None ):
"""Set the default route to go through intf.
intf: Intf or {dev <intfname> via <gw-ip> ...}"""
# Note setParam won't call us if intf is none
if isinstance( intf, BaseString ) and ' ' in intf:
params = intf
else:
params = 'dev %s' % intf
# Do this in one line in case we're messing with the root namespace
self.cmd( 'ip route del default; ip route add default', params )
# Convenience and configuration methods
def setMAC( self, mac, intf=None ):
"""Set the MAC address for an interface.
intf: intf or intf name
mac: MAC address as string"""
return self.intf( intf ).setMAC( mac )
def setIP( self, ip, prefixLen=8, intf=None, **kwargs ):
"""Set the IP address for an interface.
intf: intf or intf name
ip: IP address as a string
prefixLen: prefix length, e.g. 8 for /8 or 16M addrs
kwargs: any additional arguments for intf.setIP"""
return self.intf( intf ).setIP( ip, prefixLen, **kwargs )
def IP( self, intf=None ):
"Return IP address of a node or specific interface."
return self.intf( intf ).IP()
def MAC( self, intf=None ):
"Return MAC address of a node or specific interface."
return self.intf( intf ).MAC()
def intfIsUp( self, intf=None ):
"Check if an interface is up."
return self.intf( intf ).isUp()
# The reason why we configure things in this way is so
# That the parameters can be listed and documented in
# the config method.
# Dealing with subclasses and superclasses is slightly
# annoying, but at least the information is there!
def setParam( self, results, method, **param ):
"""Internal method: configure a *single* parameter
results: dict of results to update
method: config method name
param: arg=value (ignore if value=None)
value may also be list or dict"""
name, value = list( param.items() )[ 0 ]
if value is None:
return
f = getattr( self, method, None )
if not f:
return
if isinstance( value, list ):
result = f( *value )
elif isinstance( value, dict ):
result = f( **value )
else:
result = f( value )
results[ name ] = result
return result
def config( self, mac=None, ip=None,
defaultRoute=None, lo='up', **_params ):
"""Configure Node according to (optional) parameters:
mac: MAC address for default interface
ip: IP address for default interface
ifconfig: arbitrary interface configuration
Subclasses should override this method and call
the parent class's config(**params)"""
# If we were overriding this method, we would call
# the superclass config method here as follows:
# r = Parent.config( **_params )
r = {}
self.setParam( r, 'setMAC', mac=mac )
self.setParam( r, 'setIP', ip=ip )
self.setParam( r, 'setDefaultRoute', defaultRoute=defaultRoute )
# This should be examined
self.cmd( 'ifconfig lo ' + lo )
return r
def configDefault( self, **moreParams ):
"Configure with default parameters"
self.params.update( moreParams )
self.config( **self.params )
# This is here for backward compatibility
def linkTo( self, node, link=Link ):
"""(Deprecated) Link to another node
replace with Link( node1, node2)"""
return link( self, node )
# Other methods
def intfList( self ):
"List of our interfaces sorted by port number"
return [ self.intfs[ p ] for p in sorted( self.intfs.keys() ) ]
def intfNames( self ):
"The names of our interfaces sorted by port number"
return [ str( i ) for i in self.intfList() ]
def __repr__( self ):
"More informative string representation"
intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() )
for i in self.intfList() ] ) )
return '<%s %s: %s pid=%s> ' % (
self.__class__.__name__, self.name, intfs, self.pid )
def __str__( self ):
"Abbreviated string representation"
return self.name
# Automatic class setup support
isSetup = False
@classmethod
def checkSetup( cls ):
"Make sure our class and superclasses are set up"
while cls and not getattr( cls, 'isSetup', True ):
cls.setup()
cls.isSetup = True
# Make pylint happy
cls = getattr( type( cls ), '__base__', None )
@classmethod
def setup( cls ):
"Make sure our class dependencies are available"
pathCheck( 'vdlocalmnexec', 'ifconfig', moduleName='Mininet')
class Host( Node ):
"A host is simply a Node"
pass
class CPULimitedHost( Host ):
"CPU limited host"
def __init__( self, name, sched='cfs', **kwargs ):
Host.__init__( self, name, **kwargs )
# Initialize class if necessary
if not CPULimitedHost.inited:
CPULimitedHost.init()
# Create a cgroup and move shell into it
self.cgroup = 'cpu,cpuacct,cpuset:/' + self.name
errFail( 'cgcreate -g ' + self.cgroup )
# We don't add ourselves to a cpuset because you must
# specify the cpu and memory placement first
errFail( 'cgclassify -g cpu,cpuacct:/%s %s' % ( self.name, self.pid ) )
# BL: Setting the correct period/quota is tricky, particularly
# for RT. RT allows very small quotas, but the overhead
        # seems to be high. CFS has a minimum quota of 1 ms, but
# still does better with larger period values.
self.period_us = kwargs.get( 'period_us', 100000 )
self.sched = sched
if sched == 'rt':
self.checkRtGroupSched()
self.rtprio = 20
def cgroupSet( self, param, value, resource='cpu' ):
"Set a cgroup parameter and return its value"
cmd = 'cgset -r %s.%s=%s /%s' % (
resource, param, value, self.name )
quietRun( cmd )
nvalue = int( self.cgroupGet( param, resource ) )
if nvalue != value:
error( '*** error: cgroupSet: %s set to %s instead of %s\n'
% ( param, nvalue, value ) )
return nvalue
def cgroupGet( self, param, resource='cpu' ):
"Return value of cgroup parameter"
cmd = 'cgget -r %s.%s /%s' % (
resource, param, self.name )
return int( quietRun( cmd ).split()[ -1 ] )
def cgroupDel( self ):
"Clean up our cgroup"
# info( '*** deleting cgroup', self.cgroup, '\n' )
_out, _err, exitcode = errRun( 'cgdelete -r ' + self.cgroup )
# Sometimes cgdelete returns a resource busy error but still
# deletes the group; next attempt will give "no such file"
return exitcode == 0 or ( 'no such file' in _err.lower() )
def popen( self, *args, **kwargs ):
"""Return a Popen() object in node's namespace
args: Popen() args, single list, or string
kwargs: Popen() keyword args"""
# Tell vdlocalmnexec to execute command in our cgroup
mncmd = kwargs.pop( 'mncmd', [ 'vdlocalmnexec', '-g', self.name,
'-da', str( self.pid ) ] )
# if our cgroup is not given any cpu time,
# we cannot assign the RR Scheduler.
if self.sched == 'rt':
if int( self.cgroupGet( 'rt_runtime_us', 'cpu' ) ) <= 0:
mncmd += [ '-r', str( self.rtprio ) ]
else:
debug( '*** error: not enough cpu time available for %s.' %
self.name, 'Using cfs scheduler for subprocess\n' )
return Host.popen( self, *args, mncmd=mncmd, **kwargs )
def cleanup( self ):
"Clean up Node, then clean up our cgroup"
super( CPULimitedHost, self ).cleanup()
retry( retries=3, delaySecs=.1, fn=self.cgroupDel )
_rtGroupSched = False # internal class var: Is CONFIG_RT_GROUP_SCHED set?
@classmethod
def checkRtGroupSched( cls ):
"Check (Ubuntu,Debian) kernel config for CONFIG_RT_GROUP_SCHED for RT"
if not cls._rtGroupSched:
release = quietRun( 'uname -r' ).strip('\r\n')
output = quietRun( 'grep CONFIG_RT_GROUP_SCHED /boot/config-%s' %
release )
if output == '# CONFIG_RT_GROUP_SCHED is not set\n':
error( '\n*** error: please enable RT_GROUP_SCHED '
'in your kernel\n' )
exit( 1 )
cls._rtGroupSched = True
def chrt( self ):
"Set RT scheduling priority"
quietRun( 'chrt -p %s %s' % ( self.rtprio, self.pid ) )
result = quietRun( 'chrt -p %s' % self.pid )
firstline = result.split( '\n' )[ 0 ]
lastword = firstline.split( ' ' )[ -1 ]
if lastword != 'SCHED_RR':
error( '*** error: could not assign SCHED_RR to %s\n' % self.name )
return lastword
def rtInfo( self, f ):
"Internal method: return parameters for RT bandwidth"
pstr, qstr = 'rt_period_us', 'rt_runtime_us'
# RT uses wall clock time for period and quota
quota = int( self.period_us * f )
return pstr, qstr, self.period_us, quota
def cfsInfo( self, f ):
"Internal method: return parameters for CFS bandwidth"
pstr, qstr = 'cfs_period_us', 'cfs_quota_us'
# CFS uses wall clock time for period and CPU time for quota.
quota = int( self.period_us * f * numCores() )
period = self.period_us
if f > 0 and quota < 1000:
debug( '(cfsInfo: increasing default period) ' )
quota = 1000
period = int( quota / f / numCores() )
# Reset to unlimited on negative quota
if quota < 0:
quota = -1
return pstr, qstr, period, quota
# BL comment:
# This may not be the right API,
# since it doesn't specify CPU bandwidth in "absolute"
# units the way link bandwidth is specified.
# We should use MIPS or SPECINT or something instead.
# Alternatively, we should change from system fraction
# to CPU seconds per second, essentially assuming that
# all CPUs are the same.
def setCPUFrac( self, f, sched=None ):
"""Set overall CPU fraction for this host
f: CPU bandwidth limit (positive fraction, or -1 for cfs unlimited)
sched: 'rt' or 'cfs'
Note 'cfs' requires CONFIG_CFS_BANDWIDTH,
and 'rt' requires CONFIG_RT_GROUP_SCHED"""
if not sched:
sched = self.sched
if sched == 'rt':
if not f or f < 0:
raise Exception( 'Please set a positive CPU fraction'
' for sched=rt\n' )
pstr, qstr, period, quota = self.rtInfo( f )
elif sched == 'cfs':
pstr, qstr, period, quota = self.cfsInfo( f )
else:
return
# Set cgroup's period and quota
setPeriod = self.cgroupSet( pstr, period )
setQuota = self.cgroupSet( qstr, quota )
if sched == 'rt':
# Set RT priority if necessary
sched = self.chrt()
info( '(%s %d/%dus) ' % ( sched, setQuota, setPeriod ) )
def setCPUs( self, cores, mems=0 ):
"Specify (real) cores that our cgroup can run on"
if not cores:
return
if isinstance( cores, list ):
cores = ','.join( [ str( c ) for c in cores ] )
self.cgroupSet( resource='cpuset', param='cpus',
value=cores )
# Memory placement is probably not relevant, but we
# must specify it anyway
self.cgroupSet( resource='cpuset', param='mems',
value=mems)
# We have to do this here after we've specified
# cpus and mems
errFail( 'cgclassify -g cpuset:/%s %s' % (
self.name, self.pid ) )
def config( self, cpu=-1, cores=None, **params ):
"""cpu: desired overall system CPU fraction
cores: (real) core(s) this host can run on
params: parameters for Node.config()"""
r = Node.config( self, **params )
# Was considering cpu={'cpu': cpu , 'sched': sched}, but
# that seems redundant
self.setParam( r, 'setCPUFrac', cpu=cpu )
self.setParam( r, 'setCPUs', cores=cores )
return r
inited = False
@classmethod
def init( cls ):
"Initialization for CPULimitedHost class"
mountCgroups()
cls.inited = True
# Some important things to note:
#
# The "IP" address which setIP() assigns to the switch is not
# an "IP address for the switch" in the sense of IP routing.
# Rather, it is the IP address for the control interface,
# on the control network, and it is only relevant to the
# controller. If you are running in the root namespace
# (which is the only way to run OVS at the moment), the
# control interface is the loopback interface, and you
# normally never want to change its IP address!
#
# In general, you NEVER want to attempt to use Linux's
# network stack (i.e. ifconfig) to "assign" an IP address or
# MAC address to a switch data port. Instead, you "assign"
# the IP and MAC addresses in the controller by specifying
# packets that you want to receive or send. The "MAC" address
# reported by ifconfig for a switch data port is essentially
# meaningless. It is important to understand this if you
# want to create a functional router using OpenFlow.
class Switch( Node ):
"""A Switch is a Node that is running (or has execed?)
an OpenFlow switch."""
portBase = 1 # Switches start with port 1 in OpenFlow
dpidLen = 16 # digits in dpid passed to switch
def __init__( self, name, dpid=None, opts='', listenPort=None, **params):
"""dpid: dpid hex string (or None to derive from name, e.g. s1 -> 1)
opts: additional switch options
listenPort: port to listen on for dpctl connections"""
Node.__init__( self, name, **params )
self.dpid = self.defaultDpid( dpid )
self.opts = opts
self.listenPort = listenPort
if not self.inNamespace:
self.controlIntf = Intf( 'lo', self, port=0 )
def defaultDpid( self, dpid=None ):
"Return correctly formatted dpid from dpid or switch name (s1 -> 1)"
if dpid:
# Remove any colons and make sure it's a good hex number
dpid = dpid.replace( ':', '' )
assert len( dpid ) <= self.dpidLen and int( dpid, 16 ) >= 0
else:
# Use hex of the first number in the switch name
nums = re.findall( r'\d+', self.name )
if nums:
dpid = hex( int( nums[ 0 ] ) )[ 2: ]
else:
self.terminate() # Python 3.6 crash workaround
raise Exception( 'Unable to derive default datapath ID - '
'please either specify a dpid or use a '
'canonical switch name such as s23.' )
return '0' * ( self.dpidLen - len( dpid ) ) + dpid
def defaultIntf( self ):
"Return control interface"
if self.controlIntf:
return self.controlIntf
else:
return Node.defaultIntf( self )
def sendCmd( self, *cmd, **kwargs ):
"""Send command to Node.
cmd: string"""
kwargs.setdefault( 'printPid', False )
if not self.execed:
return Node.sendCmd( self, *cmd, **kwargs )
else:
error( '*** Error: %s has execed and cannot accept commands' %
self.name )
def connected( self ):
"Is the switch connected to a controller? (override this method)"
# Assume that we are connected by default to whatever we need to
# be connected to. This should be overridden by any OpenFlow
# switch, but not by a standalone bridge.
debug( 'Assuming', repr( self ), 'is connected to a controller\n' )
return True
def stop( self, deleteIntfs=True ):
"""Stop switch
deleteIntfs: delete interfaces? (True)"""
if deleteIntfs:
self.deleteIntfs()
def __repr__( self ):
"More informative string representation"
intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() )
for i in self.intfList() ] ) )
return '<%s %s: %s pid=%s> ' % (
self.__class__.__name__, self.name, intfs, self.pid )
class UserSwitch( Switch ):
"User-space switch."
dpidLen = 12
def __init__( self, name, dpopts='--no-slicing', **kwargs ):
"""Init.
name: name for the switch
dpopts: additional arguments to ofdatapath (--no-slicing)"""
Switch.__init__( self, name, **kwargs )
pathCheck( 'ofdatapath', 'ofprotocol',
moduleName='the OpenFlow reference user switch' +
                   ' (openflow.org)' )
if self.listenPort:
self.opts += ' --listen=ptcp:%i ' % self.listenPort
else:
self.opts += ' --listen=punix:/tmp/%s.listen' % self.name
self.dpopts = dpopts
@classmethod
def setup( cls ):
"Ensure any dependencies are loaded; if not, try to load them."
if not os.path.exists( '/dev/net/tun' ):
moduleDeps( add=TUN )
def dpctl( self, *args ):
"Run dpctl command"
listenAddr = None
if not self.listenPort:
listenAddr = 'unix:/tmp/%s.listen' % self.name
else:
listenAddr = 'tcp:127.0.0.1:%i' % self.listenPort
return self.cmd( 'dpctl ' + ' '.join( args ) +
' ' + listenAddr )
def connected( self ):
"Is the switch connected to a controller?"
status = self.dpctl( 'status' )
return ( 'remote.is-connected=true' in status and
'local.is-connected=true' in status )
@staticmethod
def TCReapply( intf ):
"""Unfortunately user switch and Mininet are fighting
over tc queuing disciplines. To resolve the conflict,
we re-create the user switch's configuration, but as a
leaf of the TCIntf-created configuration."""
if isinstance( intf, TCIntf ):
ifspeed = 10000000000 # 10 Gbps
minspeed = ifspeed * 0.001
res = intf.config( **intf.params )
if res is None: # link may not have TC parameters
return
# Re-add qdisc, root, and default classes user switch created, but
# with new parent, as setup by Mininet's TCIntf
parent = res['parent']
intf.tc( "%s qdisc add dev %s " + parent +
" handle 1: htb default 0xfffe" )
intf.tc( "%s class add dev %s classid 1:0xffff parent 1: htb rate "
+ str(ifspeed) )
intf.tc( "%s class add dev %s classid 1:0xfffe parent 1:0xffff " +
"htb rate " + str(minspeed) + " ceil " + str(ifspeed) )
def start( self, controllers ):
"""Start OpenFlow reference user datapath.
Log to /tmp/sN-{ofd,ofp}.log.
controllers: list of controller objects"""
# Add controllers
clist = ','.join( [ 'tcp:%s:%d' % ( c.IP(), c.port )
for c in controllers ] )
ofdlog = '/tmp/' + self.name + '-ofd.log'
ofplog = '/tmp/' + self.name + '-ofp.log'
intfs = [ str( i ) for i in self.intfList() if not i.IP() ]
self.cmd( 'ofdatapath -i ' + ','.join( intfs ) +
' punix:/tmp/' + self.name + ' -d %s ' % self.dpid +
self.dpopts +
' 1> ' + ofdlog + ' 2> ' + ofdlog + ' &' )
self.cmd( 'ofprotocol unix:/tmp/' + self.name +
' ' + clist +
' --fail=closed ' + self.opts +
' 1> ' + ofplog + ' 2>' + ofplog + ' &' )
if "no-slicing" not in self.dpopts:
            # Only TCReapply if slicing is enabled
sleep(1) # Allow ofdatapath to start before re-arranging qdisc's
for intf in self.intfList():
if not intf.IP():
self.TCReapply( intf )
def stop( self, deleteIntfs=True ):
"""Stop OpenFlow reference user datapath.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'kill %ofdatapath' )
self.cmd( 'kill %ofprotocol' )
super( UserSwitch, self ).stop( deleteIntfs )
class OVSSwitch( Switch ):
"Open vSwitch switch. Depends on ovs-vsctl."
def __init__( self, name, failMode='secure', datapath='kernel',
inband=False, protocols=None,
reconnectms=1000, stp=False, batch=False, **params ):
"""name: name for switch
failMode: controller loss behavior (secure|standalone)
datapath: userspace or kernel mode (kernel|user)
inband: use in-band control (False)
protocols: use specific OpenFlow version(s) (e.g. OpenFlow13)
Unspecified (or old OVS version) uses OVS default
reconnectms: max reconnect timeout in ms (0/None for default)
stp: enable STP (False, requires failMode=standalone)
batch: enable batch startup (False)"""
Switch.__init__( self, name, **params )
self.failMode = failMode
self.datapath = datapath
self.inband = inband
self.protocols = protocols
self.reconnectms = reconnectms
self.stp = stp
self._uuids = [] # controller UUIDs
self.batch = batch
self.commands = [] # saved commands for batch startup
@classmethod
def setup( cls ):
"Make sure Open vSwitch is installed and working"
pathCheck( 'ovs-vsctl',
moduleName='Open vSwitch (openvswitch.org)')
# This should no longer be needed, and it breaks
# with OVS 1.7 which has renamed the kernel module:
# moduleDeps( subtract=OF_KMOD, add=OVS_KMOD )
out, err, exitcode = errRun( 'ovs-vsctl -t 1 show' )
if exitcode:
error( out + err +
'ovs-vsctl exited with code %d\n' % exitcode +
'*** Error connecting to ovs-db with ovs-vsctl\n'
'Make sure that Open vSwitch is installed, '
'that ovsdb-server is running, and that\n'
'"ovs-vsctl show" works correctly.\n'
'You may wish to try '
'"service openvswitch-switch start".\n' )
exit( 1 )
version = quietRun( 'ovs-vsctl --version' )
cls.OVSVersion = findall( r'\d+\.\d+', version )[ 0 ]
@classmethod
def isOldOVS( cls ):
"Is OVS ersion < 1.10?"
return ( StrictVersion( cls.OVSVersion ) <
StrictVersion( '1.10' ) )
def dpctl( self, *args ):
"Run ovs-ofctl command"
return self.cmd( 'ovs-ofctl', args[ 0 ], self, *args[ 1: ] )
def vsctl( self, *args, **kwargs ):
"Run ovs-vsctl command (or queue for later execution)"
if self.batch:
cmd = ' '.join( str( arg ).strip() for arg in args )
self.commands.append( cmd )
else:
return self.cmd( 'ovs-vsctl', *args, **kwargs )
@staticmethod
def TCReapply( intf ):
"""Unfortunately OVS and Mininet are fighting
over tc queuing disciplines. As a quick hack/
workaround, we clear OVS's and reapply our own."""
if isinstance( intf, TCIntf ):
intf.config( **intf.params )
def attach( self, intf ):
"Connect a data port"
self.vsctl( 'add-port', self, intf )
self.cmd( 'ifconfig', intf, 'up' )
self.TCReapply( intf )
def detach( self, intf ):
"Disconnect a data port"
self.vsctl( 'del-port', self, intf )
def controllerUUIDs( self, update=False ):
"""Return ovsdb UUIDs for our controllers
update: update cached value"""
if not self._uuids or update:
controllers = self.cmd( 'ovs-vsctl -- get Bridge', self,
'Controller' ).strip()
if controllers.startswith( '[' ) and controllers.endswith( ']' ):
controllers = controllers[ 1 : -1 ]
if controllers:
self._uuids = [ c.strip()
for c in controllers.split( ',' ) ]
return self._uuids
def connected( self ):
"Are we connected to at least one of our controllers?"
for uuid in self.controllerUUIDs():
if 'true' in self.vsctl( '-- get Controller',
uuid, 'is_connected' ):
return True
return self.failMode == 'standalone'
def intfOpts( self, intf ):
"Return OVS interface options for intf"
opts = ''
if not self.isOldOVS():
# ofport_request is not supported on old OVS
opts += ' ofport_request=%s' % self.ports[ intf ]
# Patch ports don't work well with old OVS
if isinstance( intf, OVSIntf ):
intf1, intf2 = intf.link.intf1, intf.link.intf2
peer = intf1 if intf1 != intf else intf2
opts += ' type=patch options:peer=%s' % peer
return '' if not opts else ' -- set Interface %s' % intf + opts
def bridgeOpts( self ):
"Return OVS bridge options"
opts = ( ' other_config:datapath-id=%s' % self.dpid +
' fail_mode=%s' % self.failMode )
if not self.inband:
opts += ' other-config:disable-in-band=true'
if self.datapath == 'user':
opts += ' datapath_type=netdev'
if self.protocols and not self.isOldOVS():
opts += ' protocols=%s' % self.protocols
if self.stp and self.failMode == 'standalone':
opts += ' stp_enable=true'
opts += ' other-config:dp-desc=%s' % self.name
return opts
def start( self, controllers ):
"Start up a new OVS OpenFlow switch using ovs-vsctl"
if self.inNamespace:
raise Exception(
'OVS kernel switch does not work in a namespace' )
int( self.dpid, 16 ) # DPID must be a hex string
# Command to add interfaces
intfs = ''.join( ' -- add-port %s %s' % ( self, intf ) +
self.intfOpts( intf )
for intf in self.intfList()
if self.ports[ intf ] and not intf.IP() )
# Command to create controller entries
clist = [ ( self.name + c.name, '%s:%s:%d' %
( c.protocol, c.IP(), c.port ) )
for c in controllers ]
if self.listenPort:
clist.append( ( self.name + '-listen',
'ptcp:%s' % self.listenPort ) )
ccmd = '-- --id=@%s create Controller target=\\"%s\\"'
if self.reconnectms:
ccmd += ' max_backoff=%d' % self.reconnectms
cargs = ' '.join( ccmd % ( name, target )
for name, target in clist )
# Controller ID list
cids = ','.join( '@%s' % name for name, _target in clist )
# Try to delete any existing bridges with the same name
if not self.isOldOVS():
cargs += ' -- --if-exists del-br %s' % self
# One ovs-vsctl command to rule them all!
self.vsctl( cargs +
' -- add-br %s' % self +
' -- set bridge %s controller=[%s]' % ( self, cids ) +
self.bridgeOpts() +
intfs )
# If necessary, restore TC config overwritten by OVS
if not self.batch:
for intf in self.intfList():
self.TCReapply( intf )
# This should be ~ int( quietRun( 'getconf ARG_MAX' ) ),
# but the real limit seems to be much lower
argmax = 128000
@classmethod
def batchStartup( cls, switches, run=errRun ):
"""Batch startup for OVS
switches: switches to start up
run: function to run commands (errRun)"""
info( '...' )
cmds = 'ovs-vsctl'
for switch in switches:
if switch.isOldOVS():
# Ideally we'd optimize this also
run( 'ovs-vsctl del-br %s' % switch )
for cmd in switch.commands:
cmd = cmd.strip()
# Don't exceed ARG_MAX
if len( cmds ) + len( cmd ) >= cls.argmax:
run( cmds, shell=True )
cmds = 'ovs-vsctl'
cmds += ' ' + cmd
switch.cmds = []
switch.batch = False
if cmds:
run( cmds, shell=True )
# Reapply link config if necessary...
for switch in switches:
for intf in switch.intfs.values():
if isinstance( intf, TCIntf ):
intf.config( **intf.params )
return switches
def stop( self, deleteIntfs=True ):
"""Terminate OVS switch.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'ovs-vsctl del-br', self )
if self.datapath == 'user':
self.cmd( 'ip link del', self )
super( OVSSwitch, self ).stop( deleteIntfs )
@classmethod
def batchShutdown( cls, switches, run=errRun ):
"Shut down a list of OVS switches"
delcmd = 'del-br %s'
if switches and not switches[ 0 ].isOldOVS():
delcmd = '--if-exists ' + delcmd
# First, delete them all from ovsdb
run( 'ovs-vsctl ' +
' -- '.join( delcmd % s for s in switches ) )
# Next, shut down all of the processes
pids = ' '.join( str( switch.pid ) for switch in switches )
run( 'kill -HUP ' + pids )
for switch in switches:
switch.terminate()
return switches
OVSKernelSwitch = OVSSwitch
class OVSBridge( OVSSwitch ):
"OVSBridge is an OVSSwitch in standalone/bridge mode"
def __init__( self, *args, **kwargs ):
"""stp: enable Spanning Tree Protocol (False)
see OVSSwitch for other options"""
kwargs.update( failMode='standalone' )
OVSSwitch.__init__( self, *args, **kwargs )
def start( self, controllers ):
"Start bridge, ignoring controllers argument"
OVSSwitch.start( self, controllers=[] )
def connected( self ):
"Are we forwarding yet?"
if self.stp:
status = self.dpctl( 'show' )
            return 'STP_FORWARD' in status and 'STP_LEARN' not in status
else:
return True
class IVSSwitch( Switch ):
"Indigo Virtual Switch"
def __init__( self, name, verbose=False, **kwargs ):
Switch.__init__( self, name, **kwargs )
self.verbose = verbose
@classmethod
def setup( cls ):
"Make sure IVS is installed"
pathCheck( 'ivs-ctl', 'ivs',
moduleName="Indigo Virtual Switch (projectfloodlight.org)" )
out, err, exitcode = errRun( 'ivs-ctl show' )
if exitcode:
error( out + err +
'ivs-ctl exited with code %d\n' % exitcode +
'*** The openvswitch kernel module might '
'not be loaded. Try modprobe openvswitch.\n' )
exit( 1 )
@classmethod
def batchShutdown( cls, switches ):
"Kill each IVS switch, to be waited on later in stop()"
for switch in switches:
switch.cmd( 'kill %ivs' )
return switches
def start( self, controllers ):
"Start up a new IVS switch"
args = ['ivs']
args.extend( ['--name', self.name] )
args.extend( ['--dpid', self.dpid] )
if self.verbose:
args.extend( ['--verbose'] )
for intf in self.intfs.values():
if not intf.IP():
args.extend( ['-i', intf.name] )
for c in controllers:
args.extend( ['-c', '%s:%d' % (c.IP(), c.port)] )
if self.listenPort:
args.extend( ['--listen', '127.0.0.1:%i' % self.listenPort] )
args.append( self.opts )
logfile = '/tmp/ivs.%s.log' % self.name
self.cmd( ' '.join(args) + ' >' + logfile + ' 2>&1 </dev/null &' )
def stop( self, deleteIntfs=True ):
"""Terminate IVS switch.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'kill %ivs' )
self.cmd( 'wait' )
super( IVSSwitch, self ).stop( deleteIntfs )
def attach( self, intf ):
"Connect a data port"
self.cmd( 'ivs-ctl', 'add-port', '--datapath', self.name, intf )
def detach( self, intf ):
"Disconnect a data port"
self.cmd( 'ivs-ctl', 'del-port', '--datapath', self.name, intf )
def dpctl( self, *args ):
"Run dpctl command"
if not self.listenPort:
return "can't run dpctl without passive listening port"
return self.cmd( 'ovs-ofctl ' + ' '.join( args ) +
' tcp:127.0.0.1:%i' % self.listenPort )
class Controller( Node ):
"""A Controller is a Node that is running (or has execed?) an
OpenFlow controller."""
def __init__( self, name, inNamespace=False, command='controller',
cargs='-v ptcp:%d', cdir=None, ip="127.0.0.1",
port=6653, protocol='tcp', **params ):
self.command = command
self.cargs = cargs
self.cdir = cdir
# Accept 'ip:port' syntax as shorthand
if ':' in ip:
ip, port = ip.split( ':' )
port = int( port )
self.ip = ip
self.port = port
self.protocol = protocol
Node.__init__( self, name, inNamespace=inNamespace,
ip=ip, **params )
self.checkListening()
def checkListening( self ):
"Make sure no controllers are running on our port"
# Verify that Telnet is installed first:
out, _err, returnCode = errRun( "which telnet" )
if 'telnet' not in out or returnCode != 0:
raise Exception( "Error running telnet to check for listening "
"controllers; please check that it is "
"installed." )
listening = self.cmd( "echo A | telnet -e A %s %d" %
( self.ip, self.port ) )
if 'Connected' in listening:
servers = self.cmd( 'netstat -natp' ).split( '\n' )
pstr = ':%d ' % self.port
clist = servers[ 0:1 ] + [ s for s in servers if pstr in s ]
raise Exception( "Please shut down the controller which is"
" running on port %d:\n" % self.port +
'\n'.join( clist ) )
def start( self ):
"""Start <controller> <args> on controller.
Log to /tmp/cN.log"""
pathCheck( self.command )
cout = '/tmp/' + self.name + '.log'
if self.cdir is not None:
self.cmd( 'cd ' + self.cdir )
self.cmd( self.command + ' ' + self.cargs % self.port +
' 1>' + cout + ' 2>' + cout + ' &' )
self.execed = False
def stop( self, *args, **kwargs ):
"Stop controller."
self.cmd( 'kill %' + self.command )
self.cmd( 'wait %' + self.command )
super( Controller, self ).stop( *args, **kwargs )
def IP( self, intf=None ):
"Return IP address of the Controller"
if self.intfs:
ip = Node.IP( self, intf )
else:
ip = self.ip
return ip
def __repr__( self ):
"More informative string representation"
return '<%s %s: %s:%s pid=%s> ' % (
self.__class__.__name__, self.name,
self.IP(), self.port, self.pid )
@classmethod
def isAvailable( cls ):
"Is controller available?"
return which( 'controller' )
class OVSController( Controller ):
"Open vSwitch controller"
def __init__( self, name, **kwargs ):
kwargs.setdefault( 'command', self.isAvailable() or
'ovs-controller' )
Controller.__init__( self, name, **kwargs )
@classmethod
def isAvailable( cls ):
return (which( 'ovs-controller' ) or
which( 'test-controller' ) or
which( 'ovs-testcontroller' ))
class NOX( Controller ):
"Controller to run a NOX application."
def __init__( self, name, *noxArgs, **kwargs ):
"""Init.
name: name to give controller
noxArgs: arguments (strings) to pass to NOX"""
if not noxArgs:
warn( 'warning: no NOX modules specified; '
'running packetdump only\n' )
noxArgs = [ 'packetdump' ]
elif type( noxArgs ) not in ( list, tuple ):
noxArgs = [ noxArgs ]
if 'NOX_CORE_DIR' not in os.environ:
exit( 'exiting; please set missing NOX_CORE_DIR env var' )
noxCoreDir = os.environ[ 'NOX_CORE_DIR' ]
Controller.__init__( self, name,
command=noxCoreDir + '/nox_core',
cargs='--libdir=/usr/local/lib -v -i ptcp:%s ' +
' '.join( noxArgs ),
cdir=noxCoreDir,
**kwargs )
class Ryu( Controller ):
"Controller to run Ryu application"
def __init__( self, name, *ryuArgs, **kwargs ):
"""Init.
name: name to give controller.
ryuArgs: arguments and modules to pass to Ryu"""
homeDir = quietRun( 'printenv HOME' ).strip( '\r\n' )
ryuCoreDir = '%s/ryu/ryu/app/' % homeDir
if not ryuArgs:
warn( 'warning: no Ryu modules specified; '
'running simple_switch only\n' )
ryuArgs = [ ryuCoreDir + 'simple_switch.py' ]
elif type( ryuArgs ) not in ( list, tuple ):
ryuArgs = [ ryuArgs ]
Controller.__init__( self, name,
command='ryu-manager',
cargs='--ofp-tcp-listen-port %s ' +
' '.join( ryuArgs ),
cdir=ryuCoreDir,
**kwargs )
class RemoteController( Controller ):
"Controller running outside of Mininet's control."
def __init__( self, name, ip='127.0.0.1',
port=None, **kwargs):
"""Init.
name: name to give controller
ip: the IP address where the remote controller is
listening
port: the port where the remote controller is listening"""
Controller.__init__( self, name, ip=ip, port=port, **kwargs )
def start( self ):
"Overridden to do nothing."
return
def stop( self ):
"Overridden to do nothing."
return
def checkListening( self ):
"Warn if remote controller is not accessible"
if self.port is not None:
self.isListening( self.ip, self.port )
else:
for port in 6653, 6633:
if self.isListening( self.ip, port ):
self.port = port
info( "Connecting to remote controller"
" at %s:%d\n" % ( self.ip, self.port ))
break
if self.port is None:
self.port = 6653
warn( "Setting remote controller"
" to %s:%d\n" % ( self.ip, self.port ))
def isListening( self, ip, port ):
"Check if a remote controller is listening at a specific ip and port"
listening = self.cmd( "echo A | telnet -e A %s %d" % ( ip, port ) )
if 'Connected' not in listening:
warn( "Unable to contact the remote controller"
" at %s:%d\n" % ( ip, port ) )
return False
else:
return True
DefaultControllers = ( Controller, OVSController )
def findController( controllers=DefaultControllers ):
"Return first available controller from list, if any"
for controller in controllers:
if controller.isAvailable():
return controller
def DefaultController( name, controllers=DefaultControllers, **kwargs ):
"Find a controller that is available and instantiate it"
controller = findController( controllers )
if not controller:
raise Exception( 'Could not find a default OpenFlow controller' )
return controller( name, **kwargs )
def NullController( *_args, **_kwargs ):
"Nonexistent controller - simply returns None"
return None
| [
"[email protected]"
] | |
f8bc2d9b846c19133bad97101540f24fbf6596a6 | 72918242418d81e3ef2b9030553d3d8cc14f28ad | /code/RF-cv.py | d27454dd58f6eae12da993f41380f140aa46e724 | [] | no_license | qq-shu/TextCNN | e03e179454d52fcf00de4c4678d92e277b33cd4d | cdf61e9f72b1c0e7a71cb00b4f91d96b416a0ac3 | refs/heads/main | 2023-08-16T12:59:08.563400 | 2021-09-30T03:30:52 | 2021-09-30T03:30:52 | 411,896,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | from sklearn.ensemble import RandomForestClassifier as RF
# from sklearn import cross_validation
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
import pandas as pd
subtrainLabel = pd.read_csv('subtrainLabels.csv')
subtrainfeature = pd.read_csv("3gramfeature.csv")
subtrain = pd.merge(subtrainLabel,subtrainfeature,on='Id')
labels = subtrain.Class
subtrain.drop(["Class","Id"], axis=1, inplace=True)
subtrain = subtrain.values
# X_train, X_test, y_train, y_test = model_selection.train_test_split(subtrain,labels,test_size=0.4)
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold,KFold
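# KFold splits purely by position, while StratifiedKFold keeps the per-class
# label ratio roughly equal in every fold, which matters when classes are
# imbalanced; the script compares both below.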
srf = RF(n_estimators=200, n_jobs=-1)
kfolder = KFold(n_splits=10, shuffle=True, random_state=1)  # shuffle=True is required when random_state is set
scores4=cross_val_score(srf, subtrain, labels,cv=kfolder)
print(scores4)
print(scores4.mean())
sfolder = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
scores3=cross_val_score(srf, subtrain, labels,cv=sfolder)
print(scores3)
print(scores3.mean())
clf = KNeighborsClassifier()
kfolder = KFold(n_splits=10, shuffle=True, random_state=1)
scores=cross_val_score(clf, subtrain, labels,cv=kfolder)
print(scores)
print(scores.mean())
from sklearn.svm import SVC
clf2 = SVC(kernel='rbf', probability=True)
sfolder = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
scores2=cross_val_score(clf2, subtrain, labels,cv=sfolder)
print(scores2)
print(scores2.mean())
# srf = RF(n_estimators=200, n_jobs=-1)
# srf.fit(X_train,y_train)
# print (srf.score(X_test,y_test))
# y_pred = srf.predict(X_test)
# print (confusion_matrix(y_test, y_pred)) | [
"[email protected]"
] | |
99a64502bc4d3c80b07c903df53770314112a9ed | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/user/thread_profile/tests/test__ThreadProfile__magic.py | 62df5d60ace156b75fde3936db52d10717f48aed | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 1,575 | py | from datetime import datetime as DateTime
import vampytest
from ..flags import ThreadProfileFlag
from ..thread_profile import ThreadProfile
def test__ThreadProfile__repr():
"""
Tests whether ``ThreadProfile.__repr__`` works as intended.
"""
flags = ThreadProfileFlag(2)
joined_at = DateTime(2016, 5, 15)
thread_profile = ThreadProfile(
flags = flags,
joined_at = joined_at,
)
vampytest.assert_instance(repr(thread_profile), str)
def test__ThreadProfile__hash():
"""
Tests whether ``ThreadProfile.__hash__`` works as intended.
"""
flags = ThreadProfileFlag(2)
joined_at = DateTime(2016, 5, 15)
thread_profile = ThreadProfile(
flags = flags,
joined_at = joined_at,
)
vampytest.assert_instance(hash(thread_profile), int)
def test__ThreadProfile__eq():
"""
Tests whether ``ThreadProfile.__eq__`` works as intended.
"""
flags = ThreadProfileFlag(2)
joined_at = DateTime(2016, 5, 15)
keyword_parameters = {
'flags': flags,
'joined_at': joined_at,
}
thread_profile = ThreadProfile(**keyword_parameters)
vampytest.assert_eq(thread_profile, thread_profile)
vampytest.assert_ne(thread_profile, object())
for field_name, field_value in (
('flags', ThreadProfileFlag(4)),
('joined_at', None),
):
test_thread_profile = ThreadProfile(**{**keyword_parameters, field_name: field_value})
vampytest.assert_ne(thread_profile, test_thread_profile)
| [
"[email protected]"
] | |
0f38b4d274aa3da2f5bb6a683d2fbe2da35282a0 | d82517dab71b7184ed504c5f2237637524113a53 | /s14/黑马/面向对象3/设计4S店(2).py | 96645f9ea42415190db6b18912021d541ff56b00 | [] | no_license | hurongyang/UUUU | 2326dcb21026df8e508f7c1c832a15293a1d8ce4 | a84555704fe6db0a6ece8c5d3c0f0f810ca74df9 | refs/heads/master | 2021-01-10T10:13:43.910107 | 2017-11-07T11:17:48 | 2017-11-07T11:17:48 | 54,094,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | __author = "Hu Rongyang"
class CarStore():
def order(self,car_type):
if car_type == "索纳塔":
return Suonata()
elif car_type == "名图":
return Mintu()
class Car():
    def move(self):
        print("The car is moving...")
    def music(self):
        print("Playing music...")
    def stop(self):
        print("The car is stopping...")
class Suonata(Car):  # Sonata (索纳塔)
    pass
class Mintu(Car):  # Mistra (名图)
    pass
car_store = CarStore()  # create the 4S dealership
car = car_store.order("索纳塔")  # car now points to a Suonata() instance
car.move()
car.music()
car.stop()
| [
"[email protected]"
] | |
7a5b181ef53123c6ce727d9255968c438b8932f4 | c82774869bc428b27037f5f6a1437e8477f28abe | /Modeling/Deprecated/1_Word2vec_Embedding_spm.py | f41cd6bf394d4762adc3292d72de78c5a9c41e7c | [] | no_license | spongebob03/Music-Recommend | 3c5cd9befc9d787aa3b0196f8fdedce896470adb | 23f89c7b94a9dfc3c2eea304f33ba32cd5ceebd6 | refs/heads/main | 2023-08-23T20:26:46.207396 | 2021-10-18T12:31:29 | 2021-10-18T12:31:29 | 399,398,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,771 | py | import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
import sentencepiece as spm
from khaiii import KhaiiiApi
from gensim.models import Word2Vec
from utils.arena_util import load_json
class Kakao_Tokenizer :
def __init__(self) -> None:
self.tokenizer = KhaiiiApi()
        self.using_pos = ['NNG','SL','NNP','MAG','SN'] # common nouns, foreign words, proper nouns, general adverbs, numbers
    def re_sub(self, series: pd.Series) -> pd.Series:
        series = series.str.replace(pat=r'[ㄱ-ㅎ]', repl=r'', regex=True) # strip stray Hangul consonants (e.g. ㅋㅋ)
        series = series.str.replace(pat=r'[^\w\s]', repl=r'', regex=True) # remove special characters
        series = series.str.replace(pat=r'[ ]{2,}', repl=r' ', regex=True) # collapse repeated spaces
        series = series.str.replace(pat=r'[\u3000]+', repl=r'', regex=True) # remove ideographic spaces (U+3000)
return series
def flatten(self, list_of_list) :
flatten = [j for i in list_of_list for j in i]
return flatten
def get_token(self, title: str) :
        if len(title) == 0 or title == ' ':  # the tokenizer raises on empty/blank titles
return []
result = self.tokenizer.analyze(title)
        result = [(morph.lex, morph.tag) for split in result for morph in split.morphs] # list of (morpheme, POS tag) tuples
return result
def get_all_tags(self, df: pd.DataFrame) :
tag_list = df['tags'].values.tolist()
tag_list = self.flatten(tag_list)
return tag_list
def filter_by_exist_tag(self, tokens, exist_tags) :
token_tag = [self.get_token(x) for x in exist_tags]
token_itself = list(filter(lambda x: len(x)==1, token_tag))
token_itself = self.flatten(token_itself)
unique_tag = set(token_itself)
unique_word = [x[0] for x in unique_tag]
tokens = tokens.map(lambda x: list(filter(lambda x: x[0] in unique_word, x)))
tokens = tokens.map(lambda x : list(set(x)))
return tokens
def sentences_to_tokens(self, sentences, exist_tags=None) :
token_series = self.re_sub(pd.Series(sentences))
token_series = token_series.map(lambda x: self.get_token(x))
token_series = token_series.map(lambda x: list(filter(lambda x: x[1] in self.using_pos, x)))
if exist_tags is not None :
token_series = self.filter_by_exist_tag(token_series, exist_tags)
tokenized_stc = token_series.map(lambda x: [tag[0] for tag in x]).tolist()
return tokenized_stc
class SP_Tokenizer :
def __init__(self, model_type='bpe', vocab_size=24000) :
self.model_type = model_type
self.vocab_size = vocab_size
self.sp = spm.SentencePieceProcessor()
def train(self, input_file_path, model_path):
templates = ' --input={} \
--pad_id=0 \
--bos_id=1 \
--eos_id=2 \
--unk_id=3 \
--model_prefix={} \
--vocab_size={} \
--character_coverage=1.0 \
--model_type={}'
cmd = templates.format(input_file_path,
model_path,
                               self.vocab_size, # smaller vocab -> finer-grained segmentation
self.model_type) # unigram (default), bpe, char
spm.SentencePieceTrainer.Train(cmd)
print("tokenizer {} is generated".format(model_path))
self.set_model(model_path + '.model')
def set_model(self, model_path) :
try :
self.sp.Load(model_path)
except :
raise RuntimeError("Failed to load {}".format(model_path + '.model'))
return True
def sentences_to_tokens(self, sentences):
tokenized_stc = []
for sentence in sentences:
tokens = self.sp.EncodeAsPieces(sentence)
new_tokens = []
for token in tokens:
token = token.replace('▁', '')
if len(token) > 1:
new_tokens.append(token)
if len(new_tokens) > 1:
tokenized_stc.append(new_tokens)
return tokenized_stc
class string2vec :
def __init__(self, train_data, size=200, window=5, min_count=2, workers=8, sg=1, hs=1):
self.model = Word2Vec(size=size, window=window, min_count=min_count, workers=workers, sg=sg, hs=hs)
self.model.build_vocab(train_data)
def set_model(self, model_fn):
self.model = Word2Vec.load(model_fn)
def save_embeddings(self, emb_fn):
word_vectors = self.model.wv
vocabs = []
vectors = []
for key in word_vectors.vocab:
vocabs.append(key)
vectors.append(word_vectors[key])
df = pd.DataFrame()
df['voca'] = vocabs
df['vector'] = vectors
df.to_csv(emb_fn, index=False)
def save_model(self, md_fn):
self.model.save(md_fn)
print("word embedding model {} is trained".format(md_fn))
def show_similar_words(self, word, topn):
print(self.model.most_similar(positive=[word], topn=topn))
class Word2VecHandler :
def __init__(self, token_method, vocab_size, model_postfix) :
self.tokenizer = SP_Tokenizer(token_method, vocab_size)
#self.tokenizer = Kakao_Tokenizer()
self.w2v = None
self.token_method = token_method
self.vocab_size = vocab_size
self.model_postfix = model_postfix
def make_input4tokenizer(self, train_file_path, genre_file_path, tokenize_input_file_path, val_file_path=None, test_file_path=None):
def _wv_genre(genre):
genre_dict = dict()
for code, value in genre:
code_num = int(code[2:])
if not code_num % 100:
cur_genre = value
genre_dict[cur_genre] = []
else:
value = ' '.join(value.split('/'))
genre_dict[cur_genre].append(value)
genre_sentences = []
for key, sub_list in genre_dict.items():
sub_list = genre_dict[key]
key = ' '.join(key.split('/'))
if not len(sub_list):
continue
for sub in sub_list:
genre_sentences.append(key+' '+sub)
return genre_sentences
try:
playlists = load_json(train_file_path)
if val_file_path is not None:
playlists += load_json(val_file_path)
if test_file_path is not None:
playlists += load_json(test_file_path)
genre_all = load_json(genre_file_path)
genre_all_lists = []
for code, gnr in genre_all.items():
if gnr != '세부장르전체':
genre_all_lists.append([code, gnr])
genre_all_lists = np.asarray(genre_all_lists)
genre_stc = _wv_genre(genre_all_lists)
sentences = []
for playlist in playlists:
title_stc = playlist['plylst_title']
tag_stc = ' '.join(playlist['tags'])
date_stc = ' '.join(playlist['updt_date'][:7].split('-'))
sentences.append(' '.join([title_stc, tag_stc, date_stc]))
sentences = sentences + genre_stc
with open(tokenize_input_file_path, 'w', encoding='utf8') as f:
for sentence in sentences:
f.write(sentence + '\n')
except Exception as e:
print(e.with_traceback())
return False
return sentences
def train_word2vec(self, train_file_path, val_file_path, test_file_path, genre_file_path, tokenize_input_file_path, _submit_type):
sentences = self.make_input4tokenizer(
train_file_path, genre_file_path, tokenize_input_file_path, val_file_path, test_file_path)
if not sentences:
sys.exit(1)
tokenizer_name = 'model/tokenizer_{}_{}_{}'.format(self.token_method, self.vocab_size, self.model_postfix)
self.tokenizer.train(tokenize_input_file_path, tokenizer_name)
tokenized_sentences = self.tokenizer.sentences_to_tokens(sentences)
#tokenized_sentences = self.tokenizer.sentences_to_tokens(sentences, self.tokenizer.get_all_tags(pd.read_json(train_file_path)))
w2v_name = 'model/w2v_{}_{}_{}.model'.format(self.token_method, self.vocab_size, self.model_postfix)
print("start train_w2v.... name : {}".format(w2v_name))
self.w2v = string2vec(tokenized_sentences, size=200, window=5, min_count=1, workers=8, sg=1, hs=1)
print(self.w2v.model.wv)
self.w2v.save_model(w2v_name)
def get_plylsts_embeddings(self, train_data, question_data, _submit_type):
print('saving embeddings')
# train plylsts to vectors
t_plylst_title_tag_emb = {} # plylst_id - vector dictionary
for plylst in tqdm(train_data):
p_id = plylst['id']
p_title = plylst['plylst_title']
p_title_tokens = self.tokenizer.sentences_to_tokens([p_title])
if len(p_title_tokens):
p_title_tokens = p_title_tokens[0]
else:
p_title_tokens = []
p_tags = plylst['tags']
p_times = plylst['updt_date'][:7].split('-')
p_words = p_title_tokens + p_tags + p_times
word_embs = []
for p_word in p_words:
try:
word_embs.append(self.w2v.model.wv[p_word])
except KeyError:
pass
if len(word_embs):
p_emb = np.average(word_embs, axis=0).tolist()
else:
p_emb = np.zeros(200).tolist()
t_plylst_title_tag_emb[p_id] = p_emb
# val plylsts to vectors
for plylst in tqdm(question_data):
p_id = plylst['id']
p_title = plylst['plylst_title']
p_title_tokens = self.tokenizer.sentences_to_tokens([p_title])
p_songs = plylst['songs']
if len(p_title_tokens):
p_title_tokens = p_title_tokens[0]
else:
p_title_tokens = []
p_tags = plylst['tags']
p_times = plylst['updt_date'][:7].split('-')
p_words = p_title_tokens + p_tags + p_times
word_embs = []
for p_word in p_words:
try:
word_embs.append(self.w2v.model.wv[p_word])
except KeyError:
pass
if len(word_embs):
p_emb = np.average(word_embs, axis=0).tolist()
else:
p_emb = np.zeros(200).tolist()
t_plylst_title_tag_emb[p_id] = p_emb
return t_plylst_title_tag_emb
def get_file_paths(method, vocab_size, model_postfix):
genre_file_path = 'res/genre_gn_all.json'
tokenize_input_file_path = f'model/tokenizer_input_{method}_{vocab_size}_{model_postfix}.txt'
if model_postfix == 'val':
default_file_path = 'res'
train_file_path = 'res/train.json'
question_file_path = 'res/val.json'
val_file_path = question_file_path
test_file_path = None
elif model_postfix == 'test':
default_file_path = 'res'
train_file_path = 'res/train.json'
question_file_path = 'res/test.json'
val_file_path = 'res/val.json'
test_file_path = question_file_path
elif model_postfix == 'local_val':
default_file_path = 'arena_data'
train_file_path = f'{default_file_path}/orig/train.json'
question_file_path = f'{default_file_path}/questions/val.json'
val_file_path = None
test_file_path = None
return train_file_path, question_file_path, val_file_path, test_file_path, genre_file_path, tokenize_input_file_path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-mode', type=int, help="local_val: 0, val: 1, test: 2", default=2)
parser.add_argument('-vocab_size', type=int, help="vocabulary_size", default=24000)
args = parser.parse_args()
print(args)
# Resample Dataset Only
vocab_size = 13200
# Original Dataset
vocab_size = args.vocab_size
method = 'bpe'
if args.mode == 0:
default_file_path = 'arena_data'
model_postfix = 'local_val'
elif args.mode == 1:
default_file_path = 'res'
model_postfix = 'val'
elif args.mode == 2:
default_file_path = 'res'
model_postfix = 'test'
train_file_path, question_file_path, val_file_path, test_file_path, genre_file_path, tokenize_input_file_path = \
get_file_paths(method, vocab_size, model_postfix)
handler = Word2VecHandler(method, vocab_size, model_postfix)
handler.train_word2vec(train_file_path, val_file_path, test_file_path, genre_file_path, tokenize_input_file_path, model_postfix)
if model_postfix == 'local_val':
train = load_json(train_file_path)
question = load_json(question_file_path)
elif model_postfix == 'val':
train = load_json(train_file_path)
question = load_json(question_file_path)
elif model_postfix == 'test':
train = load_json(train_file_path)
val = load_json(val_file_path)
test = load_json(test_file_path)
train = train + val
question = test
plylst_title_tag_emb = handler.get_plylsts_embeddings(train, question, model_postfix)
np.save('{}/plylst_w2v_emb.npy'.format(default_file_path),
plylst_title_tag_emb)
print('Word2Vec Embedding Complete')
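    # Invocation sketch (assumes the res/ data layout referenced above):
    #   python 1_Word2vec_Embedding_spm.py -mode 2 -vocab_size 24000
    # trains the SentencePiece tokenizer and the Word2Vec model, then saves
    # the playlist embeddings to res/plylst_w2v_emb.npy.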
| [
"[email protected]"
] | |
b4158282f6e90ee810904eb5e6be6f5e5f95435d | 1fad121fea752aa3aee03f7665917ce9563e0d08 | /src/form/panel/VmdPanel.py | e75138d37ae45d096b8a52074f7f82a941f91b1f | [
"MIT"
] | permissive | JerryAJIAN/vmd_sizing | 0d382b9b94cdc3878e9d9a1c03f2c9c5f285ac6a | baad81eb40a21c9fa864344fbbf75cdab887c9c6 | refs/heads/master | 2022-11-18T03:57:57.111852 | 2020-07-06T15:10:27 | 2020-07-06T15:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,450 | py | # -*- coding: utf-8 -*-
#
import wx
import wx.lib.newevent
import sys
from form.panel.BasePanel import BasePanel
from form.parts.BaseFilePickerCtrl import BaseFilePickerCtrl
from form.parts.ConsoleCtrl import ConsoleCtrl
from form.worker.VmdWorkerThread import VmdWorkerThread
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MFormUtils, MFileUtils # noqa
from utils.MLogger import MLogger # noqa
logger = MLogger(__name__)
# Event definitions
(VmdThreadEvent, EVT_VMD_THREAD) = wx.lib.newevent.NewEvent()
class VmdPanel(BasePanel):
def __init__(self, frame: wx.Frame, parent: wx.Notebook, tab_idx: int):
super().__init__(frame, parent, tab_idx)
self.convert_vmd_worker = None
self.description_txt = wx.StaticText(self, wx.ID_ANY, "指定されたCSVファイル(ボーン+モーフ or カメラ)を、VMDファイルとして出力します。\n" \
+ "モデルモーション(ボーン・モーフ)とカメラモーション(カメラ)は別々に出力できます。\n" \
+ "CSVのフォーマットは、CSVタブで出力したデータと同じものを定義してください。", wx.DefaultPosition, wx.DefaultSize, 0)
self.sizer.Add(self.description_txt, 0, wx.ALL, 5)
self.static_line = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
self.sizer.Add(self.static_line, 0, wx.EXPAND | wx.ALL, 5)
        # CSV file picker (bone motion)
self.bone_csv_file_ctrl = BaseFilePickerCtrl(frame, self, u"CSVファイル(ボーン)", u"CSVファイルを選択してください", ("csv"), wx.FLP_DEFAULT_STYLE, \
u"VMDに変換したいボーンモーションのファイルパスを指定してください。", \
is_aster=False, is_save=False, set_no=0, required=False)
self.sizer.Add(self.bone_csv_file_ctrl.sizer, 0, wx.EXPAND | wx.ALL, 0)
        # CSV file picker (morph motion)
self.morph_csv_file_ctrl = BaseFilePickerCtrl(frame, self, u"CSVファイル(モーフ)", u"CSVファイルを選択してください", ("csv"), wx.FLP_DEFAULT_STYLE, \
u"VMDに変換したいモーフモーションのファイルパスを指定してください。", \
is_aster=False, is_save=False, set_no=0, required=False)
self.sizer.Add(self.morph_csv_file_ctrl.sizer, 0, wx.EXPAND | wx.ALL, 0)
self.static_line2 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
self.sizer.Add(self.static_line2, 0, wx.EXPAND | wx.ALL, 5)
        # CSV file picker (camera motion)
self.camera_csv_file_ctrl = BaseFilePickerCtrl(frame, self, u"CSVファイル(カメラ)", u"CSVファイルを選択してください", ("csv"), wx.FLP_DEFAULT_STYLE, \
u"VMDに変換したいカメラモーションのファイルパスを指定してください。", \
is_aster=False, is_save=False, set_no=0, required=False)
self.sizer.Add(self.camera_csv_file_ctrl.sizer, 0, wx.EXPAND | wx.ALL, 0)
btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        # "Run VMD conversion" button
self.vmd_btn_ctrl = wx.Button(self, wx.ID_ANY, u"VMD変換実行", wx.DefaultPosition, wx.Size(200, 50), 0)
self.vmd_btn_ctrl.SetToolTip(u"CSVをVMDに変換します。")
self.vmd_btn_ctrl.Bind(wx.EVT_BUTTON, self.on_convert_vmd)
btn_sizer.Add(self.vmd_btn_ctrl, 0, wx.ALL, 5)
self.sizer.Add(btn_sizer, 0, wx.ALIGN_CENTER | wx.SHAPED, 5)
        # Console
self.console_ctrl = ConsoleCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(-1, 420), \
wx.TE_MULTILINE | wx.TE_READONLY | wx.BORDER_NONE | wx.HSCROLL | wx.VSCROLL | wx.WANTS_CHARS)
self.console_ctrl.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT))
self.console_ctrl.Bind(wx.EVT_CHAR, lambda event: MFormUtils.on_select_all(event, self.console_ctrl))
self.sizer.Add(self.console_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        # Progress gauge
self.gauge_ctrl = wx.Gauge(self, wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL)
self.gauge_ctrl.SetValue(0)
self.sizer.Add(self.gauge_ctrl, 0, wx.ALL | wx.EXPAND, 5)
self.fit()
        # Bind the conversion-finished handler to the frame
self.frame.Bind(EVT_VMD_THREAD, self.on_convert_vmd_result)
# フォーム無効化
def disable(self):
self.bone_csv_file_ctrl.disable()
self.morph_csv_file_ctrl.disable()
self.camera_csv_file_ctrl.disable()
self.vmd_btn_ctrl.Disable()
    # Enable the form
def enable(self):
self.bone_csv_file_ctrl.enable()
self.morph_csv_file_ctrl.enable()
self.camera_csv_file_ctrl.enable()
self.vmd_btn_ctrl.Enable()
    # Run the VMD conversion
def on_convert_vmd(self, event: wx.Event):
        # Disable the form
        self.disable()
        # Pin the current tab
        self.fix_tab()
        # Clear the console
        self.console_ctrl.Clear()
        # Redirect output to the VMD panel console
sys.stdout = self.console_ctrl
wx.GetApp().Yield()
self.elapsed_time = 0
result = True
result = self.bone_csv_file_ctrl.is_valid() and result
if not result:
            # Finish sound
            self.frame.sound_finish()
            # Allow tab switching again
            self.release_tab()
            # Re-enable the form
            self.enable()
            # Restore the default output destination
sys.stdout = self.frame.file_panel_ctrl.console_ctrl
return result
        # Start the VMD conversion
        if self.convert_vmd_worker:
            logger.error("まだ処理が実行中です。終了してから再度実行してください。", decoration=MLogger.DECORATION_BOX)
        else:
            # Run in a separate worker thread
self.convert_vmd_worker = VmdWorkerThread(self.frame, VmdThreadEvent)
self.convert_vmd_worker.start()
return result
event.Skip()
    # VMD conversion finished handler
def on_convert_vmd_result(self, event: wx.Event):
self.elapsed_time = event.elapsed_time
        # Finish sound
        self.frame.sound_finish()
        # Allow tab switching again
        self.release_tab()
        # Re-enable the form
        self.enable()
        # Tear down the worker
        self.convert_vmd_worker = None
        # Hide progress
        self.gauge_ctrl.SetValue(0)
if not event.result:
logger.error("VMD変換処理に失敗しました。", decoration=MLogger.DECORATION_BOX)
event.Skip()
return False
logger.info("VMD変換が完了しました", decoration=MLogger.DECORATION_BOX, title="OK")
        # Restore the default output destination
sys.stdout = self.frame.file_panel_ctrl.console_ctrl
| [
"[email protected]"
] | |
8e2d2088f01a109bfc25a01cd8df43e2e61b6a55 | 0d4ea19eaab074f39bcfd07796244ed15b59d9d9 | /dino/folder.py | 2709ef84c8c8c3fb65c03ac641e82488ab21c077 | [
"MIT",
"Apache-2.0"
] | permissive | HimariO/ImgSim | b1af426d0922dd1bff2a6a6940a15129a581e3e0 | 9b99be7eac21bc2e86ea4c83b0ddd46e1b9b66d3 | refs/heads/main | 2023-08-13T12:11:32.756898 | 2021-10-14T09:19:21 | 2021-10-14T09:19:21 | 383,164,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | import os
import glob
import math
from typing import *
import torch
import numpy as np
import pytorch_lightning as pl
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from . import utils
class ImageFolder(Dataset):
    MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
EIG_VALS = [0.2175, 0.0188, 0.0045]
EIG_VECS = np.array([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]
])
def __init__(self, root_or_imgs, transform=None, max_indice=None, slice=None) -> None:
super().__init__()
if isinstance(root_or_imgs, list):
self.img_list = root_or_imgs
else:
self.img_list = glob.glob(os.path.join(root_or_imgs, '*.png'))
self.img_list += glob.glob(os.path.join(root_or_imgs, '*.jpg'))
self.max_num = np.inf if max_indice is None else max_indice
if slice is not None:
n = len(self.img_list)
self.img_list = self.img_list[math.floor(slice[0] * n): math.floor(slice[1] * n)]
self.transforms = transform
def __len__(self) -> int:
return min(len(self.img_list), self.max_num)
def __getitem__(self, index) -> torch.Tensor:
        n = len(self.img_list)
        if self.max_num < n:  # map capped indices back onto the full list (max_num may be np.inf)
            mul = math.ceil(n / self.max_num)
            index = (index * mul) % n
pil_img = Image.open(self.img_list[index]).convert("RGB")
if self.transforms:
img = self.transforms(pil_img)
else:
img = pil_img
# return {
# 'input': img,
# 'instance_target': index
# }
return img, index
class KpImageFolder(ImageFolder):
def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        n = len(self.img_list)
        if self.max_num < n:  # resample indices when the dataset is capped
            mul = math.ceil(n / self.max_num)
            index = (index * mul) % n
pil_img = Image.open(self.img_list[index]).convert("RGB")
if self.transforms:
datas = self.transforms(pil_img, index)
return datas
else:
raise RuntimeError('Need transforms to genreating keypoints')
class LitImgFolder(pl.LightningDataModule):
def __init__(self, root_or_imgs, transform, batch_size=32, num_worker=16,
split=0.01, step_per_epoch=100_000, shuffle=True):
super().__init__()
self.root_or_imgs = root_or_imgs
self.batch_size = batch_size
self.num_worker = num_worker
self.split = split
self.steps = step_per_epoch
self.transform = transform
self.shuffle = shuffle
assert self.batch_size % (self.transform.n_derive + 1) == 0, \
f"{self.batch_size} % {self.transform.n_derive}"
def train_dataloader(self) -> DataLoader:
slice_range = (0, 1 - self.split)
train_dataset = KpImageFolder(
self.root_or_imgs,
transform=self.transform,
slice=slice_range,
max_indice=self.steps)
train_loader = DataLoader(
train_dataset,
batch_size=self.batch_size // (self.transform.n_derive + 1),
num_workers=self.num_worker,
collate_fn=self.transform.collect,
worker_init_fn=utils.worker_init_fn,
shuffle=self.shuffle,
pin_memory=True)
return train_loader
def val_dataloader(self) -> DataLoader:
slice_range = (1 - self.split, 1)
val_dataset = KpImageFolder(
self.root_or_imgs,
transform=self.transform,
slice=slice_range,
max_indice=10000)
val_loader = DataLoader(
val_dataset,
batch_size=self.batch_size // (self.transform.n_derive + 1),
num_workers=self.num_worker,
collate_fn=self.transform.collect,
worker_init_fn=utils.worker_init_fn,
shuffle=self.shuffle,
pin_memory=True)
return val_loader
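# Usage sketch (not part of the original module): wire the data module into a
# PyTorch Lightning trainer. `MyModel` and `my_transform` (which must expose
# `n_derive` and `collect`, as asserted above) are hypothetical placeholders.
# data = LitImgFolder('/path/to/images', transform=my_transform,
#                     batch_size=32, num_worker=8)
# pl.Trainer(max_epochs=10).fit(MyModel(), datamodule=data)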
| [
"[email protected]"
] | |
93ea71308e6fd5d365bda5609b169c4f773ce234 | 2a3157ccb5376ffb03b13df4721afa405fbfc95d | /bin/virtualenv | 851dd46bb411994b649306face43a5dd104c9557 | [] | no_license | bopopescu/DemoDjango | 694501259322590d2959ef65cb6231ba1b1cf128 | b5ea252f0293ea63905a72045703b50815fbd673 | refs/heads/master | 2022-11-20T23:25:41.737807 | 2018-09-17T09:49:28 | 2018-09-17T09:49:28 | 282,543,262 | 0 | 0 | null | 2020-07-25T23:44:16 | 2020-07-25T23:44:16 | null | UTF-8 | Python | false | false | 241 | #!/home/jinesh/Documents/djangoproj/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
637a4d73b01376a8357c87c6dbc7abaa6c3643b7 | c0e41a88ff45bdb7bbdc8e4276427b276eec5d13 | /kyo/python/1_function/6_math.py | a143e59a07c796621f035b244c5de37240af8055 | [] | no_license | sunzhqiiang888/KingSun | 7440748825ed72c411f8ff63c478ad412efa51ac | 7473cc30d398f1ae96cbc7175198052d518fef94 | refs/heads/master | 2020-03-08T08:17:17.284210 | 2018-04-15T03:15:25 | 2018-04-15T03:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python3
def math(s):
stack = []
m = dict([x for x in ")( ][ }{ ><".split()])
for c in s:
if c in '([{<':
stack.append(c)
elif c in ')]}>':
try:
l = stack.pop()
            except IndexError:
                return "Missing left bracket!"
if m[c] != l:
return "左右不匹配!"
# if (c == ')' and l != '('
# or c == ']' and l != '['
# or c == '}' and l != '{'
# or c == '>' and l != '<'):
            #     return "Brackets don't match!"
if len(stack) != 0:
return "缺少右括号!"
return s
if __name__ == "__main__":
def main():
        print(math(input("Enter a bracket string: ")))
main()
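    # Quick sanity checks (a sketch, not in the original):
    #   math("(a[b]{c})") -> returns the input unchanged
    #   math("(]")        -> "Brackets don't match!"
    #   math("((")        -> "Missing right bracket!"
    #   math(")")         -> "Missing left bracket!"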
| [
"[email protected]"
] | |
3c1e759a36ac17443a120a3600f59e2a5af68931 | fdc1b3845db0228519267786c25ef5dcd3b45618 | /sensor_tests/rn2483/ttn.py | a616b421419ff6b4999e6043b07d9f983f0d30a3 | [] | no_license | NaimaClaesson91/Examensarbete2019 | e3e3a57bbf6a039da2903a2be1d991ca285bf1be | f577aad7100572b97af689ca38231c5e608ff48a | refs/heads/master | 2020-09-18T18:28:24.836358 | 2019-12-17T15:01:09 | 2019-12-17T15:01:09 | 224,165,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,857 | py | class Ttn:
'''Module for connecting to The Things Network.'''
def __init__(self, modem):
self.modem = modem
def configure_frequency_plan(self):
'''Configure the EU863-870 frequency plan.'''
# Calculate the duty cycle value.
duty_cycle_pct = 1
channels = 8
duty_cycle_pct_per_channel = duty_cycle_pct / channels
duty_cycle = int((100 / duty_cycle_pct_per_channel) - 1)
frequencies = [868100000, 868300000, 868500000, 867100000,
867300000, 867500000, 867700000, 867900000]
for channel in range(channels):
self.modem.lorawan.set_ch_duty_cycle(channel, duty_cycle)
self.modem.lorawan.set_ch_frequency(channel, frequencies[channel])
if frequencies[channel] == 868300000:
# SF7BW125 to SF12BW125 and SF7BW250
self.modem.lorawan.set_ch_data_rate_range(channel, 0, 6)
else:
# SF7BW125 to SF12BW125
self.modem.lorawan.set_ch_data_rate_range(channel, 0, 5)
# Activate channel.
self.modem.lorawan.set_ch_status(channel, "on")
# Set the TX output power to 14 dBm.
self.modem.lorawan.set_output_power(1)
def join_abp(self, dev_address, nwk_session_key, app_session_key):
'''Join the LoRaWAN network using Activation By Personalisation.'''
frequency_band = 868
# Reset LoRaWAN settings, all previously set keys will be cleared.
self.modem.lorawan.reset(frequency_band)
# Set the needed credentials.
self.modem.lorawan.set_device_address(dev_address)
self.modem.lorawan.set_network_session_key(nwk_session_key)
self.modem.lorawan.set_app_session_key(app_session_key)
# Turn on Adaptive Data Rate (ADR).
self.modem.lorawan.set_adaptive_data_rate("on")
# Set the initial data rate.
# SF12BW125 = 0, SF11BW125 = 1, SF10BW125 = 2, SF9BW125 = 3,
# SF8BW125 = 4, SF7BW125 = 5, SF7BW250 = 6
self.modem.lorawan.set_data_rate(5)
# Configure the second receive window for SF9BW125.
self.modem.lorawan.set_second_rx_window(3, 869525000)
# Configure the synchronization word.
self.modem.lorawan.set_sync_word("34")
# Save LoRaWAN settings to the EEPROM.
#self.modem.lorawan.save()
# Attempt to join the network.
return self.modem.lorawan.join("abp")
def join_otaa(self, dev_eui, app_eui, app_key):
'''Join the LoRaWAN network using Over-The-Air-Activation.'''
frequency_band = 868
# Reset LoRaWAN settings, all previously set keys will be cleared.
self.modem.lorawan.reset(frequency_band)
# Set the needed credentials.
self.modem.lorawan.set_device_eui(dev_eui)
self.modem.lorawan.set_app_eui(app_eui)
self.modem.lorawan.set_app_key(app_key)
# Turn on Adaptive Data Rate (ADR).
self.modem.lorawan.set_adaptive_data_rate("on")
# Set the initial data rate.
# SF12BW125 = 0, SF11BW125 = 1, SF10BW125 = 2, SF9BW125 = 3,
# SF8BW125 = 4, SF7BW125 = 5, SF7BW250 = 6
self.modem.lorawan.set_data_rate(5)
# Configure the synchronization word.
self.modem.lorawan.set_sync_word("34")
# Save LoRaWAN settings to the EEPROM.
#self.modem.lorawan.save()
# Attemt to join the network.
return self.modem.lorawan.join("otaa")
def transmit_unconfirmed_packet(self, data):
'''Transmit a message without expected confirmation.'''
return self.modem.lorawan.transmit("uncnf", 1, data)
def transmit_confirmed_packet(self, data):
'''Transmit a message with expected confirmation.'''
return self.modem.lorawan.transmit("cnf", 1, data)
| [
"[email protected]"
] | |
597ed3021932f77385f3b2c410bdedc42f93e5e5 | 5658a581490c45ec45161174364e9aa1d143e7dc | /p1app/views.py | 905e2488d5254ee142ac54a1a5ce26bc42e8c235 | [] | no_license | subhamkumarmal/fontvideo | e40d4c72bfde5b8c22fbf8b3dec36c4a4abef5c5 | 185c3bb1f43a21f8def8d0ad5dff75949e838c23 | refs/heads/master | 2023-07-07T15:32:05.779397 | 2021-08-11T08:12:54 | 2021-08-11T08:12:54 | 394,914,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.shortcuts import render
# Create your views here.
def Index(request):
return render(request,'p1app/index.html')
| [
"[email protected]"
] | |
f048eabd955bcc093164421606757595cab699a0 | ef15856f4e4cc789486ad2aa49225b48cd179c56 | /app/api/koko/crud.py | 1328187994286484879a2c3bceba626c474d9cf4 | [] | no_license | sbansa1/Koko | 7f6154c4f1ba47478a04585bd918ed51a57d781e | 2b24b2ac13c578c3fc983343e72f6dc91629737b | refs/heads/mast-1 | 2023-07-22T15:35:53.578284 | 2020-06-22T22:55:18 | 2020-06-22T22:55:18 | 273,943,071 | 0 | 0 | null | 2021-09-08T15:43:40 | 2020-06-21T16:30:21 | Python | UTF-8 | Python | false | false | 373 | py | from app.api.koko.model import Rental,Book,Customer
from app.extensions import db
def all_rental_by_customer(customer_id):
"""Gets all the books rented by the customer"""
all_books_query = Rental.query.join(Book,Rental.books)
all_books_query = all_books_query.filter(Rental.customer_id==customer_id).order_by(Rental.issued_on).all()
return all_books_query
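# Usage sketch (not part of the original module): inside a request/app
# context with the database initialised, fetch one customer's rentals
# ordered by issue date. The id value is a placeholder.
# rentals = all_rental_by_customer(customer_id=42)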
| [
"[email protected]"
] | |
608e364f79ffd4650f6c6bb5d9a4bf570f427b91 | d563508e16932deb44c59c602834e2817d94e583 | /nn/extended_layers.py | 4afe8c3250172c10641888bc010f284cc13f5ab3 | [] | no_license | abiraja2004/summarization-5 | 6866c15cc42958088e53eb02926b9143d045276c | 0aed22b793937bbe17e1c0621da6023a86327238 | refs/heads/master | 2021-09-08T16:10:14.686644 | 2018-03-11T05:45:14 | 2018-03-11T05:45:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,839 | py | import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams
from nn.advanced import RCNN
from nn.basic import LSTM
from nn.initialization import create_shared, random_init, sigmoid
class ExtRCNN(RCNN):
def forward(self, x_t, mask_t, hc_tm1):
hc_t = super(ExtRCNN, self).forward(x_t, hc_tm1)
hc_t = mask_t * hc_t + (1 - mask_t) * hc_tm1
return hc_t
def forward_all(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out * (self.order + 1)), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out * (self.order + 1),), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
if return_c:
return h
elif x.ndim > 1:
return h[:, :, self.n_out * self.order:]
else:
return h[:, self.n_out * self.order:]
def forward_all_2(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out * (self.order + 1)), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out * (self.order + 1),), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
if return_c:
return h
elif x.ndim > 1:
return h[:, :, self.n_out * self.order:]
else:
return h[:, self.n_out * self.order:]
def copy_params(self, from_obj):
self.internal_layers = from_obj.internal_layers
self.bias = from_obj.bias
class ExtLSTM(LSTM):
def forward(self, x_t, mask_t, hc_tm1):
hc_t = super(ExtLSTM, self).forward(x_t, hc_tm1)
hc_t = mask_t * hc_t + (1 - mask_t) * hc_tm1
return hc_t
def forward_all(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out * (self.order + 1),), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
if return_c:
return h
elif x.ndim > 1:
return h[:, :, self.n_out:]
else:
return h[:, self.n_out * self.order:]
def forward_all_2(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out * (self.order + 1),), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
        # Return the full hidden+cell concatenation; unlike forward_all,
        # no slicing is applied here.
        return h
def copy_params(self, from_obj):
self.internal_layers = from_obj.internal_layers
class HLLSTM(LSTM):
def forward(self, x_t, mask_t, hc_tm1):
hc_t = super(HLLSTM, self).forward(x_t, hc_tm1)
hc_t = mask_t * hc_t + (1 - mask_t) * hc_tm1
return hc_t
def forward_all(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out*2), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out *2,), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
if return_c:
return h[-1, :]
        elif x.ndim > 1:
            return h[-1:, :, self.n_out:]
        else:
            return h[-1:, self.n_out:]
def forward_all_x(self, x, mask, h0=None, return_c=False):
if h0 is None:
if x.ndim > 1:
h0 = T.zeros((x.shape[1], self.n_out*2), dtype=theano.config.floatX)
else:
h0 = T.zeros((self.n_out *2,), dtype=theano.config.floatX)
h, _ = theano.scan(
fn=self.forward,
sequences=[x, mask],
outputs_info=[h0]
)
if return_c:
return h
elif x.ndim > 1:
return h[:, :, self.n_out:]
else:
return h[ :, self.n_out:]
def copy_params(self, from_obj):
self.internal_layers = from_obj.internal_layers
class LossComponent(object):
def __init__(self, h_final_y):
self.h_final_y = h_final_y
def inner_argmin(self, gs, sys, min_prev):
return ifelse(T.lt(min_prev, sys - gs), min_prev, sys - gs)
def outer_sum(self, h_final, s):
min_prev = T.zeros(h_final.shape, dtype=theano.config.floatX)
return s + theano.scan(fn=self.inner_argmin, sequences=[self.h_final_y], non_sequences=[h_final, min_prev])
def get_loss(self, h_final):
sum_set = T.zeros(h_final.shape, dtype=theano.config.floatX)
(s, _) = theano.scan(fn=self.outer_sum, sequences=[h_final], non_sequences=[sum_set])
return s
class ZLayer(object):
def __init__(self, n_in, n_hidden, activation, layer, test=False):
self.n_in, self.n_hidden, self.activation, self.layer, self.test = \
n_in, n_hidden, activation, layer, test
self.MRG_rng = MRG_RandomStreams()
self.create_parameters()
def create_parameters(self):
n_in, n_hidden = self.n_in, self.n_hidden
activation = self.activation
self.w1 = create_shared(random_init((n_in,)), name="w1")
self.w2 = create_shared(random_init((n_hidden,)), name="w2")
bias_val = random_init((1,))[0]
self.bias = theano.shared(np.cast[theano.config.floatX](bias_val))
if self.layer == 'lstm':
rlayer = LSTM((n_in + 1), n_hidden, activation=activation)
else:
rlayer = RCNN((n_in + 1), n_hidden, activation=activation, order=2)
self.rlayer = rlayer
self.layers = [rlayer]
def forward(self, x_t, z_t, h_tm1, pz_tm1):
pz_t = sigmoid(
T.dot(x_t, self.w1) +
T.dot(h_tm1[:, -self.n_hidden:], self.w2) +
self.bias
)
xz_t = T.concatenate([x_t, z_t.reshape((-1, 1))], axis=1)
h_t = self.rlayer.forward(xz_t, h_tm1)
# batch
return h_t, pz_t
def forward_all(self, x, z):
assert x.ndim == 3
assert z.ndim == 2
xz = T.concatenate([x, z.dimshuffle((0, 1, "x"))], axis=2)
h0 = T.zeros((1, x.shape[1], self.n_hidden), dtype=theano.config.floatX)
h = self.rlayer.forward_all(xz)
h_prev = T.concatenate([h0, h[:-1]], axis=0)
assert h.ndim == 3
assert h_prev.ndim == 3
pz = sigmoid(
T.dot(x, self.w1) +
T.dot(h_prev, self.w2) +
self.bias
)
assert pz.ndim == 2
return pz
def sample(self, x_t, z_tm1, h_tm1):
pz_t = sigmoid(
T.dot(x_t, self.w1) +
T.dot(h_tm1[:, -self.n_hidden:], self.w2) +
self.bias
)
# batch
pz_t = pz_t.ravel()
z_t = T.cast(self.MRG_rng.binomial(size=pz_t.shape,
p=pz_t), theano.config.floatX)
xz_t = T.concatenate([x_t, z_t.reshape((-1, 1))], axis=1)
h_t = self.rlayer.forward(xz_t, h_tm1)
return z_t, h_t
def sample_pretrain(self, x_t, z_tm1, h_tm1):
pz_t = sigmoid(
T.dot(x_t, self.w1) +
T.dot(h_tm1[:, -self.n_hidden:], self.w2) +
self.bias
)
# batch
pz_t = pz_t.ravel()
z_t = T.cast(T.round(pz_t, mode='half_away_from_zero'), theano.config.floatX)
xz_t = T.concatenate([x_t, z_t.reshape((-1, 1))], axis=1)
h_t = self.rlayer.forward(xz_t, h_tm1)
return z_t, h_t
def sample_all(self, x):
if self.layer == 'lstm':
h0 = T.zeros((x.shape[1], self.n_hidden), dtype=theano.config.floatX)
else:
h0 = T.zeros((x.shape[1], self.n_hidden * (self.rlayer.order + 1)), dtype=theano.config.floatX)
z0 = T.zeros((x.shape[1],), dtype=theano.config.floatX)
([z, h], updates) = theano.scan(fn=self.sample, sequences=[x], outputs_info=[z0, h0])
assert z.ndim == 2
return z, updates
def sample_all_pretrain(self, x):
if self.layer == 'lstm':
h0 = T.zeros((x.shape[1], self.n_hidden), dtype=theano.config.floatX)
else:
h0 = T.zeros((x.shape[1], self.n_hidden * (self.rlayer.order + 1)), dtype=theano.config.floatX)
z0 = T.zeros((x.shape[1],), dtype=theano.config.floatX)
([z, h], updates) = theano.scan(fn=self.sample_pretrain, sequences=[x], outputs_info=[z0, h0])
assert z.ndim == 2
return z, updates
@property
def params(self):
return [x for layer in self.layers for x in layer.params] + \
[self.w1, self.w2, self.bias]
@params.setter
def params(self, param_list):
start = 0
for layer in self.layers:
end = start + len(layer.params)
layer.params = param_list[start:end]
start = end
self.w1.set_value(param_list[-3].get_value())
self.w2.set_value(param_list[-2].get_value())
self.bias.set_value(param_list[-1].get_value())
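# Usage sketch (an assumption, not from the original code): given token
# embeddings `x` of shape (seq_len, batch, n_in) as a Theano tensor, sample
# binary rationale masks and their probabilities.
# zlayer = ZLayer(n_in=200, n_hidden=30, activation=T.tanh, layer='lstm')
# z, updates = zlayer.sample_all(x)   # z: (seq_len, batch) in {0, 1}
# pz = zlayer.forward_all(x, z)       # per-position selection probabilities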
| [
"[email protected]"
] | |
099b882a7057d5cd24e0b98ae5aa752f70f5f128 | 30a8b69bd2e0a3f3c2c1c88fb3bd8a28e6fc4cd0 | /Part1/load_shapefile.py | 5f31c2a37d32ed6c638bb6a4a1c85920628b25c3 | [] | no_license | llord1/Mining-Georeferenced-Data | d49108f443922f02b90431ad7a9626ea17fd0554 | c71f2e151ccfc4a1a9c07b5fcf4e95b7f7ba70e9 | refs/heads/master | 2021-05-30T13:27:57.663015 | 2015-12-29T09:10:08 | 2015-12-29T09:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import sys
import shapefile
from shapely.geometry import shape
shp = shapefile.Reader(sys.argv[1])
print "Found", shp.numRecords, "records:"
pos = None
count = 0
for record in shp.records():
print " ", record[1]
if record[1] == sys.argv[2]:
pos = count
count += 1
if pos is None:
print >> sys.stderr, sys.argv[2], "not found in shapefile"
sys.exit()
print >> sys.stderr, "Using", sys.argv[2], "..."
manhattan = shape(shp.shapes()[pos])
print manhattan.contains(manhattan.centroid)
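# Invocation sketch (hypothetical shapefile/record names):
#   python load_shapefile.py nybb.shp Manhattan
# This lists the available records, then prints whether the chosen shape
# contains its own centroid (can be False for strongly concave polygons).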
| [
"[email protected]"
] | |
dca2f3644310a1e7c67b6ae89b9eb9ea3a0c23db | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/b4037f9e2f47429f9d3e6ac8ed0fa8bf.py | 1a70ecf442bfba0ffa267c895ed7411ce53dcf4a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 515 | py | class Bob:
def hey(self, ask):
conversation = Identify(ask)
if conversation.question():
return "Sure."
elif conversation.yell():
return "Woah, chill out!"
elif conversation.anything():
return "Fine. Be that way!"
else:
return "Whatever."
class Identify:
def __init__(self, ask):
self.ask = ask or ""
def question(self):
return self.ask.endswith("?")
    def yell(self):
        # isupper() is False for strings with no letters, so silence and
        # punctuation-only input fall through to the later checks.
        return self.ask.isupper()
    def anything(self):
        # True for empty or whitespace-only input.
        return self.ask.strip() == ""
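# Quick demo (a sketch, not part of the original solution):
# bob = Bob()
# assert bob.hey("How are you?") == "Sure."
# assert bob.hey("WATCH OUT!") == "Woah, chill out!"
# assert bob.hey("   ") == "Fine. Be that way!"
# assert bob.hey("Tom-ay-to.") == "Whatever."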
| [
"[email protected]"
] | |
d58966ee9ea4c514981564727e37c53a3d07c319 | 4790769cef64b0fa8c96ebe2b2a6220ac8c8e671 | /mmdetection/configs/icartoonface/fr50_lite_dcn_gn_scratch_icf_wf.py | 3532127f97418b90d32772f713a3ea148846ca4a | [
"Apache-2.0"
] | permissive | keshawnhsieh/icartoonface_v2 | 72341b3745c254e549376229689bf8fed8706a26 | eca1db41f7f067e2ca8dd5a5e791016ff0350ace | refs/heads/master | 2022-11-30T15:00:12.284543 | 2020-08-17T15:59:49 | 2020-08-17T15:59:49 | 272,957,476 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | _base_ = './fr50_lite_gn_scratch_icf_wf.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True))) | [
"[email protected]"
] | |
cb67d2165007b907e26b0adbbebfc239ddfcc091 | 33589e038d48d097ce6da36fa0d9178736e8daa2 | /default_cards/BaseStrike.py | 47a69fc966cfe58d493063f1ce79d52c8183c95f | [] | no_license | Aleexm/BCObot | bd62f9e6c2eb34861a526ecc2a2ee662088c4996 | d54a60001e0231d673dd25b6fc9fa373db1daaf9 | refs/heads/master | 2020-06-22T06:25:40.154230 | 2020-04-27T01:16:03 | 2020-04-27T01:16:03 | 197,657,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | import sys
sys.path.append("..")
from Card import Card
from Option import Option
class BaseStrike(Card):
def __init__(self):
super().__init__(name="Strike", type=Card.Type.base, attack=4, priority=3, defense=5, min_range=1)
def __repr__(self):
return super().__repr__()
| [
"[email protected]"
] | |
29c2b6fed2ce4af8db9eda6d8d3a6890a05f7aaa | 89a9d31cd37ffd95a4d2b942b8e183eea5f499d2 | /envs/cartpole-envs/cartpole_envs/envs/CartPoleSwingUpEnvCm05Pm07Pl07.py | 08d480f1b53e6d28e6bec199b1520fac89193ac0 | [] | no_license | mxu34/mbrl-gpmm | 9ba12364c1917972b917832e6ab3a3f381f4d2bf | 0cd89919cb45718aa29f902bc4d2cdeed17d4dc9 | refs/heads/master | 2022-11-06T14:44:22.031728 | 2020-06-23T01:21:08 | 2020-06-23T01:21:08 | 273,567,557 | 25 | 1 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from .cartpole_swingup_temp import *
class CartPoleSwingUpEnvCm05Pm07Pl07(CartPoleSwingUpEnv_template):
def __init__(self):
        super().__init__(masscart=0.5, masspole=0.7, polelength=0.7) | [
"[email protected]"
] | |
ce80b79b3d6c15b24c5624275137c3633ce47a76 | f6c3fa5d5ceaf8bb9a9ab52fc64d121ee0e92a51 | /E4.py | 234aaa54ca6039f45cfdecdfd7852797c83d5ba1 | [] | no_license | AdrianMartinezCodes/PythonScripts | 15cef21f6800207b54ba73763c86fcd96bdcace7 | 8009e78ac2914c96c001d70a4611d60ac91a6a47 | refs/heads/master | 2023-03-30T21:17:13.219192 | 2021-04-07T06:15:38 | 2021-04-07T06:15:38 | 355,431,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | num = int(input("Please enter a number to divide: "))
diviser_list = []
for i in range(1,num):
if num%i == 0:
diviser_list.append(i)
print(diviser_list)
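# Example (for reference): entering 12 prints [1, 2, 3, 4, 6], the proper
# divisors of 12; the number itself is excluded by range(1, num).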
| [
"[email protected]"
] | |
fc668b0f4beb102abcf466f2f54e0323dd94b77f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /k9usvZ8wfty4HwqX2_2.py | 6df3da8982061b94fd50d4d07581a39b1c4e148e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | """
Create a function to check whether a given number is **Cuban Prime**. A cuban
prime is a prime number that is a solution to one of two different specific
equations involving third powers of x and y. For this challenge we are only
concerned with the cuban numbers from the **first equation**. We **ignore**
the cuban numbers from the **second equation**.
### Equation Form
p = (x^3 - y^3)/(x - y), x = y + 1, y > 0
... and the first few cuban primes from this equation are 7, 19, 37, 61, 127,
271.
### Examples
cuban_prime(7) ➞ "7 is cuban prime"
cuban_prime(9) ➞ "9 is not cuban prime"
cuban_prime(331) ➞ "331 is cuban prime"
cuban_prime(40) ➞ "40 is not cuban prime"
### Notes
* The inputs are positive integers only.
* Check the **Resources** for help.
"""
is_prime = lambda p: p > 1 and all(p % i for i in range(2, int(p**0.5 + 1)))
def cuban_prime(n):
for y in range(n):
if n==3*y**2+3*y+1 and is_prime(n):return str(n)+' is cuban prime'
return str(n)+' is not cuban prime'
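# Quick check (a sketch, not part of the original solution):
# print(cuban_prime(7))   # "7 is cuban prime"  (y = 1: 3*1**2 + 3*1 + 1 = 7)
# print(cuban_prime(9))   # "9 is not cuban prime"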
| [
"[email protected]"
] | |
4cc349ba956cce3ae174f5c5f3c10fc651c8934b | f875c94f596edc4771ee5e526f0d576ddc195de8 | /blogapi/apiview.py | 098d47b90c5751f4aabe1ebdee78a688c2708bd7 | [] | no_license | OLAOSEBIKAN/reactNativeDjangoBlogApi | 6d673be18ae1e85fdd01eaad8938067cd77fae76 | b56c45eac49df7fd5fe5ae1e85bfefb47c62923d | refs/heads/main | 2023-02-10T13:09:34.929886 | 2021-01-08T17:04:44 | 2021-01-08T17:04:44 | 327,963,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | from django.contrib.auth import authenticate
from rest_framework import generics, status
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Post
from .serializers import RegisterSerializer, PostSerializer
class RegisterUser(generics.CreateAPIView):
authentication_classes = ()
permission_classes = ()
serializer_class = RegisterSerializer
class LoginView(APIView):
permission_classes = ()
    def post(self, request):
username = request.data.get("username")
password = request.data.get("password")
user = authenticate(username=username, password=password)
if user is not None:
try:
return Response({"token": user.auth_token.key})
except ValueError:
return Response({"error": "Unable To Login"},
status=status.HTTP_417_EXPECTATION_FAILED)
else:
return Response({"error": "Wrong Credentials"}, status=status.HTTP_400_BAD_REQUEST)
'''if user is not None:
try:
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
userprofile = UserProfile.objects.get(email=email)
user_token = Token()
user_token.user = userprofile
user_token.token = token
user_token.save()
return Response({"token": token})
except Error:
return Response({"error": "Unable "},
status=status.HTTP_417_EXPECTATION_FAILED)
else:
return Response({"error": "Unable to login with the provided Credentials"},
status=status.HTTP_400_BAD_REQUEST)'''
class PostList(generics.ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (IsAuthenticated,)
class PostDetail(generics.RetrieveDestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (IsAuthenticated,)
| [
"[email protected]"
] | |
dbcdb025a6cf7fbdf313e3dd3a1286e17ad37472 | 7d7462ca9b94934c71886a1fcf09ea7f16ca94b8 | /python/python_assignment_programs3/pro7.py | dca2fded3d4fbf8023a742259dfcbe5887f12670 | [] | no_license | karthik1017/mca | 16de1c0b87e5d5617de61dc08a5d61d6c7cf8ec7 | b2182c2bf62151150304aad23d476ee956dbf605 | refs/heads/master | 2020-04-22T20:10:53.308901 | 2019-08-14T07:22:30 | 2019-08-14T07:22:30 | 170,632,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | ch=65
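# Output (for reference): row i prints the i-th letter of the alphabet i times:
# A
# BB
# CCC
# DDDD
# EEEEE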
for i in range(0,5):
for x in range(1,i+2):
print(chr(ch), end="")
print()
ch+=1 | [
"[email protected]"
] | |
fc214f4aa80f4a5e84e0ec03c061e769d17ae787 | 5c9d0f7a0b74613dc633004dcaa000f36cdd6096 | /tests/functional/utils.py | b6e91ac64161740c0d15445020193287f35103df | [
"Apache-2.0"
] | permissive | mayani-nv/model_navigator | 05291ed5f2fea7fd286da38f231cf3e391d2f82a | 925255bbeb9be7ac6f35407267e87a29a33087ab | refs/heads/main | 2023-07-17T02:24:13.432380 | 2021-08-17T15:24:15 | 2021-08-17T15:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import sys
from pathlib import Path
from typing import Any, Dict, List
LOGGER = logging.getLogger(__name__)
def load_file(file, label, target):
if not os.path.isfile(file):
raise ValueError(f"Provided file {file} for {target} does not exists")
spec = importlib.util.spec_from_file_location(name=label, location=file)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # pytype: disable=attribute-error
return getattr(module, target)
def download_model_and_run_script(config_path: Path, script_with_args: List[str], downloader_path: str):
import sh
config = load_config(config_path)
src_model_path = Path(config["model_path"])
_download_model(config_path=config_path, output_model_path=src_model_path, downloader_path=downloader_path)
cmd_name, *args = script_with_args
LOGGER.debug(f"Run {cmd_name} {' '.join(args)}")
cmd = sh.Command(cmd_name)
cmd(args, _in=sys.stdin, _out=sys.stdout, _err=sys.stderr)
def expand_model_path(config):
# expand model_path with environment variables
src_model_path = Path(os.path.expandvars(config["model_path"]))
if "$" in src_model_path.as_posix():
raise ValueError(f"Could not expend vars for {src_model_path}")
# update and save model-navigator config file
config["model_path"] = src_model_path.as_posix()
LOGGER.debug(f"Expanded model_path: {src_model_path}")
return config
def save_config(config: Dict[str, Any], updated_config_path: Path):
import yaml
updated_config_path.parent.mkdir(parents=True, exist_ok=True)
with updated_config_path.open("w") as config_file:
yaml.dump(config, config_file)
def load_config(config_path):
import yaml
with config_path.open("r") as config_file:
return yaml.safe_load(config_file)
def _download_model(config_path: Path, output_model_path: Path, downloader_path: str):
config = load_config(config_path)
downloader_cls = load_file(downloader_path, "downloader_cls", "downloader_cls")
downloader = downloader_cls(config)
LOGGER.debug(f"Using downloader {downloader_cls}")
workdir = Path(__file__).parent.parent.parent.resolve()
from model_navigator.utils.docker import DockerImage
docker_image = DockerImage(f"{downloader.framework.image}:{config['container_version']}-{downloader.framework.tag}")
download_model_path = "tests/functional/download_model.py"
dataloader_kwargs = config["model_downloader_kwargs"]
docker_container = None
try:
docker_container = docker_image.run_container(
workdir_path=workdir,
mount_as_volumes=[
output_model_path.parent.resolve(),
config_path.parent.resolve(),
Path(__file__).parent.resolve(),
workdir,
*(Path(p) for p in dataloader_kwargs.get("mounts", [])),
*(Path(p) for p in downloader.mounts),
],
environment={"PYTHONPATH": workdir.as_posix(), **dataloader_kwargs.get("envs", {}), **downloader.envs},
)
docker_container.run_cmd(
(
"bash -c '"
f"python {download_model_path} -vvvv "
f"--downloader-path {downloader_path} "
f"--config-path {config_path.resolve()} "
f"--output-path {output_model_path.resolve()}"
"'"
),
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stdout,
)
LOGGER.debug("Finished download_model.py script")
finally:
if docker_container:
docker_container.kill()
def resolve_config_paths(config_paths):
CONFIG_SUFFIX = ".yaml"
resolved_config_paths = []
for entry in config_paths:
entry = Path(entry)
if "*" in entry.name:
resolved_config_paths.extend(list(entry.parent.glob(entry.name)))
elif entry.is_dir():
resolved_config_paths.extend(list(entry.glob("*")))
else:
resolved_config_paths.append(entry)
resolved_config_paths = [p for p in resolved_config_paths if p.suffix == CONFIG_SUFFIX]
return sorted(set(resolved_config_paths))
| [
"[email protected]"
] | |
4a896a192b264dbd2592b138999899b9d708d0f9 | 97b5505e6ebf0c2045b091cd7855f7b51e02185d | /1.py | 415ff7aaa3da212f9980231b243020bfdc1b0ade | [] | no_license | lala1919/python | 7e2c25ee92e2149a8302041385f163088558928e | bb2ca17ea942c76e9ac887a10673dd4d0370df52 | refs/heads/master | 2023-01-31T19:01:10.145026 | 2020-12-13T02:18:48 | 2020-12-13T02:18:48 | 316,654,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | # [변수와 데이터타입]
# comment: written with # (or ctrl + /), like a memo
# variable: a value that can change
id = "pencil"
print(id)
print("뭐로 쓸까요? - ",id,"으로 쓰죠.")
# Numeric variables: written without quotes; arithmetic works. Their data type is a number.
num = 10
plus = 1
minus = 2
multiply = 3
divide = 5
print(num + plus)
print(num - minus)
print(num * multiply) # 30
print(num / divide) # 2.0 (true division returns a float)
# int (integer): 1, 2, 3, 4
# float (real number): 1.0, 1.1 ...
num2 = 1.0
print(num2 + 2)
a = 5 # int
b = 1.1 # float
c = 'hello' # string
print(c/b) # error: a string can't be divided by a number
a = a + 2
a = a + 3
a = ((a-5)*3/5) + 2
print(a)
print(c,a+b)
# Swapping variables
a=None
b=None
a=10
b=20
temp =a
a=b
b=temp
print(a,b)
c=None
d=None
c=1
d=2
c,d=d,c
print(c,d)
# String variables: written inside quotes; text, so no arithmetic.
a = 'hello' # character
b = "world!" # string
c = "안녕하세요, 저는 'D.Yang'입니다."
d = """
안녕하세요, 저는 "D.Yang"입니다. """ # long string
e = """
안녕하세요,
저는 "D.Yang"입니다.
긴 글을 작성할 때도 이런식으로 스트링 지정을
해줄 수 있습니다.
줄 바꿈이 적용이 되니깐요.
"""
print(a,b,c,d,e)
# Boolean (logical) variables
a = 0
b = 2
c = bool(a < b)
print(c)
# Empty ("null") values are falsy; everything else is truthy
a = bool(a) # False
b = bool("") # False
c = bool([]) # False
d = bool(None) # False
print(a,b,c,d)
# Collection variables: list, dict, tuple
# list  a = []
# dict  a = {}
# tuple a = ()
a = [1, 2, 3]
b = {1, 2, 3} # note: {1, 2, 3} is a set literal; a dict needs key: value pairs
c = (1, 2, 3)
print(a)
print(b)
print(c)
b2 = {"no": 153, "class": "vip", "username": "yangdonghwa"}
print(b2)
print(b2['username'])
# Recap: variable data types
# int      integer       1
# float    real number   0.1, 1.0
# string   text          "", ''
# boolean  truth values  True, False
# list     a = []
# dict     a = {}
# tuple    a = ()
| [
"[email protected]"
] | |
8baf39710e255504a040b81bb6999e6e90b09408 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnn998.py | 6856125ce675d837a7794fd06121ad13f12b0ccf | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 120 | py | ii = [('UnitAI.py', 2), ('WadeJEB.py', 1), ('MereHHB3.py', 4), ('StorJCC.py', 2), ('SomeMMH.py', 2), ('MereHHB2.py', 1)] | [
"[email protected]"
] | |
d72a22dd378cd6a2c077576c25f292ec76cd2435 | e727251c8ad860aad795357889bf0ceed61c53ed | /MsvcScript.py | 5de4b64b0b0846a1b4a0e1e8b61dc03ebd6779ef | [] | no_license | Trietptm-on-Coding-Algorithms/idascripts | 1a06ac4ca979fe7f5ccbf1da8fba46fb5197b33c | 385a44b1a1a114c420224cfa638b29705ae9a103 | refs/heads/master | 2020-03-21T17:57:32.136033 | 2014-06-25T21:58:20 | 2014-06-25T21:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,099 | py | import idaapi
import Msvc.Rtti
Msvc.Rtti = reload(Msvc.Rtti)
print "*" * 40
print "dev_zzo's Uber MSVC Script!"
print "*" * 40
def rtti_scan() :
Msvc.Rtti.scanRtti()
def rtti_td(ea) :
try :
print Msvc.Rtti.TypeDescriptor(ea)
except Exception, e:
print str(e)
def hk_rtti_td() :
rtti_td(idaapi.get_screen_ea())
def rtti_bcd(ea) :
try :
print Msvc.Rtti.BaseClassDescriptor(ea)
except Exception, e:
print str(e)
def hk_rtti_bcd() :
rtti_bcd(idaapi.get_screen_ea())
def rtti_chd(ea) :
try :
print Msvc.Rtti.ClassHierarchyDescriptor(ea)
except Exception, e:
print str(e)
def hk_rtti_chd() :
rtti_chd(idaapi.get_screen_ea())
def rtti_col(ea) :
try :
print Msvc.Rtti.CompleteObjectLocator(ea)
except Exception, e:
print str(e)
def hk_rtti_col() :
rtti_col(idaapi.get_screen_ea())
def reg_hotkey(hotkey, func) :
hk_ctx = idaapi.add_hotkey(hotkey, func)
if hk_ctx is None:
print("Failed to register hotkey `%s'!" % hotkey)
reg_hotkey('Shift-Ctrl-Alt-T', hk_rtti_td)
reg_hotkey("Shift-Ctrl-Alt-B", hk_rtti_bcd)
reg_hotkey("Shift-Ctrl-Alt-H", hk_rtti_chd)
reg_hotkey("Shift-Ctrl-Alt-L", hk_rtti_col)
print "Call msvc_help() for help."
def msvc_help() :
print """ Welcome!
Useful functions:
rtti_scan()
Scan for RTTI information; mark up structures and VFTables.
This *will* take some time.
rtti_td(ea)
Parse the TypeDescriptor structure at the given ea.
rtti_bcd(ea)
Parse the BaseClassDescriptor structure at the given ea.
rtti_chd(ea)
Parse the ClassHierarchyDescriptor structure at the given ea.
rtti_col(ea)
Parse the CompleteObjectLocator structure at the given ea.
Useful key combos:
Shift-Ctrl-Alt-T
Parse the TypeDescriptor structure at the cursor position.
Shift-Ctrl-Alt-B
Parse the BaseClassDescriptor structure at the cursor position.
Shift-Ctrl-Alt-H
Parse the ClassHierarchyDescriptor structure at the cursor position.
Shift-Ctrl-Alt-L
Parse the CompleteObjectLocator structure at the cursor position.
"""
| [
"[email protected]"
] | |
3ca771e19dc6b23d14b4a8164764a44e5830a529 | 03195a6f98396fd27aedc3c06d81f1553fb1d16b | /pandas/core/_numba/executor.py | 0b59d0717a476b949054b145952a0c044d5e15b9 | [
"BSD-3-Clause"
] | permissive | huaxz1986/pandas | a08d80d27726fe141d449835b9a09265bca5b5e0 | ba2473834fedcf571d3f8245b4b24796873f2736 | refs/heads/master | 2023-06-11T02:20:14.544220 | 2022-01-12T04:40:06 | 2022-01-12T04:40:06 | 131,370,494 | 3 | 4 | BSD-3-Clause | 2018-04-28T03:51:05 | 2018-04-28T03:51:05 | null | UTF-8 | Python | false | false | 1,726 | py | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Callable,
)
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
get_jit_arguments,
)
def generate_shared_aggregator(
func: Callable[..., Scalar],
engine_kwargs: dict[str, bool] | None,
cache_key_str: str,
):
"""
Generate a Numba function that loops over the columns 2D object and applies
a 1D numba kernel over each column.
Parameters
----------
func : function
aggregation function to be applied to each column
engine_kwargs : dict
dictionary of arguments to be passed into numba.jit
cache_key_str: str
string to access the compiled function of the form
<caller_type>_<aggregation_type> e.g. rolling_mean, groupby_mean
Returns
-------
Numba function
"""
nopython, nogil, parallel = get_jit_arguments(engine_kwargs, None)
cache_key = (func, cache_key_str)
if cache_key in NUMBA_FUNC_CACHE:
return NUMBA_FUNC_CACHE[cache_key]
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def column_looper(
values: np.ndarray,
start: np.ndarray,
end: np.ndarray,
min_periods: int,
*args,
):
result = np.empty((len(start), values.shape[1]), dtype=np.float64)
for i in numba.prange(values.shape[1]):
result[:, i] = func(values[:, i], start, end, min_periods, *args)
return result
return column_looper
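# Usage sketch (an illustrative assumption, not pandas-internal code): build a
# column looper from a simple numba-compatible windowed-mean kernel.
#
# import numba
#
# @numba.jit(nopython=True)
# def window_mean(values, start, end, min_periods):
#     out = np.empty(len(start), dtype=np.float64)
#     for i in range(len(start)):
#         window = values[start[i]:end[i]]
#         out[i] = np.nan if len(window) < min_periods else window.mean()
#     return out
#
# looper = generate_shared_aggregator(window_mean, None, "rolling_mean")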
| [
"[email protected]"
] | |
99f8c1a49641c470c778fea08467ebaf332d4693 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /graph__networkx__d3__dot_graphviz/graphviz__examples/generate__as__bytes.py | cad13511e6c2200cf6958416c256790986119d81 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install graphviz
from graphviz import Digraph
g = Digraph('G', format='svg')
g.edge('Hello', 'World')
# Get bytes
print(g.pipe())
print(g.pipe('png'))
# OR:
# g.format = 'png'
# print(g.pipe())
print(g.pipe('pdf'))
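# A small extra (assumes write access to the working directory):
# the same bytes can be written to disk instead of printed.
with open('graph.png', 'wb') as f:
    f.write(g.pipe('png'))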
| [
"[email protected]"
] | |
86b66c96c2d6d9dd783f304f8ff52968a5670b04 | 0c4419e1822d2d9acebd18f688d4babf6780a239 | /runner_master/runner/utils/__init__.py | 42be164d10fb8587014ee1e7d7d1370a1871c631 | [
"MIT"
] | permissive | bigvideoresearch/SCC | a852d2200c47f6f628fc63d836f323d23035e446 | f26cdb6aaf248b5112812dbdac1f1b5086aebccc | refs/heads/master | 2023-04-11T10:29:55.529491 | 2021-04-19T02:24:00 | 2021-04-19T02:24:00 | 359,295,295 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # from .tensorboard import *
from .rank0_tensor_cache import *
from .misc import *
| [
"[email protected]"
] | |
d62a1e6ee7429d4badfe985780e74c2507865591 | 5eb02446f374a9c73f54a859668f46e80f4ef2fe | /ex35.py | f3e8b1edbe06119982737eb64ad94555c1bee7e2 | [] | no_license | Unique-Red/HardwaySeries | 1b93429da48b1b2c0c3b5a6de8e8f5c635cd959a | 3166a864c28e1ea49dce3f427f5f63429b4e2f53 | refs/heads/main | 2023-06-21T14:55:55.116124 | 2021-07-27T01:26:02 | 2021-07-27T01:26:02 | 370,170,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int(choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit()
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start() | [
"[email protected]"
] | |
517a32400b135c30df9b22a971e141d5474c896b | 9436682c3d21fbe2737724ce24f2d7c79b5f7598 | /transformerLM/my_dictionary.py | a7551eb478ba782c5c5f05f740d193af50565507 | [
"BSD-3-Clause"
] | permissive | grandeelee/codeswitch-challenge-LM | 5f2cea2db29b02e54d4b36f69b85a0e3b4689442 | d44f922d25df34a5905917b0351a7bbf3cccf22f | refs/heads/master | 2022-08-12T05:43:45.836690 | 2019-12-20T01:39:41 | 2019-12-20T01:39:41 | 210,730,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,135 | py | import os
from collections import Counter, OrderedDict
import numpy as np
from tqdm import tqdm
import mmap
import torch
from logging import getLogger
logger = getLogger(__name__)
def get_num_lines(file):
"""
a helper function to get the total number of lines from file read quickly
:param file:
:return:
"""
    with open(file, 'r+') as fp:
        buf = mmap.mmap(fp.fileno(), 0)
        lines = 0
        while buf.readline():
            lines += 1
        buf.close()
    return lines
# consider using these special characters for all subsequent dict
BOS_WORD = '<s>'
EOS_WORD = '</s>'
PAD_WORD = '<pad>'
UNK_WORD = '<unk>'
SPECIAL_WORD = '<special%i>'
N_SPECIAL_WORDS = 10
SEP_WORD = SPECIAL_WORD % 0
MASK_WORD = SPECIAL_WORD % 1
class Dictionary(object):
"""
1) before using anything from this class make sure to tokenize the data corpus according
to the desired format, it return indexed data with the dict.
2) Dictionary can be initialized from count file, self.from_vocab
or get dictionary from corpus -> 1) self.from_corpus
3) self.id2word, word2id, counts are all dictionaries
4) perform self.check_valid() if unsure
5) implement index_data which could be overwritten if calls for other format
6) data['dictionary'] contains id2word word2id and counts
data['positions'] contains shape: (n_sent, 2) where each item is (start, stop) of sent
data['sentences'] contains shape: (n_words,) sents are separated by eos index
data['unk_words'] contains the stats of unknown word distribution, (n_words, counts)
"""
def __init__(self):
self.id2word = {}
self.word2id = {}
self.counts = Counter()
def __len__(self):
"""
Returns the number of words in the dictionary.
"""
return len(self.id2word)
def __getitem__(self, i):
"""
Returns the word of the specified index.
"""
return self.id2word[i]
def __contains__(self, w):
"""
Returns whether a word is in the dictionary.
"""
return w in self.word2id
def __eq__(self, y):
"""
Compare this dictionary with another one.
"""
self.check_valid()
y.check_valid()
if len(self.id2word) != len(y):
return False
return all(self.id2word[i] == y[i] for i in range(len(y)))
def from_corpus(self, path, min_occur=0, most_freq=-1):
"""
path accept file path or a list of file paths
:param path:
:param min_occur: default 0
:param most_freq: default -1
:return:
"""
# if one big corpus
if isinstance(path, str):
assert os.path.isfile(path), path
data = open(path, 'r', encoding='utf-8')
for line in tqdm(data, total=get_num_lines(path)):
# the file has to be preprocessed with the desired tokenizer
self.counts.update(line.split())
data.close()
# if split into train test valid
if isinstance(path, list):
for p in path:
assert os.path.isfile(p), p
data = open(p, 'r', encoding='utf-8')
for line in tqdm(data, total=get_num_lines(p)):
# the file has to be preprocessed with the desired tokenizer
self.counts.update(line.split())
data.close()
# sort counts into descending order
ordered_counts = OrderedDict(sorted(self.counts.items(), key=lambda item: (-item[1], item[0])))
# takes care of special tokens
self.word2id = {BOS_WORD: 0, EOS_WORD: 1, PAD_WORD: 2, UNK_WORD: 3}
for i in range(N_SPECIAL_WORDS):
self.word2id[SPECIAL_WORD % i] = 4 + i
self.counts = {k: 0 for k in self.word2id.keys()}
for k, v in ordered_counts.items():
            # id2word is still empty at this point, so cap on word2id instead
            if most_freq > 0 and len(self.word2id) >= most_freq:
                break
if ordered_counts[k] > min_occur:
if k != UNK_WORD:
self.word2id[k] = len(self.word2id)
self.counts[k] = v
self.id2word = {v: k for k, v in self.word2id.items()}
self.check_valid()
def from_vocab(self, vocab_path):
"""
Create a dictionary from a vocabulary file. the vocab file must be in word count format
"""
skipped = 0
assert os.path.isfile(vocab_path), vocab_path
# takes care of special tokens, if the count file has no special token
self.word2id = {BOS_WORD: 0, EOS_WORD: 1, PAD_WORD: 2, UNK_WORD: 3}
for i in range(N_SPECIAL_WORDS):
self.word2id[SPECIAL_WORD % i] = 4 + i
self.counts = {k: 0 for k in self.word2id.keys()}
# read in vocab file
f = open(vocab_path, 'r', encoding='utf-8')
for i, line in enumerate(f):
if '\u2028' in line:
skipped += 1
continue
line = line.rstrip().split()
if len(line) != 2:
skipped += 1
continue
            if line[0] in self.word2id:
                skipped += 1
                logger.debug('%s already in vocab' % line[0])
                continue
            if not line[1].isdigit():
                skipped += 1
                logger.debug('Non-numeric count at line %s: %s' % (i, line))
                continue
self.word2id[line[0]] = 4 + N_SPECIAL_WORDS + i - skipped # shift because of extra words
self.counts[line[0]] = int(line[1])
f.close()
self.id2word = {v: k for k, v in self.word2id.items()}
logger.info("Read %i words from the vocabulary file." % len(self.id2word))
if skipped > 0:
logger.warning("Skipped %i empty lines!" % skipped)
self.check_valid()
def check_valid(self):
"""
Check that the dictionary is valid.
"""
# check the special tokens
assert self.word2id[BOS_WORD] == 0
assert self.word2id[EOS_WORD] == 1
assert self.word2id[PAD_WORD] == 2
assert self.word2id[UNK_WORD] == 3
assert all(self.id2word[4 + i] == SPECIAL_WORD % i for i in range(N_SPECIAL_WORDS))
# check the words
assert len(self.id2word) == len(self.word2id) == len(self.counts)
assert set(self.word2id.keys()) == set(self.counts.keys())
# check the word list and index tally
for i in range(len(self.id2word)):
assert self.word2id[self.id2word[i]] == i
# check it is sorted
last_count = 1e18
for i in range(4 + N_SPECIAL_WORDS, len(self.id2word) - 1):
count = self.counts[self.id2word[i]]
assert count <= last_count
last_count = count
def index(self, word, no_unk=False):
"""
Returns the index of the specified word.
"""
if no_unk:
return self.word2id[word]
else:
return self.word2id.get(word, self.word2id[UNK_WORD])
def max_vocab(self, max_vocab):
"""
Limit the vocabulary size.
"""
assert max_vocab >= 1
init_size = len(self)
self.id2word = {k: v for k, v in self.id2word.items() if k < max_vocab}
self.word2id = {v: k for k, v in self.id2word.items()}
self.counts = {k: v for k, v in self.counts.items() if k in self.word2id}
self.check_valid()
logger.info("Maximum vocabulary size: %i. Dictionary size: %i -> %i (removed %i words)."
% (max_vocab, init_size, len(self), init_size - len(self)))
def min_count(self, min_count):
"""
Threshold on the word frequency counts.
"""
assert min_count >= 0
init_size = len(self)
self.id2word = {k: v for k, v in self.id2word.items() if
self.counts[self.id2word[k]] >= min_count or k < 4 + N_SPECIAL_WORDS}
self.word2id = {v: k for k, v in self.id2word.items()}
self.counts = {k: v for k, v in self.counts.items() if k in self.word2id}
self.check_valid()
logger.info("Minimum frequency count: %i. Dictionary size: %i -> %i (removed %i words)."
% (min_count, init_size, len(self), init_size - len(self)))
def write_vocab(self, path):
"""
write a list to file separated by '\n' in the format of word count
"""
with open(path, mode='w', encoding='utf-8') as f:
f.writelines('{} {}'.format(k, v) + '\n' for k, v in self.counts.items())
@staticmethod
def index_data(dico, path, bin_path=None):
"""
Index sentences with a dictionary. The dictionary will not keep the data.
return one big list of indexed ids, special token are not added at this stage
so the corpus should only have <unk> and </s> as a special token
"""
positions = []
sentences = []
unk_words = {}
eos_index = dico.word2id[EOS_WORD]
unk_index = dico.word2id[UNK_WORD]
# index sentences
f = open(path, 'r', encoding='utf-8')
for i, line in tqdm(enumerate(f), total=get_num_lines(path)):
s = line.rstrip().split()
            # skip empty sentences
            if len(s) == 0:
                logger.debug("Empty sentence in line %i." % i)
                continue
# index sentence words
count_unk = 0
indexed = []
for w in s:
word_id = dico.index(w, no_unk=False)
                # if we find an unexpected special word (not <unk>), skip that word
if 0 <= word_id < 4 + N_SPECIAL_WORDS and word_id != unk_index:
logger.warning('Found unexpected special word "%s" (%i)!!' % (w, word_id))
continue
assert word_id >= 0
indexed.append(word_id)
# useful to see the unk distribution
if word_id == unk_index:
unk_words[w] = unk_words.get(w, 0) + 1
count_unk += 1
# add sentence
# a list of start stop
positions.append([len(sentences), len(sentences) + len(indexed)])
# a huge list of index
sentences.extend(indexed)
sentences.append(eos_index) # EOS index
f.close()
# tensorize data
positions = np.int64(positions)
if len(dico) < 1 << 16:
sentences = np.uint16(sentences)
elif len(dico) < 1 << 31:
sentences = np.int32(sentences)
else:
raise Exception("Dictionary is too big.")
assert sentences.min() >= 0
data = {
'dictionary': dico,
'positions': positions,
'sentences': sentences,
'unk_words': unk_words,
}
if bin_path:
logger.info("Saving the data to %s ..." % bin_path)
torch.save(data, bin_path, pickle_protocol=4)
return data
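# --- Hedged usage sketch (illustration only; the tiny corpus is made up) ---
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("hello world\nhello there\n")
        corpus = f.name
    dico = Dictionary()
    dico.from_corpus(corpus)                    # build the vocab
    data = Dictionary.index_data(dico, corpus)  # index the same corpus
    print(len(dico), data["sentences"])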
| [
"[email protected]"
] | |
923c8acffc221e4307035641e78cc6c987a1417d | 5cf5ec4cec8c312c8a16efacceb3612e5d5c5fb4 | /code2021/day16/p1.py | 50ce24472bfd6446b5b8950f5fe4376e401ac9e2 | [] | no_license | zac112/adventOfCode | a9523f4ff7dc4f3af0137807d6e09eb9692531bf | 6f5955a4c827c3513e0d83877708175976ceb204 | refs/heads/master | 2022-12-26T02:17:41.554781 | 2022-12-25T23:45:53 | 2022-12-25T23:45:53 | 160,374,492 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | with open('data.txt') as f:
data = f.readline().strip()
#data = "A0016C880162017C3686B18A3D4780"
#data = "C0015000016115A2E0802F182340"
# pad to 4 bits per hex digit; bin() alone drops leading zeros
binary = bin(int(data, base=16))[2:].zfill(len(data) * 4)
print(binary)
binary = list(binary)
class Literal():
def __init__(self,val, version):
self.val = int("".join(val),2)
self.version = int(version,2)
def __repr__(self):
version = self.version
value = self.val
return f"Literal {value=}, {version=}"
def getSubitems(self):
return []
class Operator():
def __init__(self,typeId, packets, version):
self.id = int(typeId,2)
self.version = int(version,2)
self.packets = packets
def __repr__(self):
version = self.version
typeId = self.id
item = self.packets
res = f"Operator {typeId=}"
res += "\n {"+"\n".join([repr(x) for x in item])+"}\n"
res += f"{version=}"
return res
def getSubitems(self):
return self.packets
def parsePacket(binary):
def readbits(binary, num):
res = []
for _ in range(num):
res.append(binary.pop(0))
return "".join(res)
#print("Parsing","".join(binary))
version = readbits(binary,3)
packetType = readbits(binary,3)
if int(packetType, base=2)==4:
subpacket = []
while (t := readbits(binary,5))[0] == '1':
subpacket += t[1:]
subpacket += t[1:]
return Literal(subpacket, version)
else:
lengthID = readbits(binary,1)
packets = []
#print("Operator lengthID",lengthID)
if lengthID == '0':
subpacketLength = int(readbits(binary,15),2)
#print("length:",subpacketLength)
subpacketbits = list(readbits(binary, subpacketLength))
while subpacketbits:
packets.append(parsePacket(subpacketbits))
else:
subpackets = int(readbits(binary,11),2)
#print("packets:",subpackets)
for packet in range(subpackets):
packets.append(parsePacket(binary))
return Operator(packetType, packets, version)
def sumVersions(packet):
versionsum = packet.version
for p in packet.getSubitems():
versionsum += sumVersions(p)
return versionsum
packet = parsePacket(binary)
print(sumVersions(packet))
| [
"[email protected]"
] | |
95748eb243f692d15954f276e6a1972724e02f4b | 82922001f908e899d594c5b81567650d6179b64e | /SQL/ReadCSV.py | 673830f1fb27fae6e2aa16e0d9ad31f063cbb93e | [] | no_license | yves40/python | 29d1ec3dc33d2d7bf38a0fe192284fa065af8948 | 258777bea1be5d40853c4cbbad31e789300fe32a | refs/heads/master | 2022-09-29T00:32:03.407712 | 2022-09-12T08:38:32 | 2022-09-12T08:38:32 | 164,092,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | #!/usr/bin/python
# coding: utf8
#-----------------------------------------------------------------------------------------------------------
# ReadCSV.py
#
# Aug 09 2016 Initial
#-----------------------------------------------------------------------------------------------------------
from __future__ import unicode_literals
#
# Look @ Windows command : chcp
#
import logging
import sys, string
import encodings
import sqlite3 as lite
#import codecs
# Configure a logger
logging.basicConfig(level=logging.INFO)
# create a file handler
handler = logging.FileHandler('mylogger.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Get a logger
log = logging.getLogger(__name__)
# add the file handler to the logger
log.addHandler(handler)
print
log.info('Starting')
con = lite.connect('../testdb')
cur = con.cursor()
log.info('Begin transaction')
cur.execute("""begin transaction""")
print ''.join('- ' + e + '\n' for e in sorted(set(encodings.aliases.aliases.values())))
log.info('Entering the file loop')
print
for fn in sys.argv[1:]:
try:
# fin = codecs.open(fn, 'r', 'utf-8')
fin = open(fn, 'r')
except:
(type, detail) = sys.exc_info()[:2]
print "\n*** %s: %s: %s ***" % (fn, type, detail)
continue
print "\n*** Contents of", fn, "***\n\n"
# Print the file, with line numbers.
lno = 1
line = u''
    while lno < 10:  # test cap: only the first lines of each file are processed
line = fin.readline()
if not line:
break;
# line.encode('utf-8')
print '%5d: %-s' % (lno, line[:-1])
fields = string.split(line, ',')
region = fields[2]
department = fields[5]
commune = fields[8]
codepostal = fields[9]
latitude = fields[11]
longitude = fields[12]
print '\t', region, '-', department, '-', commune, '-', codepostal, '-', latitude , '-', longitude
lno = lno + 1
cur.execute("""INSERT INTO communes (nomregion, departement, commune, codepostal, latitude, longitude) VALUES (?,?,?,?,?,?)""",
(region, department, commune, codepostal, latitude, longitude))
fin.close()
print
log.info('Commit')
cur.execute("""Commit""")
con.close()
print
log.info('Exit')
print | [
"[email protected]"
] | |
ef456e67563e978d78cbc6a2c22cf101e2d80c1b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/adjur.py | b95db66e7aeb7eacf64d904654b1562db5591749 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 370 | py | ii = [('GodwWSL2.py', 4), ('FerrSDO3.py', 1), ('CarlTFR.py', 2), ('LyttELD.py', 1), ('TalfTAC.py', 2), ('KiddJAE.py', 1), ('BailJD1.py', 1), ('ClarGE.py', 1), ('LandWPA.py', 1), ('AinsWRR.py', 1), ('LandWPA2.py', 2), ('TalfTIT.py', 1), ('NewmJLP.py', 1), ('SoutRD.py', 1), ('HowiWRL2.py', 1), ('BailJD3.py', 1), ('HogaGMM.py', 1), ('AinsWRR2.py', 3), ('HogaGMM2.py', 1)] | [
"[email protected]"
] | |
32e7fa01aaac221c663b3755550af8ca71e6301f | 827f75462d8f78abc416128e33ee25f278606e18 | /Program_Python_code/IOT_02/temp.py | 7114a6a0a836d63538f4e522ad8af85031904a01 | [
"MIT"
] | permissive | skyhigh8591/VocationalTraining_LearningCode | 5a2573933a9a290195987b6580702105263cc67c | 5f3c0f11874618919002126863772e0dd06a1072 | refs/heads/master | 2022-12-13T20:14:28.316342 | 2020-09-09T02:15:14 | 2020-09-09T02:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
listx = [1,5,7,9,13,16]
listy = [15,50,80,40,70,50]
plt.plot(listx,listy)
plt.show() | [
"[email protected]"
] | |
f4f5aba0f8f2e294996ec623c74604d180bfc276 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/2D_20200722181027.py | ea6e6f492e2b93ebfeedfabbb4e5edb694f6f6ce | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | def array(n,m):
# where n is row size and m is column size
    array = [[0 for x in range(m)] for x in range(n)]  # n rows of m columns, matching the comment above
print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# where the first arguement reps the row and second arguement reps the column
print(a[0][3])
from sys import maxsize  # maxint was removed in Python 3
def hourGlass(arr):
# you have a 2d array
# get max hour glass
# var maxCount to keep record of the max count
# what do you know about an hourglass
# the indicies fall in a pattern where
# i and i+2 are not equal to 0 and i + 1 is equal to 0
    maxCount = -maxsize
if arr !=[]:
for i in range(len(arr)-2):
totalCount = 0
# remember j is looping through arr[i]
for j in range(len(arr[i])-2):
totalCount = arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]
print('total',totalCount)
if totalCount > maxCount:
maxCount = totalCount
print(maxCount)
else:
return 0
print(hourGlass([[-1,-1,0,-9,-2,-2],[-2,-1,-6,-8,-2,-5],[-1,-1,-1,-2,-3,-4],[-1,-9,2,-4,-4,-5],[-7,-3,-3,-2,-9,-9],[-1,-3,-1,-2,-4,-5]])) | [
"[email protected]"
] | |
036d79f27089d61da8159cdbab9c887da4e45d27 | ac790417f7318ab48946dc1226178846cf2a0216 | /resume/forms.py | 283e032e8ecf5ff8fac23a50b9ee5981cb697d85 | [] | no_license | Ubaydah/Resume | f3b544be9305d11b5208d8088c92e16c4f6e9a75 | 63bc7c85acff2f047bd4628c86bc59c8a0c1429a | refs/heads/main | 2023-07-05T03:05:33.893114 | 2021-08-21T16:40:18 | 2021-08-21T16:40:18 | 398,582,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from django.db.models import fields
from django.forms import ModelForm
from .models import Contact
class ContactForm(ModelForm):
class Meta:
model = Contact
fields = '__all__'
| [
"[email protected]"
] | |
776d0b19881407fcf3376a214c627dbe68eb9edd | b2e3878ffd64e00cdfc2f4a2227f5b48aa0b7860 | /Modules/llpgenerator.py | 850c6c82240284fc06cb48062dedd35b95580905 | [] | no_license | TheoMoretti/PreShower_ALP-W | c5e149f664aa440b192c729be7540cdea6807cd2 | a93240c7390f0895561a734196beee637b0d8c38 | refs/heads/main | 2023-07-05T00:16:46.324734 | 2021-08-18T14:24:00 | 2021-08-18T14:24:00 | 313,635,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,832 | py | import pythia8
from various import *
from llpmodel import LLPModel
class LLPGenerator():
def __init__(self,model, setup="theory"):
self.model = model
self.setup = setup
#specify Dark Photon model
def specify_darkphoton(self,pythia,mass):
pythia.readString("ProcessLevel:all = off");
pythia.readString("PartonLevel:FSR = off");
pythia.readString("111:mayDecay = off");
pythia.readString("310:mayDecay = off");
pythia.readString("130:mayDecay = off");
#define LLP model
llpmodel = LLPModel(self.model, mass=mass, coupling=1)
channels, branching = llpmodel.decays, llpmodel.branching
#Decay into Hadrons
if mass <= 1.50001:
#use Y(1s)
pythia.readString("553:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore 'other' channel and decay into quarks
if channels[channel][0] == "large": continue
if channels[channel][1] is None: continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("553:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 553
else:
#use Z'
pythia.readString("32:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore decay into hadrons
if channels[channel][0] == "small": continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("32:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 32
#specify Dark Photon model
def specify_darkhiggs(self,pythia,mass):
pythia.readString("ProcessLevel:all = off");
pythia.readString("PartonLevel:FSR = off");
pythia.readString("111:mayDecay = off");
pythia.readString("310:mayDecay = off");
pythia.readString("130:mayDecay = off");
#define LLP model
llpmodel = LLPModel(self.model, mass=mass, coupling=1)
channels, branching = llpmodel.decays, llpmodel.branching
#Decay into Hadrons
if mass <= 2.0001:
#use etab0(1P)
pythia.readString("10551:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore 'other' channel and decay into quarks
if channels[channel][0] == "large": continue
if channels[channel][1] is None: continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("10551:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 10551
else:
#use Higgs
pythia.readString("25:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore decay into hadrons
if channels[channel][0] == "small": continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("25:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 25
#specify ALP-W model
def specify_alpw(self,pythia,mass):
pythia.readString("ProcessLevel:all = off");
pythia.readString("PartonLevel:FSR = off");
#define LLP model
llpmodel = LLPModel(self.model, mass=mass, coupling=1)
channels, branching = llpmodel.decays, llpmodel.branching
#Decay into Hadrons
if mass <= 1:
#use etab0(1P)
pythia.readString("10551:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore 'other' channel and decay into quarks
if channels[channel][0] == "large": continue
if channels[channel][1] is None: continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("10551:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 10551
else:
#use Higgs
pythia.readString("25:m0="+str(mass));
command="oneChannel"
for channel in channels:
# ignore decay into hadrons
if channels[channel][0] == "small": continue
# bRatio
bRatio = str(branching[channel])
# products
products = ""
for pid in channels[channel][1]: products = products + str(pid) + " "
# meMode
meMode =channels[channel][2]
# add to pythia
pythia.readString("25:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
command="addChannel"
return pythia, 25
#specify Dark Photon model
def specify_darkphoton_pythia(self,pythia,mass):
pythia.readString("Zprime:universality=on");
pythia.readString("32:m0="+str(mass));
pythia.readString("Zprime:vd=-0.3333");
pythia.readString("Zprime:vu=0.6666");
pythia.readString("Zprime:ve=-1");
pythia.readString("Zprime:vnue=0");
pythia.readString("Zprime:ad=0");
pythia.readString("Zprime:au=0");
pythia.readString("Zprime:ae=0");
pythia.readString("Zprime:anue=0");
pythia.readString("ProcessLevel:all = off");
pythia.readString("PartonLevel:FSR = off");
pythia.readString("111:mayDecay = off");
pythia.readString("310:mayDecay = off");
pythia.readString("130:mayDecay = off");
return pythia, 32
#specify Dark Higgs model
def specify_darkhiggs_pythia(self,pythia,mass):
pythia.readString("54:m0="+str(mass));
pythia.readString("Sdm:vf=1");
pythia.readString("Sdm:af=0");
pythia.readString("Sdm:vX=0");
pythia.readString("Sdm:aX=0");
pythia.readString("ProcessLevel:all = off");
pythia.readString("PartonLevel:FSR = off");
pythia.readString("111:mayDecay = off");
pythia.readString("310:mayDecay = off");
pythia.readString("130:mayDecay = off");
return pythia, 54
# function that simulates `nevent` dark photon decays for dark photon mass `mass`
    def simulate_events(self, mass, nevent=1000, print_first_event=False, print_particle_data=False, outputfile=None):
#specify particle
px,py,pz,en = 0,0,0,mass
status,col,acol,scale,pol = 2,0,0,0,9.
#initialize pythia
pythia = pythia8.Pythia()
if self.model=="DarkPhoton" and self.setup=="theory":
pythia, pid =self.specify_darkphoton(pythia=pythia,mass=mass)
if self.model=="DarkHiggs" and self.setup=="theory":
pythia, pid =self.specify_darkhiggs(pythia=pythia,mass=mass)
if self.model=="DarkPhoton" and self.setup=="pythia":
pythia, pid =self.specify_darkphoton_pythia(pythia=pythia,mass=mass)
if self.model=="DarkHiggs" and self.setup=="pythia":
pythia, pid =self.specify_darkhiggs_pythia(pythia=pythia,mass=mass)
if self.model=="ALP-W":
pythia, pid =self.specify_alpw(pythia=pythia,mass=mass)
        if print_particle_data: print(pythia.particleData)
pythia.init()
# Begin event loop. Generate event. Skip if error. List first one.
events = []
for iEvent in range(0, nevent):
pythia.event.reset()
pythia.event.append(pid, status, col, acol, px, py, pz, en, mass, scale, pol)
pythia.next()
if print_first_event and iEvent==0: print(pythia.event)
#Loop over particles in event. Find pions
event = []
for part in pythia.event:
if part.status()>0:
event.append([part.id(),part.px(),part.py(),part.pz(),part.e()])
events.append(event)
if outputfile is not None:
np.save(outputfile,events)
return events
# function that extracts branching fractions
def extract_br(self,events):
nevent = float(len(events))
branching_fraction={}
for event in events:
final_state=[particle[0] for particle in event]
final_state=list(np.sort(final_state))
if str(final_state) in branching_fraction.keys():
branching_fraction[str(final_state)] += 1./nevent
else:
branching_fraction[str(final_state)] = 1./nevent
return branching_fraction
# function that scans over the mass and obtains the branching fraction
def br_scan(self,massmin=0.105, massmax=1.95, nmass = 40, nevent=1000):
branching_fractions=[]
for mass in np.linspace(massmin, massmax, nmass):
events=self.simulate_events(mass=mass,nevent=nevent)
bf=self.extract_br(events)
branching_fractions.append([mass,bf])
return np.array(branching_fractions)
# scan over mass and claculate BR
def scan_br(self, massmin=0.01, massmax=2.0, nmass=40, nevent=1000):
# Simulate BR
data=self.br_scan(massmin=massmin, massmax=massmax,nmass=nmass, nevent=nevent)
np.save("files/results/brscan_"+self.model+".npy",data)
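# --- Hedged usage sketch (illustration only; requires the pythia8 bindings) ---
if __name__ == "__main__":
    generator = LLPGenerator("ALP-W")
    events = generator.simulate_events(mass=0.5, nevent=100)
    print(generator.extract_br(events))  # branching fractions keyed by final state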
| [
"[email protected]"
] | |
00b1a11eae7b2cec07120665c6de7285c8bbdae3 | 7ce479cac0a14d924159db9c784e3325b8f0bce7 | /schemaorgschemas/Thing/Intangible/Enumeration/MedicalImagingTechnique/Ultrasound/__init__.py | f0e44756fdb174fb8619176fe9fda3fa72543f5a | [] | no_license | EvelineAndreea/AGRe | 1f0c27237eb047a60bbcfb8d73e3157035406409 | b952125896a82741f6617c259dd4060954583180 | refs/heads/master | 2020-04-08T16:08:11.517166 | 2018-11-28T07:15:56 | 2018-11-28T07:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class UltrasoundSchema(SchemaObject):
"""Schema Mixin for Ultrasound
Usage: place after django model in class definition, schema will return the schema.org url for the object
Ultrasound imaging.
"""
def __init__(self):
self.schema = 'Ultrasound'
# schema.org version 2.0
| [
"[email protected]"
] | |
68a610b4031ce7289b9815128385f9e2f76c905c | 6349e2d51504500b20aeb7f784a569e218fd2650 | /palindrome_index.py | 111f34f67131b608138bb159c92fb4c351924472 | [] | no_license | dhananjaysahu79/hackerRanksubs | 4a963eaf7d4f72fdf62c236f26160a2b0f3ef4ba | e5a711c6805902997f1339829583002cda332c73 | refs/heads/main | 2023-08-12T10:08:42.179209 | 2021-09-30T17:40:56 | 2021-09-30T17:40:56 | 379,961,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | link = 'https://www.hackerrank.com/challenges/palindrome-index/problem'
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'palindromeIndex' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING s as parameter.
#
def palindromeIndex(s):
a = 0; b = len(s) - 1
while a <= b:
if s[a] != s[b]: break
a += 1
b -= 1
    if a > b: return -1  # loop ran to completion: the string is already a palindrome
t1 = s[:a] + s[a+1:]
t2 = s[:b] + s[b+1:]
if t1 == t1[::-1]: return a
if t2 == t2[::-1]: return b
return -1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
for q_itr in range(q):
s = input()
result = palindromeIndex(s)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
f3d4772cd3c835c606ce487aeab851ffdf081c5d | 85793c77984f254c0b1c7d5d2cb9e8e52cbbc753 | /problem020.py | cf95adef43fbbf9865de093a732c8494ffc67a62 | [] | no_license | alexanderteves/project-euler | 3604728e4b2f7045c96aac3cabadcf1cb366dbbb | 2ba6a526092dcdbbf0ecd00822a8e2acf05d4b65 | refs/heads/master | 2021-01-19T21:27:50.444717 | 2012-10-22T14:15:52 | 2012-10-22T14:15:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import re
import math
# Sum the decimal digits of 100! (Project Euler problem 20).
print sum(int(d) for d in re.findall('\d', str(math.factorial(100))))
| [
"[email protected]"
] | |
c6564498023bda58af9884f9c42dcde4fc338d1a | 4ab698389f13f0552787de0dd846cf9b01aae4bc | /csuro_intro_py/scripts/nodo_iterativo.py | 93a84e73d790d9ecc08a484d5fe15caa9bdfb467 | [
"MIT"
] | permissive | IntelligentRoboticsLabs/CSUROS | 575a4e06a42a946ba75d6f06cbc53e524f606be6 | 95a2d0b1d7c9edc7351a6fb510936fc293a63428 | refs/heads/master | 2020-05-02T05:34:35.654342 | 2019-05-26T07:35:55 | 2019-05-26T07:35:55 | 177,774,700 | 1 | 4 | null | 2019-05-21T07:54:31 | 2019-03-26T11:28:22 | C++ | UTF-8 | Python | false | false | 400 | py | #!/usr/bin/env python
import rospy
if __name__ == '__main__':
try:
rospy.init_node('iterativo', anonymous=False)
rate = rospy.Rate(20)
count = 0;
while not rospy.is_shutdown():
count = count + 1
rospy.loginfo("iteration %d" % count)
rate.sleep()
except rospy.ROSInterruptException:
pass | [
"[email protected]"
] | |
f2961d2cf0e5afc4a1c5617b84a68c33f5035a08 | 5f91ef601aa3e58cb5c24f1a4cfefd264078f4ee | /Python Task Day4/Text Wrap.py | fbee398512838dcdefb4e3d1b75cbbab797692cd | [] | no_license | Jayasree-Repalla/Innomatics_Internship_APR_21 | 386a71d2a7da788aa6088087c5e42c97271c6afe | f5505045a09b8445c704c0b0135502731fc42a5f | refs/heads/main | 2023-05-09T13:59:20.194465 | 2021-05-29T12:21:47 | 2021-05-29T12:21:47 | 363,651,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import textwrap
def wrap(string, max_width):
    # join the wrapped chunks one per line, keeping the trailing newline
    lines = textwrap.wrap(string, max_width)
    return ''.join(line + "\n" for line in lines)
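if __name__ == '__main__':
    # quick local check; the HackerRank harness normally supplies the I/O
    print(wrap("ABCDEFGHIJKLIMNOQRSTUVWXYZ", 4))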
"[email protected]"
] | |
c807082254ccacd00922058c4aff343aabe56cb0 | 46a784ddc9377216faa9ba205729ca6524f901b6 | /classoutput.py | 4941a65639929d210976591e886a48ed590b58f3 | [] | no_license | asiddiq1/MapQuestAPI | 9933253e9baaea54df0fb1f0d6f09035ca577a0c | c2d8813238cb3023169c2f1dae682dd3bb545696 | refs/heads/master | 2021-05-15T14:24:34.920005 | 2018-04-10T00:39:18 | 2018-04-10T00:39:18 | 107,197,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | #Aisha Siddiq lab 12:00 - 1:50 pm (project 3)
class STEPS:
def __init__(self, jsondata):
self.jsonD = jsondata
def return_data(self)->list:
'''Returns the json steps in a list
'''
directions = ["DIRECTIONS"]
for items in self.jsonD['route']['legs']:
for maneuvers in items['maneuvers']:
directions.append(maneuvers['narrative'])
return directions
class TOTALDISTANCE:
def __init__(self, jsondata):
self.jsonD = jsondata
def return_data(self)->list:
'''Returns the total distance in a list
'''
distance = []
distance.append('TOTAL DISTANCE: ' + str(round(self.jsonD['route']['distance'])) + ' '+ "miles")
return distance
class TOTALTIME:
def __init__(self, jsondata):
self.jsonD = jsondata
def return_data(self)->list:
'''Returns the total time in a list
'''
time = []
time_mins = round(self.jsonD['route']['time']/60)
time.append('TOTAL TIME: ' + str(time_mins) + ' ' + 'minutes')
return time
class LATLONG:
def __init__(self, jsondata):
self.jsonD = jsondata
def return_data(self)->list:
'''Returns the formatted longitude and latitude in a list
'''
latlonglist = ['LATLONGS']
for items in self.jsonD['route']['locations']:
latlong = items['latLng']
if latlong['lat'] < 0:
latitude = '{:.2f}S'.format(latlong['lat'] * -1)
elif latlong['lat'] > 0:
latitude = '{:.2f}N'.format(latlong['lat'])
else:
latitude = '{}'.format(latlong['lat'])
if latlong['lng'] < 0:
longitude = '{:.2f}W'.format(latlong['lng'] * -1)
elif latlong['lng'] > 0:
longitude = '{:.2f}E'.format(latlong['lng'])
else:
longitude = '{}'.format(latlong['lng'])
latlonglist.append(latitude + ' ' + longitude)
return latlonglist
class ELEVATION:
def __init__(self, jsonlist):
self.jsonDlist = jsonlist
def return_data(self)->list:
'''Returns the elevation in a list
'''
elevation_list = ['ELEVATIONS']
for jsondatalist in self.jsonDlist:
for distance in jsondatalist['elevationProfile']:
elevation_list.append(round(distance['height'] * 3.2808))
return elevation_list
| [
"[email protected]"
] | |
252f84d6a95e092876dd4c65bc19bbc252cab747 | 8f619dc8adb894d92590b14af0e5a4b6f7981c44 | /users/api/serializers.py | 106e4e01304d8c13262022590aaf055f510e641d | [] | no_license | ArashMoshirnia/store_with_smart_recommender | e427c97ae18945bb3a0d63b6a8efd2620c048314 | ccfddd3c03963d820054f2f418037ae0b81b10f5 | refs/heads/master | 2023-07-05T03:09:50.181457 | 2021-08-10T14:20:34 | 2021-08-10T14:20:34 | 352,128,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | from rest_framework import serializers
from users.models import User
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
class Meta:
model = User
fields = ('id', 'username', 'password', 'email')
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
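# --- Hedged usage sketch (needs a configured Django project, so left commented) ---
# serializer = UserSerializer(data={'username': 'alice', 'password': 's3cret',
#                                   'email': '[email protected]'})
# if serializer.is_valid():
#     user = serializer.save()  # routes through create_user, so the password is hashed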
| [
"[email protected]"
] | |
8053e4303904863cf6697e7b52d39559a3132db8 | ab6d64c36ddbdc042f6676d6e10f6a25360870cd | /test45_pruebas_p2.py | 00623e6da2486977da8020aaeacda50fba4cc2df | [] | no_license | Leduin-Abel/Python | 3219867ef39c81870a720c7ce22fc7c28cbdb8be | 925c907e0215b93d251c4b3c3d1383206332251d | refs/heads/main | 2023-07-09T14:43:31.488191 | 2021-08-22T23:24:27 | 2021-08-22T23:24:27 | 398,921,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | import math
def raizCuadrada(listaNumeros):
"""
    The function returns a list with the square
    roots of the numeric elements passed in
    as a parameter inside another list
>>> lista=[]
>>> for i in [4,9,16]:
... lista.append(i)
>>> raizCuadrada(lista)
[2.0, 3.0, 4.0]
>>> lista=[]
>>> for i in [4, -9, 16]:
... lista.append(i)
>>> raizCuadrada(lista)
Traceback (most recent call last):
...
ValueError: math domain error
"""
    # in doctests, three dots continue nested expressions
    # they are also used to elide lines in expected tracebacks
    return [math.sqrt(n) for n in listaNumeros]
#print(raizCuadrada([9, -16, 25, 36]))
import doctest
doctest.testmod() | [
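# Tip: run "python -m doctest test45_pruebas_p2.py -v" for verbose results.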
"[email protected]"
] | |
71cf9ef7365861b60961c7c755e9c789b1f5252f | d6d4612967636fa0452a7b84bc38adbb7a21d415 | /mlflow-project-driver.py | ffa296184ade7130926d126724dee485a7f34915 | [] | no_license | MaisaDaoud/celeb-mlflow-example | 50a0bb9b91c5b1af6deeaa107d63abe30c774631 | 9a56efe5ce47d9561e7a4e172da3120e3f53a59d | refs/heads/main | 2023-01-30T17:37:15.259333 | 2020-12-15T21:26:50 | 2020-12-15T21:26:50 | 321,790,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | import mlflow
# Launch three asynchronous local runs of the same project,
# varying only the convolution count and image randomization.
for convolutions, randomize_images in [(3, True), (2, False), (0, False)]:
    mlflow.projects.run(
        'https://github.com/MaisaDaoud/celeb-mlflow-example',
        backend='local',
        synchronous=False,
        parameters={
            'batch_size': 32,
            'epochs': 10,
            'convolutions': convolutions,
            'training_samples': 15000,
            'validation_samples': 2000,
            'randomize_images': randomize_images
        })
"[email protected]"
] | |
40795157a3f554244a207f013e2929446f70dbea | 1d9a012f38a40f662c6e1047b2b2e52635374c0d | /login.py | cafb9a5376f71db36a020a4ee82326d564e76d97 | [] | no_license | Tanjila-Sarkar-Promi/3rd_Semester_project | 3b356bbaa09ba56ce61b9db0bc125611edef70f6 | 824061f59ecfd707b64bf624d4bb53533ba817bf | refs/heads/master | 2023-08-22T00:25:24.862850 | 2021-10-17T15:32:41 | 2021-10-17T15:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | #!C:/pythoncode/venv/Scripts/python.exe
print("Content-type:text/html\r\n\r\n")
import mysql.connector
import cgi
form = cgi.FieldStorage()
n=form.getvalue("name")
p=form.getvalue("pass")
mydb=mysql.connector.connect(
host="localhost",
user="root",
password="",
database="medi"
)
mycursor=mydb.cursor()
sql= "INSERT INTO login_tb (Name,Password) Values (%s,%s)"
val=(n, p)
mycursor.execute(sql, val)
mydb.commit()
mydb.close()
print(mycursor.rowcount,"Record Inserted!")
| [
"[email protected]"
] |